author    Michaël Zasso <targos@protonmail.com>    2018-05-31 11:11:57 +0200
committer Myles Borins <mylesborins@google.com>    2018-06-01 09:58:27 +0200
commit    352a525eb984b8fa2d6f0f6fd68395e6a080bba4 (patch)
tree      a105ae93f8fd8f533cce19a429f1b6e95d6e11ca /deps/v8/src
parent    faf449ca0490f5371dc6cbbc94a87eb697b00fcc (diff)
deps: update V8 to 6.7.288.43
PR-URL: https://github.com/nodejs/node/pull/19989
Reviewed-By: James M Snell <jasnell@gmail.com>
Reviewed-By: Matheus Marchini <matheus@sthima.com>
Reviewed-By: Gus Caplan <me@gus.host>
Reviewed-By: Myles Borins <myles.borins@gmail.com>
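For context only, and not part of the commit itself: once Node.js is rebuilt against the updated deps/v8, the bundled V8 version is exposed through process.versions, which gives a quick way to confirm the upgrade took effect. A minimal sketch (the exact "-node.*" suffix on the version string is an assumption):

    // check-v8-version.js -- hypothetical helper, not part of this commit.
    // process.versions.v8 reports the V8 version bundled into the running node binary.
    const v8Version = process.versions.v8;
    if (!v8Version.startsWith('6.7.')) {
      throw new Error(`unexpected V8 version: ${v8Version}`);
    }
    console.log(`bundled V8: ${v8Version}`); // e.g. "6.7.288.43-node.5"

Run it with the freshly built binary, e.g. ./node check-v8-version.js.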
Diffstat (limited to 'deps/v8/src')
-rw-r--r--deps/v8/src/DEPS2
-rw-r--r--deps/v8/src/accessors.cc23
-rw-r--r--deps/v8/src/accessors.h20
-rw-r--r--deps/v8/src/allocation.cc4
-rw-r--r--deps/v8/src/allocation.h11
-rw-r--r--deps/v8/src/api-arguments-inl.h182
-rw-r--r--deps/v8/src/api-arguments.cc57
-rw-r--r--deps/v8/src/api-arguments.h47
-rw-r--r--deps/v8/src/api-natives.cc2
-rw-r--r--deps/v8/src/api-natives.h8
-rw-r--r--deps/v8/src/api.cc480
-rw-r--r--deps/v8/src/api.h9
-rw-r--r--deps/v8/src/arm/assembler-arm-inl.h22
-rw-r--r--deps/v8/src/arm/assembler-arm.cc16
-rw-r--r--deps/v8/src/arm/assembler-arm.h24
-rw-r--r--deps/v8/src/arm/code-stubs-arm.cc44
-rw-r--r--deps/v8/src/arm/codegen-arm.cc1
-rw-r--r--deps/v8/src/arm/frame-constants-arm.h24
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.cc47
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.h11
-rw-r--r--deps/v8/src/arm64/assembler-arm64-inl.h22
-rw-r--r--deps/v8/src/arm64/assembler-arm64.cc3
-rw-r--r--deps/v8/src/arm64/assembler-arm64.h53
-rw-r--r--deps/v8/src/arm64/code-stubs-arm64.cc43
-rw-r--r--deps/v8/src/arm64/codegen-arm64.cc1
-rw-r--r--deps/v8/src/arm64/frame-constants-arm64.h22
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.cc45
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.h9
-rw-r--r--deps/v8/src/arm64/simulator-arm64.cc12
-rw-r--r--deps/v8/src/arm64/simulator-arm64.h1
-rw-r--r--deps/v8/src/asmjs/asm-js.cc64
-rw-r--r--deps/v8/src/asmjs/asm-js.h9
-rw-r--r--deps/v8/src/assembler.cc1039
-rw-r--r--deps/v8/src/assembler.h413
-rw-r--r--deps/v8/src/ast/ast-traversal-visitor.h18
-rw-r--r--deps/v8/src/ast/ast-value-factory.h2
-rw-r--r--deps/v8/src/ast/ast.cc17
-rw-r--r--deps/v8/src/ast/ast.h92
-rw-r--r--deps/v8/src/ast/compile-time-value.cc2
-rw-r--r--deps/v8/src/ast/prettyprinter.cc30
-rw-r--r--deps/v8/src/ast/scopes.cc1
-rw-r--r--deps/v8/src/ast/scopes.h16
-rw-r--r--deps/v8/src/bailout-reason.h4
-rw-r--r--deps/v8/src/base.isolate39
-rw-r--r--deps/v8/src/base/compiler-specific.h10
-rw-r--r--deps/v8/src/base/debug/stack_trace_posix.cc7
-rw-r--r--deps/v8/src/base/debug/stack_trace_win.cc13
-rw-r--r--deps/v8/src/base/flags.h86
-rw-r--r--deps/v8/src/base/ieee754.cc10
-rw-r--r--deps/v8/src/base/logging.h18
-rw-r--r--deps/v8/src/base/macros.h82
-rw-r--r--deps/v8/src/base/platform/condition-variable.h2
-rw-r--r--deps/v8/src/base/platform/mutex.h4
-rw-r--r--deps/v8/src/base/platform/platform-aix.cc10
-rw-r--r--deps/v8/src/base/platform/platform-cygwin.cc4
-rw-r--r--deps/v8/src/base/platform/platform-posix-time.cc4
-rw-r--r--deps/v8/src/base/platform/platform-posix-time.h2
-rw-r--r--deps/v8/src/base/platform/platform-posix.cc7
-rw-r--r--deps/v8/src/base/platform/platform-solaris.cc5
-rw-r--r--deps/v8/src/base/platform/platform-win32.cc35
-rw-r--r--deps/v8/src/base/platform/platform.h14
-rw-r--r--deps/v8/src/base/platform/semaphore.h2
-rw-r--r--deps/v8/src/base/template-utils.h19
-rw-r--r--deps/v8/src/base/timezone-cache.h4
-rw-r--r--deps/v8/src/base/utils/random-number-generator.h21
-rw-r--r--deps/v8/src/bootstrapper.cc284
-rw-r--r--deps/v8/src/bootstrapper.h2
-rw-r--r--deps/v8/src/boxed-float.h6
-rw-r--r--deps/v8/src/builtins/arm/builtins-arm.cc319
-rw-r--r--deps/v8/src/builtins/arm64/builtins-arm64.cc315
-rw-r--r--deps/v8/src/builtins/builtins-api.cc67
-rw-r--r--deps/v8/src/builtins/builtins-array-gen.cc1567
-rw-r--r--deps/v8/src/builtins/builtins-array-gen.h19
-rw-r--r--deps/v8/src/builtins/builtins-array.cc84
-rw-r--r--deps/v8/src/builtins/builtins-arraybuffer.cc41
-rw-r--r--deps/v8/src/builtins/builtins-async-gen.cc4
-rw-r--r--deps/v8/src/builtins/builtins-async-generator-gen.cc80
-rw-r--r--deps/v8/src/builtins/builtins-bigint.cc39
-rw-r--r--deps/v8/src/builtins/builtins-boolean.cc37
-rw-r--r--deps/v8/src/builtins/builtins-call-gen.cc137
-rw-r--r--deps/v8/src/builtins/builtins-collections-gen.cc63
-rw-r--r--deps/v8/src/builtins/builtins-console.cc4
-rw-r--r--deps/v8/src/builtins/builtins-constructor-gen.cc246
-rw-r--r--deps/v8/src/builtins/builtins-constructor-gen.h3
-rw-r--r--deps/v8/src/builtins/builtins-conversion-gen.cc10
-rw-r--r--deps/v8/src/builtins/builtins-dataview.cc176
-rw-r--r--deps/v8/src/builtins/builtins-date.cc155
-rw-r--r--deps/v8/src/builtins/builtins-definitions.h76
-rw-r--r--deps/v8/src/builtins/builtins-function-gen.cc28
-rw-r--r--deps/v8/src/builtins/builtins-handler-gen.cc9
-rw-r--r--deps/v8/src/builtins/builtins-ic-gen.cc1
-rw-r--r--deps/v8/src/builtins/builtins-internal-gen.cc64
-rw-r--r--deps/v8/src/builtins/builtins-interpreter-gen.cc17
-rw-r--r--deps/v8/src/builtins/builtins-interpreter.cc17
-rw-r--r--deps/v8/src/builtins/builtins-iterator-gen.cc51
-rw-r--r--deps/v8/src/builtins/builtins-object-gen.cc39
-rw-r--r--deps/v8/src/builtins/builtins-promise-gen.cc489
-rw-r--r--deps/v8/src/builtins/builtins-promise-gen.h32
-rw-r--r--deps/v8/src/builtins/builtins-proxy-gen.cc102
-rw-r--r--deps/v8/src/builtins/builtins-regexp-gen.cc396
-rw-r--r--deps/v8/src/builtins/builtins-regexp-gen.h13
-rw-r--r--deps/v8/src/builtins/builtins-sharedarraybuffer.cc6
-rw-r--r--deps/v8/src/builtins/builtins-string-gen.cc146
-rw-r--r--deps/v8/src/builtins/builtins-string-gen.h7
-rw-r--r--deps/v8/src/builtins/builtins-string.cc4
-rw-r--r--deps/v8/src/builtins/builtins-symbol.cc32
-rw-r--r--deps/v8/src/builtins/builtins-typedarray-gen.cc147
-rw-r--r--deps/v8/src/builtins/builtins-utils.h12
-rw-r--r--deps/v8/src/builtins/builtins.cc820
-rw-r--r--deps/v8/src/builtins/builtins.h30
-rw-r--r--deps/v8/src/builtins/constants-table-builder.cc3
-rw-r--r--deps/v8/src/builtins/constants-table-builder.h4
-rw-r--r--deps/v8/src/builtins/growable-fixed-array-gen.cc4
-rw-r--r--deps/v8/src/builtins/ia32/builtins-ia32.cc316
-rw-r--r--deps/v8/src/builtins/mips/builtins-mips.cc312
-rw-r--r--deps/v8/src/builtins/mips64/builtins-mips64.cc313
-rw-r--r--deps/v8/src/builtins/ppc/builtins-ppc.cc322
-rw-r--r--deps/v8/src/builtins/s390/builtins-s390.cc310
-rw-r--r--deps/v8/src/builtins/setup-builtins-internal.cc25
-rw-r--r--deps/v8/src/builtins/x64/builtins-x64.cc313
-rw-r--r--deps/v8/src/cancelable-task.cc1
-rw-r--r--deps/v8/src/char-predicates-inl.h4
-rw-r--r--deps/v8/src/code-events.h56
-rw-r--r--deps/v8/src/code-stub-assembler.cc1313
-rw-r--r--deps/v8/src/code-stub-assembler.h606
-rw-r--r--deps/v8/src/code-stubs.cc15
-rw-r--r--deps/v8/src/code-stubs.h41
-rw-r--r--deps/v8/src/compilation-cache.cc2
-rw-r--r--deps/v8/src/compilation-dependencies.cc2
-rw-r--r--deps/v8/src/compilation-statistics.h2
-rw-r--r--deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc46
-rw-r--r--deps/v8/src/compiler-dispatcher/compiler-dispatcher.h14
-rw-r--r--deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc34
-rw-r--r--deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h14
-rw-r--r--deps/v8/src/compiler-dispatcher/unoptimized-compile-job.cc36
-rw-r--r--deps/v8/src/compiler-dispatcher/unoptimized-compile-job.h5
-rw-r--r--deps/v8/src/compiler.cc711
-rw-r--r--deps/v8/src/compiler.h193
-rw-r--r--deps/v8/src/compiler/OWNERS2
-rw-r--r--deps/v8/src/compiler/access-builder.cc36
-rw-r--r--deps/v8/src/compiler/access-builder.h13
-rw-r--r--deps/v8/src/compiler/access-info.h2
-rw-r--r--deps/v8/src/compiler/allocation-builder.h10
-rw-r--r--deps/v8/src/compiler/arm/code-generator-arm.cc105
-rw-r--r--deps/v8/src/compiler/arm/instruction-selector-arm.cc161
-rw-r--r--deps/v8/src/compiler/arm64/code-generator-arm64.cc49
-rw-r--r--deps/v8/src/compiler/arm64/instruction-codes-arm64.h22
-rw-r--r--deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc26
-rw-r--r--deps/v8/src/compiler/arm64/instruction-selector-arm64.cc213
-rw-r--r--deps/v8/src/compiler/basic-block-instrumentor.cc7
-rw-r--r--deps/v8/src/compiler/basic-block-instrumentor.h4
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.cc58
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.h7
-rw-r--r--deps/v8/src/compiler/code-assembler.cc95
-rw-r--r--deps/v8/src/compiler/code-assembler.h33
-rw-r--r--deps/v8/src/compiler/code-generator.cc104
-rw-r--r--deps/v8/src/compiler/code-generator.h47
-rw-r--r--deps/v8/src/compiler/common-operator-reducer.cc4
-rw-r--r--deps/v8/src/compiler/common-operator.cc44
-rw-r--r--deps/v8/src/compiler/common-operator.h77
-rw-r--r--deps/v8/src/compiler/control-flow-optimizer.cc5
-rw-r--r--deps/v8/src/compiler/effect-control-linearizer.cc391
-rw-r--r--deps/v8/src/compiler/effect-control-linearizer.h15
-rw-r--r--deps/v8/src/compiler/escape-analysis-reducer.cc3
-rw-r--r--deps/v8/src/compiler/frame-states.cc51
-rw-r--r--deps/v8/src/compiler/frame-states.h20
-rw-r--r--deps/v8/src/compiler/graph-assembler.cc11
-rw-r--r--deps/v8/src/compiler/graph-assembler.h1
-rw-r--r--deps/v8/src/compiler/graph-visualizer.cc9
-rw-r--r--deps/v8/src/compiler/graph-visualizer.h9
-rw-r--r--deps/v8/src/compiler/ia32/code-generator-ia32.cc190
-rw-r--r--deps/v8/src/compiler/ia32/instruction-codes-ia32.h15
-rw-r--r--deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc13
-rw-r--r--deps/v8/src/compiler/ia32/instruction-selector-ia32.cc230
-rw-r--r--deps/v8/src/compiler/instruction-codes.h2
-rw-r--r--deps/v8/src/compiler/instruction-scheduler.cc54
-rw-r--r--deps/v8/src/compiler/instruction-scheduler.h16
-rw-r--r--deps/v8/src/compiler/instruction-selector-impl.h285
-rw-r--r--deps/v8/src/compiler/instruction-selector.cc440
-rw-r--r--deps/v8/src/compiler/instruction-selector.h258
-rw-r--r--deps/v8/src/compiler/instruction.cc4
-rw-r--r--deps/v8/src/compiler/instruction.h12
-rw-r--r--deps/v8/src/compiler/int64-lowering.cc10
-rw-r--r--deps/v8/src/compiler/js-builtin-reducer.cc1357
-rw-r--r--deps/v8/src/compiler/js-builtin-reducer.h35
-rw-r--r--deps/v8/src/compiler/js-call-reducer.cc1537
-rw-r--r--deps/v8/src/compiler/js-call-reducer.h41
-rw-r--r--deps/v8/src/compiler/js-context-specialization.cc2
-rw-r--r--deps/v8/src/compiler/js-create-lowering.cc135
-rw-r--r--deps/v8/src/compiler/js-create-lowering.h2
-rw-r--r--deps/v8/src/compiler/js-generic-lowering.cc29
-rw-r--r--deps/v8/src/compiler/js-inlining-heuristic.cc16
-rw-r--r--deps/v8/src/compiler/js-inlining-heuristic.h2
-rw-r--r--deps/v8/src/compiler/js-inlining.cc17
-rw-r--r--deps/v8/src/compiler/js-inlining.h6
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.cc122
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.h1
-rw-r--r--deps/v8/src/compiler/js-operator.cc102
-rw-r--r--deps/v8/src/compiler/js-operator.h75
-rw-r--r--deps/v8/src/compiler/js-type-hint-lowering.cc3
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.cc83
-rw-r--r--deps/v8/src/compiler/linkage.cc5
-rw-r--r--deps/v8/src/compiler/linkage.h5
-rw-r--r--deps/v8/src/compiler/load-elimination.cc4
-rw-r--r--deps/v8/src/compiler/load-elimination.h1
-rw-r--r--deps/v8/src/compiler/loop-variable-optimizer.cc7
-rw-r--r--deps/v8/src/compiler/machine-graph-verifier.cc10
-rw-r--r--deps/v8/src/compiler/machine-operator.cc214
-rw-r--r--deps/v8/src/compiler/machine-operator.h49
-rw-r--r--deps/v8/src/compiler/memory-optimizer.cc15
-rw-r--r--deps/v8/src/compiler/memory-optimizer.h9
-rw-r--r--deps/v8/src/compiler/mips/code-generator-mips.cc436
-rw-r--r--deps/v8/src/compiler/mips/instruction-selector-mips.cc96
-rw-r--r--deps/v8/src/compiler/mips64/code-generator-mips64.cc435
-rw-r--r--deps/v8/src/compiler/mips64/instruction-selector-mips64.cc102
-rw-r--r--deps/v8/src/compiler/node-matchers.h16
-rw-r--r--deps/v8/src/compiler/node-properties.cc1
-rw-r--r--deps/v8/src/compiler/node.h6
-rw-r--r--deps/v8/src/compiler/opcodes.h31
-rw-r--r--deps/v8/src/compiler/operation-typer.cc2
-rw-r--r--deps/v8/src/compiler/operator-properties.cc1
-rw-r--r--deps/v8/src/compiler/osr.cc8
-rw-r--r--deps/v8/src/compiler/osr.h4
-rw-r--r--deps/v8/src/compiler/pipeline-statistics.cc6
-rw-r--r--deps/v8/src/compiler/pipeline-statistics.h2
-rw-r--r--deps/v8/src/compiler/pipeline.cc283
-rw-r--r--deps/v8/src/compiler/pipeline.h32
-rw-r--r--deps/v8/src/compiler/ppc/code-generator-ppc.cc53
-rw-r--r--deps/v8/src/compiler/ppc/instruction-selector-ppc.cc73
-rw-r--r--deps/v8/src/compiler/property-access-builder.cc25
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.cc12
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.h44
-rw-r--r--deps/v8/src/compiler/redundancy-elimination.cc1
-rw-r--r--deps/v8/src/compiler/register-allocator-verifier.cc18
-rw-r--r--deps/v8/src/compiler/register-allocator-verifier.h5
-rw-r--r--deps/v8/src/compiler/register-allocator.cc37
-rw-r--r--deps/v8/src/compiler/register-allocator.h9
-rw-r--r--deps/v8/src/compiler/representation-change.cc30
-rw-r--r--deps/v8/src/compiler/representation-change.h2
-rw-r--r--deps/v8/src/compiler/s390/code-generator-s390.cc43
-rw-r--r--deps/v8/src/compiler/s390/instruction-selector-s390.cc149
-rw-r--r--deps/v8/src/compiler/schedule.cc19
-rw-r--r--deps/v8/src/compiler/schedule.h4
-rw-r--r--deps/v8/src/compiler/simd-scalar-lowering.cc47
-rw-r--r--deps/v8/src/compiler/simplified-lowering.cc150
-rw-r--r--deps/v8/src/compiler/simplified-operator.cc69
-rw-r--r--deps/v8/src/compiler/simplified-operator.h107
-rw-r--r--deps/v8/src/compiler/store-store-elimination.cc4
-rw-r--r--deps/v8/src/compiler/type-cache.h4
-rw-r--r--deps/v8/src/compiler/typed-optimization.cc148
-rw-r--r--deps/v8/src/compiler/typed-optimization.h8
-rw-r--r--deps/v8/src/compiler/typer.cc71
-rw-r--r--deps/v8/src/compiler/types.cc34
-rw-r--r--deps/v8/src/compiler/types.h52
-rw-r--r--deps/v8/src/compiler/verifier.cc83
-rw-r--r--deps/v8/src/compiler/wasm-compiler.cc1192
-rw-r--r--deps/v8/src/compiler/wasm-compiler.h204
-rw-r--r--deps/v8/src/compiler/wasm-linkage.cc42
-rw-r--r--deps/v8/src/compiler/x64/code-generator-x64.cc288
-rw-r--r--deps/v8/src/compiler/x64/instruction-codes-x64.h484
-rw-r--r--deps/v8/src/compiler/x64/instruction-scheduler-x64.cc40
-rw-r--r--deps/v8/src/compiler/x64/instruction-selector-x64.cc295
-rw-r--r--deps/v8/src/contexts-inl.h27
-rw-r--r--deps/v8/src/contexts.h94
-rw-r--r--deps/v8/src/conversions.cc2
-rw-r--r--deps/v8/src/counters.h14
-rw-r--r--deps/v8/src/d8-console.cc2
-rw-r--r--deps/v8/src/d8-posix.cc8
-rw-r--r--deps/v8/src/d8-windows.cc8
-rw-r--r--deps/v8/src/d8.cc129
-rw-r--r--deps/v8/src/d8.h18
-rw-r--r--deps/v8/src/d8.isolate16
-rw-r--r--deps/v8/src/date.cc72
-rw-r--r--deps/v8/src/date.h65
-rw-r--r--deps/v8/src/debug/debug-coverage.cc62
-rw-r--r--deps/v8/src/debug/debug-evaluate.cc276
-rw-r--r--deps/v8/src/debug/debug-evaluate.h20
-rw-r--r--deps/v8/src/debug/debug-interface.h24
-rw-r--r--deps/v8/src/debug/debug-scopes.cc17
-rw-r--r--deps/v8/src/debug/debug-scopes.h8
-rw-r--r--deps/v8/src/debug/debug.cc407
-rw-r--r--deps/v8/src/debug/debug.h88
-rw-r--r--deps/v8/src/debug/liveedit.cc61
-rw-r--r--deps/v8/src/debug/liveedit.h5
-rw-r--r--deps/v8/src/debug/liveedit.js2
-rw-r--r--deps/v8/src/debug/mirrors.js4
-rw-r--r--deps/v8/src/deoptimizer.cc328
-rw-r--r--deps/v8/src/deoptimizer.h74
-rw-r--r--deps/v8/src/disassembler.cc6
-rw-r--r--deps/v8/src/elements.cc5
-rw-r--r--deps/v8/src/elements.h9
-rw-r--r--deps/v8/src/execution.cc10
-rw-r--r--deps/v8/src/execution.h17
-rw-r--r--deps/v8/src/external-reference-table.cc423
-rw-r--r--deps/v8/src/external-reference-table.h55
-rw-r--r--deps/v8/src/external-reference.cc1028
-rw-r--r--deps/v8/src/external-reference.h305
-rw-r--r--deps/v8/src/feedback-vector-inl.h52
-rw-r--r--deps/v8/src/feedback-vector.cc80
-rw-r--r--deps/v8/src/feedback-vector.h68
-rw-r--r--deps/v8/src/flag-definitions.h72
-rw-r--r--deps/v8/src/frame-constants.h145
-rw-r--r--deps/v8/src/frames-inl.h10
-rw-r--r--deps/v8/src/frames.cc210
-rw-r--r--deps/v8/src/frames.h57
-rw-r--r--deps/v8/src/gdb-jit.cc28
-rw-r--r--deps/v8/src/global-handles.cc14
-rw-r--r--deps/v8/src/globals.h106
-rw-r--r--deps/v8/src/handles.cc9
-rw-r--r--deps/v8/src/heap-symbols.h6
-rw-r--r--deps/v8/src/heap/array-buffer-collector.cc8
-rw-r--r--deps/v8/src/heap/array-buffer-tracker-inl.h12
-rw-r--r--deps/v8/src/heap/array-buffer-tracker.cc35
-rw-r--r--deps/v8/src/heap/array-buffer-tracker.h12
-rw-r--r--deps/v8/src/heap/code-stats.cc8
-rw-r--r--deps/v8/src/heap/concurrent-marking.cc199
-rw-r--r--deps/v8/src/heap/factory-inl.h (renamed from deps/v8/src/factory-inl.h)9
-rw-r--r--deps/v8/src/heap/factory.cc (renamed from deps/v8/src/factory.cc)1894
-rw-r--r--deps/v8/src/heap/factory.h (renamed from deps/v8/src/factory.h)278
-rw-r--r--deps/v8/src/heap/gc-tracer.cc58
-rw-r--r--deps/v8/src/heap/gc-tracer.h14
-rw-r--r--deps/v8/src/heap/heap-inl.h235
-rw-r--r--deps/v8/src/heap/heap.cc1949
-rw-r--r--deps/v8/src/heap/heap.h556
-rw-r--r--deps/v8/src/heap/incremental-marking-inl.h18
-rw-r--r--deps/v8/src/heap/incremental-marking-job.cc4
-rw-r--r--deps/v8/src/heap/incremental-marking.cc66
-rw-r--r--deps/v8/src/heap/incremental-marking.h10
-rw-r--r--deps/v8/src/heap/invalidated-slots-inl.h2
-rw-r--r--deps/v8/src/heap/item-parallel-job.cc12
-rw-r--r--deps/v8/src/heap/item-parallel-job.h4
-rw-r--r--deps/v8/src/heap/mark-compact-inl.h74
-rw-r--r--deps/v8/src/heap/mark-compact.cc2504
-rw-r--r--deps/v8/src/heap/mark-compact.h184
-rw-r--r--deps/v8/src/heap/marking.cc10
-rw-r--r--deps/v8/src/heap/marking.h2
-rw-r--r--deps/v8/src/heap/memory-reducer.cc2
-rw-r--r--deps/v8/src/heap/object-stats.cc77
-rw-r--r--deps/v8/src/heap/object-stats.h3
-rw-r--r--deps/v8/src/heap/objects-visiting-inl.h39
-rw-r--r--deps/v8/src/heap/objects-visiting.h8
-rw-r--r--deps/v8/src/heap/remembered-set.h19
-rw-r--r--deps/v8/src/heap/scavenge-job.cc2
-rw-r--r--deps/v8/src/heap/scavenger-inl.h88
-rw-r--r--deps/v8/src/heap/scavenger.cc77
-rw-r--r--deps/v8/src/heap/scavenger.h16
-rw-r--r--deps/v8/src/heap/setup-heap-internal.cc250
-rw-r--r--deps/v8/src/heap/spaces-inl.h6
-rw-r--r--deps/v8/src/heap/spaces.cc115
-rw-r--r--deps/v8/src/heap/spaces.h75
-rw-r--r--deps/v8/src/heap/store-buffer.cc10
-rw-r--r--deps/v8/src/heap/sweeper.cc77
-rw-r--r--deps/v8/src/heap/sweeper.h20
-rw-r--r--deps/v8/src/ia32/assembler-ia32-inl.h26
-rw-r--r--deps/v8/src/ia32/assembler-ia32.cc25
-rw-r--r--deps/v8/src/ia32/assembler-ia32.h44
-rw-r--r--deps/v8/src/ia32/code-stubs-ia32.cc118
-rw-r--r--deps/v8/src/ia32/codegen-ia32.cc3
-rw-r--r--deps/v8/src/ia32/disasm-ia32.cc30
-rw-r--r--deps/v8/src/ia32/frame-constants-ia32.h36
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.cc49
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.h18
-rw-r--r--deps/v8/src/ia32/sse-instr.h2
-rw-r--r--deps/v8/src/ic/accessor-assembler.cc695
-rw-r--r--deps/v8/src/ic/accessor-assembler.h79
-rw-r--r--deps/v8/src/ic/handler-configuration-inl.h28
-rw-r--r--deps/v8/src/ic/handler-configuration.cc74
-rw-r--r--deps/v8/src/ic/handler-configuration.h24
-rw-r--r--deps/v8/src/ic/ic.cc417
-rw-r--r--deps/v8/src/ic/ic.h61
-rw-r--r--deps/v8/src/ic/keyed-store-generic.cc325
-rw-r--r--deps/v8/src/inspector/injected-script-source.js21
-rw-r--r--deps/v8/src/inspector/injected-script.cc31
-rw-r--r--deps/v8/src/inspector/injected-script.h2
-rw-r--r--deps/v8/src/inspector/inspector_protocol_config.json2
-rw-r--r--deps/v8/src/inspector/js_protocol.json66
-rw-r--r--deps/v8/src/inspector/js_protocol.pdl34
-rw-r--r--deps/v8/src/inspector/string-16.cc25
-rw-r--r--deps/v8/src/inspector/string-16.h2
-rw-r--r--deps/v8/src/inspector/v8-console-message.cc10
-rw-r--r--deps/v8/src/inspector/v8-console.cc41
-rw-r--r--deps/v8/src/inspector/v8-debugger-agent-impl.cc95
-rw-r--r--deps/v8/src/inspector/v8-debugger-agent-impl.h13
-rw-r--r--deps/v8/src/inspector/v8-debugger-script.cc82
-rw-r--r--deps/v8/src/inspector/v8-debugger-script.h21
-rw-r--r--deps/v8/src/inspector/v8-debugger.cc66
-rw-r--r--deps/v8/src/inspector/v8-debugger.h11
-rw-r--r--deps/v8/src/inspector/v8-injected-script-host.cc3
-rw-r--r--deps/v8/src/inspector/v8-inspector-impl.cc21
-rw-r--r--deps/v8/src/inspector/v8-inspector-impl.h2
-rw-r--r--deps/v8/src/inspector/v8-inspector-session-impl.cc5
-rw-r--r--deps/v8/src/inspector/v8-profiler-agent-impl.cc13
-rw-r--r--deps/v8/src/inspector/v8-profiler-agent-impl.h4
-rw-r--r--deps/v8/src/inspector/v8-runtime-agent-impl.cc42
-rw-r--r--deps/v8/src/inspector/v8-runtime-agent-impl.h8
-rw-r--r--deps/v8/src/inspector/wasm-translation.cc213
-rw-r--r--deps/v8/src/inspector/wasm-translation.h7
-rw-r--r--deps/v8/src/instruction-stream.cc95
-rw-r--r--deps/v8/src/instruction-stream.h35
-rw-r--r--deps/v8/src/interface-descriptors.h5
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.cc11
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.h6
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.cc279
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.h19
-rw-r--r--deps/v8/src/interpreter/bytecodes.h3
-rw-r--r--deps/v8/src/interpreter/constant-array-builder.h16
-rw-r--r--deps/v8/src/interpreter/handler-table-builder.cc2
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.cc164
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.h64
-rw-r--r--deps/v8/src/interpreter/interpreter-generator.cc93
-rw-r--r--deps/v8/src/interpreter/interpreter-intrinsics-generator.cc22
-rw-r--r--deps/v8/src/interpreter/interpreter.cc43
-rw-r--r--deps/v8/src/interpreter/interpreter.h5
-rw-r--r--deps/v8/src/interpreter/setup-interpreter-internal.cc2
-rw-r--r--deps/v8/src/intl.cc55
-rw-r--r--deps/v8/src/intl.h27
-rw-r--r--deps/v8/src/isolate-inl.h25
-rw-r--r--deps/v8/src/isolate.cc389
-rw-r--r--deps/v8/src/isolate.h73
-rw-r--r--deps/v8/src/js/array.js50
-rw-r--r--deps/v8/src/js/intl.js89
-rw-r--r--deps/v8/src/js/messages.js70
-rw-r--r--deps/v8/src/js/prologue.js36
-rw-r--r--deps/v8/src/js/spread.js51
-rw-r--r--deps/v8/src/js/typedarray.js27
-rw-r--r--deps/v8/src/json-parser.cc7
-rw-r--r--deps/v8/src/json-parser.h9
-rw-r--r--deps/v8/src/json-stringifier.h13
-rw-r--r--deps/v8/src/keys.cc12
-rw-r--r--deps/v8/src/keys.h3
-rw-r--r--deps/v8/src/layout-descriptor.h6
-rw-r--r--deps/v8/src/libplatform/default-foreground-task-runner.cc23
-rw-r--r--deps/v8/src/libplatform/default-foreground-task-runner.h12
-rw-r--r--deps/v8/src/libplatform/default-platform.cc32
-rw-r--r--deps/v8/src/libplatform/default-platform.h11
-rw-r--r--deps/v8/src/libplatform/default-worker-threads-task-runner.cc (renamed from deps/v8/src/libplatform/default-background-task-runner.cc)25
-rw-r--r--deps/v8/src/libplatform/default-worker-threads-task-runner.h (renamed from deps/v8/src/libplatform/default-background-task-runner.h)12
-rw-r--r--deps/v8/src/libplatform/tracing/trace-writer.cc13
-rw-r--r--deps/v8/src/libplatform/tracing/trace-writer.h1
-rw-r--r--deps/v8/src/log-utils.h4
-rw-r--r--deps/v8/src/log.cc230
-rw-r--r--deps/v8/src/log.h19
-rw-r--r--deps/v8/src/lookup.cc134
-rw-r--r--deps/v8/src/lookup.h19
-rw-r--r--deps/v8/src/macro-assembler.h1
-rw-r--r--deps/v8/src/managed.h2
-rw-r--r--deps/v8/src/messages.cc33
-rw-r--r--deps/v8/src/messages.h16
-rw-r--r--deps/v8/src/mips/assembler-mips-inl.h22
-rw-r--r--deps/v8/src/mips/assembler-mips.cc14
-rw-r--r--deps/v8/src/mips/assembler-mips.h9
-rw-r--r--deps/v8/src/mips/code-stubs-mips.cc70
-rw-r--r--deps/v8/src/mips/codegen-mips.cc1
-rw-r--r--deps/v8/src/mips/frame-constants-mips.h28
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.cc1172
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.h179
-rw-r--r--deps/v8/src/mips/simulator-mips.cc4
-rw-r--r--deps/v8/src/mips64/assembler-mips64-inl.h26
-rw-r--r--deps/v8/src/mips64/assembler-mips64.cc14
-rw-r--r--deps/v8/src/mips64/assembler-mips64.h9
-rw-r--r--deps/v8/src/mips64/code-stubs-mips64.cc70
-rw-r--r--deps/v8/src/mips64/codegen-mips64.cc1
-rw-r--r--deps/v8/src/mips64/frame-constants-mips64.h28
-rw-r--r--deps/v8/src/mips64/macro-assembler-mips64.cc917
-rw-r--r--deps/v8/src/mips64/macro-assembler-mips64.h158
-rw-r--r--deps/v8/src/mips64/simulator-mips64.cc4
-rw-r--r--deps/v8/src/objects-body-descriptors-inl.h359
-rw-r--r--deps/v8/src/objects-body-descriptors.h31
-rw-r--r--deps/v8/src/objects-debug.cc276
-rw-r--r--deps/v8/src/objects-inl.h123
-rw-r--r--deps/v8/src/objects-printer.cc284
-rw-r--r--deps/v8/src/objects.cc922
-rw-r--r--deps/v8/src/objects.h589
-rw-r--r--deps/v8/src/objects/bigint.cc145
-rw-r--r--deps/v8/src/objects/bigint.h23
-rw-r--r--deps/v8/src/objects/code-inl.h125
-rw-r--r--deps/v8/src/objects/code.h98
-rw-r--r--deps/v8/src/objects/debug-objects-inl.h2
-rw-r--r--deps/v8/src/objects/debug-objects.cc20
-rw-r--r--deps/v8/src/objects/debug-objects.h31
-rw-r--r--deps/v8/src/objects/descriptor-array.h8
-rw-r--r--deps/v8/src/objects/dictionary.h41
-rw-r--r--deps/v8/src/objects/fixed-array-inl.h39
-rw-r--r--deps/v8/src/objects/fixed-array.h86
-rw-r--r--deps/v8/src/objects/frame-array.h2
-rw-r--r--deps/v8/src/objects/hash-table.h59
-rw-r--r--deps/v8/src/objects/intl-objects.cc41
-rw-r--r--deps/v8/src/objects/intl-objects.h17
-rw-r--r--deps/v8/src/objects/js-array-inl.h83
-rw-r--r--deps/v8/src/objects/js-array.h90
-rw-r--r--deps/v8/src/objects/js-promise.h7
-rw-r--r--deps/v8/src/objects/js-regexp-string-iterator-inl.h35
-rw-r--r--deps/v8/src/objects/js-regexp-string-iterator.h59
-rw-r--r--deps/v8/src/objects/literal-objects.cc2
-rw-r--r--deps/v8/src/objects/literal-objects.h1
-rw-r--r--deps/v8/src/objects/map-inl.h12
-rw-r--r--deps/v8/src/objects/map.h83
-rw-r--r--deps/v8/src/objects/maybe-object-inl.h92
-rw-r--r--deps/v8/src/objects/maybe-object.h108
-rw-r--r--deps/v8/src/objects/module.cc23
-rw-r--r--deps/v8/src/objects/module.h29
-rw-r--r--deps/v8/src/objects/name.h5
-rw-r--r--deps/v8/src/objects/object-macros-undef.h6
-rw-r--r--deps/v8/src/objects/object-macros.h48
-rw-r--r--deps/v8/src/objects/scope-info.cc370
-rw-r--r--deps/v8/src/objects/scope-info.h130
-rw-r--r--deps/v8/src/objects/script-inl.h3
-rw-r--r--deps/v8/src/objects/script.h4
-rw-r--r--deps/v8/src/objects/shared-function-info-inl.h399
-rw-r--r--deps/v8/src/objects/shared-function-info.h237
-rw-r--r--deps/v8/src/objects/string-inl.h2
-rw-r--r--deps/v8/src/objects/string-table.h11
-rw-r--r--deps/v8/src/objects/string.h10
-rw-r--r--deps/v8/src/objects/template-objects.cc2
-rw-r--r--deps/v8/src/optimized-compilation-info.cc (renamed from deps/v8/src/compilation-info.cc)125
-rw-r--r--deps/v8/src/optimized-compilation-info.h (renamed from deps/v8/src/compilation-info.h)173
-rw-r--r--deps/v8/src/parsing/parse-info.cc34
-rw-r--r--deps/v8/src/parsing/parse-info.h12
-rw-r--r--deps/v8/src/parsing/parser-base.h6
-rw-r--r--deps/v8/src/parsing/parser.cc143
-rw-r--r--deps/v8/src/parsing/parser.h11
-rw-r--r--deps/v8/src/parsing/parsing.cc2
-rw-r--r--deps/v8/src/parsing/pattern-rewriter.cc58
-rw-r--r--deps/v8/src/parsing/scanner.cc182
-rw-r--r--deps/v8/src/parsing/scanner.h27
-rw-r--r--deps/v8/src/perf-jit.cc50
-rw-r--r--deps/v8/src/perf-jit.h11
-rw-r--r--deps/v8/src/ppc/assembler-ppc-inl.h23
-rw-r--r--deps/v8/src/ppc/assembler-ppc.cc38
-rw-r--r--deps/v8/src/ppc/assembler-ppc.h9
-rw-r--r--deps/v8/src/ppc/code-stubs-ppc.cc49
-rw-r--r--deps/v8/src/ppc/codegen-ppc.cc1
-rw-r--r--deps/v8/src/ppc/constants-ppc.h4
-rw-r--r--deps/v8/src/ppc/frame-constants-ppc.h24
-rw-r--r--deps/v8/src/ppc/macro-assembler-ppc.cc127
-rw-r--r--deps/v8/src/ppc/macro-assembler-ppc.h34
-rw-r--r--deps/v8/src/profiler/allocation-tracker.cc2
-rw-r--r--deps/v8/src/profiler/heap-profiler.cc10
-rw-r--r--deps/v8/src/profiler/heap-profiler.h7
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.cc214
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.h10
-rw-r--r--deps/v8/src/profiler/profile-generator.cc2
-rw-r--r--deps/v8/src/profiler/profiler-listener.cc28
-rw-r--r--deps/v8/src/profiler/profiler-listener.h6
-rw-r--r--deps/v8/src/profiler/sampling-heap-profiler.cc2
-rw-r--r--deps/v8/src/property-descriptor.cc2
-rw-r--r--deps/v8/src/prototype.h4
-rw-r--r--deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc6
-rw-r--r--deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc4
-rw-r--r--deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc6
-rw-r--r--deps/v8/src/regexp/jsregexp.cc4
-rw-r--r--deps/v8/src/regexp/jsregexp.h10
-rw-r--r--deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc4
-rw-r--r--deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc4
-rw-r--r--deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc6
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler.cc4
-rw-r--r--deps/v8/src/regexp/regexp-parser.cc2
-rw-r--r--deps/v8/src/regexp/regexp-utils.cc2
-rw-r--r--deps/v8/src/regexp/regexp-utils.h8
-rw-r--r--deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc6
-rw-r--r--deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc6
-rw-r--r--deps/v8/src/register-configuration.cc20
-rw-r--r--deps/v8/src/runtime-profiler.cc16
-rw-r--r--deps/v8/src/runtime/runtime-array.cc22
-rw-r--r--deps/v8/src/runtime/runtime-atomics.cc2
-rw-r--r--deps/v8/src/runtime/runtime-classes.cc10
-rw-r--r--deps/v8/src/runtime/runtime-collections.cc2
-rw-r--r--deps/v8/src/runtime/runtime-compiler.cc15
-rw-r--r--deps/v8/src/runtime/runtime-date.cc2
-rw-r--r--deps/v8/src/runtime/runtime-debug.cc50
-rw-r--r--deps/v8/src/runtime/runtime-forin.cc14
-rw-r--r--deps/v8/src/runtime/runtime-function.cc27
-rw-r--r--deps/v8/src/runtime/runtime-generator.cc6
-rw-r--r--deps/v8/src/runtime/runtime-internal.cc31
-rw-r--r--deps/v8/src/runtime/runtime-intl.cc23
-rw-r--r--deps/v8/src/runtime/runtime-literals.cc5
-rw-r--r--deps/v8/src/runtime/runtime-liveedit.cc6
-rw-r--r--deps/v8/src/runtime/runtime-object.cc32
-rw-r--r--deps/v8/src/runtime/runtime-promise.cc1
-rw-r--r--deps/v8/src/runtime/runtime-proxy.cc2
-rw-r--r--deps/v8/src/runtime/runtime-regexp.cc19
-rw-r--r--deps/v8/src/runtime/runtime-scopes.cc13
-rw-r--r--deps/v8/src/runtime/runtime-test.cc59
-rw-r--r--deps/v8/src/runtime/runtime-typedarray.cc2
-rw-r--r--deps/v8/src/runtime/runtime-wasm.cc39
-rw-r--r--deps/v8/src/runtime/runtime.cc33
-rw-r--r--deps/v8/src/runtime/runtime.h571
-rw-r--r--deps/v8/src/s390/assembler-s390-inl.h24
-rw-r--r--deps/v8/src/s390/assembler-s390.cc3
-rw-r--r--deps/v8/src/s390/assembler-s390.h8
-rw-r--r--deps/v8/src/s390/code-stubs-s390.cc45
-rw-r--r--deps/v8/src/s390/codegen-s390.cc1
-rw-r--r--deps/v8/src/s390/frame-constants-s390.h24
-rw-r--r--deps/v8/src/s390/macro-assembler-s390.cc51
-rw-r--r--deps/v8/src/s390/macro-assembler-s390.h23
-rw-r--r--deps/v8/src/simulator-base.h1
-rw-r--r--deps/v8/src/snapshot/builtin-deserializer-allocator.h16
-rw-r--r--deps/v8/src/snapshot/builtin-deserializer.cc30
-rw-r--r--deps/v8/src/snapshot/builtin-serializer-allocator.cc6
-rw-r--r--deps/v8/src/snapshot/code-serializer.cc153
-rw-r--r--deps/v8/src/snapshot/code-serializer.h29
-rw-r--r--deps/v8/src/snapshot/default-deserializer-allocator.cc15
-rw-r--r--deps/v8/src/snapshot/default-deserializer-allocator.h15
-rw-r--r--deps/v8/src/snapshot/default-serializer-allocator.cc14
-rw-r--r--deps/v8/src/snapshot/deserializer.cc232
-rw-r--r--deps/v8/src/snapshot/deserializer.h23
-rw-r--r--deps/v8/src/snapshot/embedded-empty.cc24
-rw-r--r--deps/v8/src/snapshot/macros.h78
-rw-r--r--deps/v8/src/snapshot/mksnapshot.cc275
-rw-r--r--deps/v8/src/snapshot/object-deserializer.cc11
-rw-r--r--deps/v8/src/snapshot/partial-deserializer.cc2
-rw-r--r--deps/v8/src/snapshot/partial-serializer.cc12
-rw-r--r--deps/v8/src/snapshot/serializer-common.cc9
-rw-r--r--deps/v8/src/snapshot/serializer-common.h108
-rw-r--r--deps/v8/src/snapshot/serializer.cc77
-rw-r--r--deps/v8/src/snapshot/serializer.h20
-rw-r--r--deps/v8/src/snapshot/snapshot-common.cc156
-rw-r--r--deps/v8/src/snapshot/snapshot.h58
-rw-r--r--deps/v8/src/snapshot/startup-deserializer.cc2
-rw-r--r--deps/v8/src/snapshot/startup-serializer.cc17
-rw-r--r--deps/v8/src/snapshot/startup-serializer.h6
-rw-r--r--deps/v8/src/source-position-table.cc31
-rw-r--r--deps/v8/src/source-position-table.h8
-rw-r--r--deps/v8/src/source-position.cc4
-rw-r--r--deps/v8/src/source-position.h7
-rw-r--r--deps/v8/src/startup-data-util.cc9
-rw-r--r--deps/v8/src/string-builder.h2
-rw-r--r--deps/v8/src/string-stream.cc4
-rw-r--r--deps/v8/src/third_party/vtune/BUILD.gn2
-rw-r--r--deps/v8/src/transitions-inl.h57
-rw-r--r--deps/v8/src/transitions.cc195
-rw-r--r--deps/v8/src/transitions.h100
-rw-r--r--deps/v8/src/trap-handler/handler-outside.cc47
-rw-r--r--deps/v8/src/trap-handler/trap-handler.h25
-rw-r--r--deps/v8/src/unoptimized-compilation-info.cc64
-rw-r--r--deps/v8/src/unoptimized-compilation-info.h138
-rw-r--r--deps/v8/src/utils.h45
-rw-r--r--deps/v8/src/v8.h7
-rw-r--r--deps/v8/src/v8memory.h4
-rw-r--r--deps/v8/src/value-serializer.cc62
-rw-r--r--deps/v8/src/value-serializer.h99
-rw-r--r--deps/v8/src/visitors.h9
-rw-r--r--deps/v8/src/wasm/OWNERS5
-rw-r--r--deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h120
-rw-r--r--deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h123
-rw-r--r--deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h639
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler.cc76
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler.h195
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-compiler.cc740
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-register.h19
-rw-r--r--deps/v8/src/wasm/baseline/mips/OWNERS3
-rw-r--r--deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h566
-rw-r--r--deps/v8/src/wasm/baseline/mips64/OWNERS3
-rw-r--r--deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h421
-rw-r--r--deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h125
-rw-r--r--deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h125
-rw-r--r--deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h566
-rw-r--r--deps/v8/src/wasm/compilation-manager.cc19
-rw-r--r--deps/v8/src/wasm/compilation-manager.h9
-rw-r--r--deps/v8/src/wasm/decoder.h3
-rw-r--r--deps/v8/src/wasm/function-body-decoder-impl.h141
-rw-r--r--deps/v8/src/wasm/function-body-decoder.cc93
-rw-r--r--deps/v8/src/wasm/module-compiler.cc3365
-rw-r--r--deps/v8/src/wasm/module-compiler.h69
-rw-r--r--deps/v8/src/wasm/module-decoder.cc213
-rw-r--r--deps/v8/src/wasm/module-decoder.h16
-rw-r--r--deps/v8/src/wasm/wasm-code-manager.cc650
-rw-r--r--deps/v8/src/wasm/wasm-code-manager.h185
-rw-r--r--deps/v8/src/wasm/wasm-code-specialization.cc238
-rw-r--r--deps/v8/src/wasm/wasm-code-specialization.h34
-rw-r--r--deps/v8/src/wasm/wasm-code-wrapper.cc76
-rw-r--r--deps/v8/src/wasm/wasm-code-wrapper.h50
-rw-r--r--deps/v8/src/wasm/wasm-constants.h3
-rw-r--r--deps/v8/src/wasm/wasm-debug.cc147
-rw-r--r--deps/v8/src/wasm/wasm-engine.cc18
-rw-r--r--deps/v8/src/wasm/wasm-engine.h16
-rw-r--r--deps/v8/src/wasm/wasm-interpreter.cc490
-rw-r--r--deps/v8/src/wasm/wasm-interpreter.h17
-rw-r--r--deps/v8/src/wasm/wasm-js.cc272
-rw-r--r--deps/v8/src/wasm/wasm-memory.cc274
-rw-r--r--deps/v8/src/wasm/wasm-memory.h101
-rw-r--r--deps/v8/src/wasm/wasm-module.cc174
-rw-r--r--deps/v8/src/wasm/wasm-module.h51
-rw-r--r--deps/v8/src/wasm/wasm-objects-inl.h230
-rw-r--r--deps/v8/src/wasm/wasm-objects.cc1099
-rw-r--r--deps/v8/src/wasm/wasm-objects.h632
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.cc40
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.h254
-rw-r--r--deps/v8/src/wasm/wasm-result.cc2
-rw-r--r--deps/v8/src/wasm/wasm-result.h2
-rw-r--r--deps/v8/src/wasm/wasm-serialization.cc197
-rw-r--r--deps/v8/src/wasm/wasm-text.cc2
-rw-r--r--deps/v8/src/x64/assembler-x64-inl.h26
-rw-r--r--deps/v8/src/x64/assembler-x64.cc4
-rw-r--r--deps/v8/src/x64/assembler-x64.h77
-rw-r--r--deps/v8/src/x64/code-stubs-x64.cc114
-rw-r--r--deps/v8/src/x64/codegen-x64.cc1
-rw-r--r--deps/v8/src/x64/disasm-x64.cc12
-rw-r--r--deps/v8/src/x64/frame-constants-x64.h36
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.cc111
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.h32
-rw-r--r--deps/v8/src/zone/zone.cc6
702 files changed, 40251 insertions, 31776 deletions
diff --git a/deps/v8/src/DEPS b/deps/v8/src/DEPS
index bbf47e6107..050f91d6d6 100644
--- a/deps/v8/src/DEPS
+++ b/deps/v8/src/DEPS
@@ -8,6 +8,8 @@ include_rules = [
"+src/compiler/code-assembler.h",
"+src/compiler/wasm-compiler.h",
"-src/heap",
+ "+src/heap/factory.h",
+ "+src/heap/factory-inl.h",
"+src/heap/heap.h",
"+src/heap/heap-inl.h",
"-src/inspector",
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index eb89288685..f292988b8e 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -8,8 +8,8 @@
#include "src/contexts.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
-#include "src/factory.h"
#include "src/frames-inl.h"
+#include "src/heap/factory.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
#include "src/property-details.h"
@@ -28,6 +28,7 @@ Handle<AccessorInfo> Accessors::MakeAccessor(
info->set_is_special_data_property(true);
info->set_is_sloppy(false);
info->set_replace_on_access(false);
+ info->set_has_no_side_effect(false);
name = factory->InternalizeName(name);
info->set_name(*name);
Handle<Object> get = v8::FromCData(isolate, getter);
@@ -78,7 +79,7 @@ bool Accessors::IsJSObjectFieldAccessor(Handle<Map> map, Handle<Name> name,
namespace {
-MUST_USE_RESULT MaybeHandle<Object> ReplaceAccessorWithDataProperty(
+V8_WARN_UNUSED_RESULT MaybeHandle<Object> ReplaceAccessorWithDataProperty(
Isolate* isolate, Handle<Object> receiver, Handle<JSObject> holder,
Handle<Name> name, Handle<Object> value) {
LookupIterator it(receiver, name, holder,
@@ -97,6 +98,9 @@ MUST_USE_RESULT MaybeHandle<Object> ReplaceAccessorWithDataProperty(
} // namespace
+//
+// Accessors::ReconfigureToDataProperty
+//
void Accessors::ReconfigureToDataProperty(
v8::Local<v8::Name> key, v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<v8::Boolean>& info) {
@@ -118,6 +122,18 @@ void Accessors::ReconfigureToDataProperty(
}
}
+void Accessors::ReconfigureToDataPropertyGetter(
+ v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ UNREACHABLE();
+}
+
+Handle<AccessorInfo> Accessors::MakeReconfigureToDataPropertyInfo(
+ Isolate* isolate) {
+ Handle<Name> name = isolate->factory()->ReconfigureToDataProperty_string();
+ return MakeAccessor(isolate, name, &ReconfigureToDataPropertyGetter,
+ &ReconfigureToDataProperty);
+}
+
//
// Accessors::ArgumentsIterator
//
@@ -299,7 +315,6 @@ Handle<AccessorInfo> Accessors::MakeStringLengthInfo(Isolate* isolate) {
&StringLengthGetter, nullptr);
}
-
//
// Accessors::ScriptColumnOffset
//
@@ -613,7 +628,7 @@ void Accessors::ScriptEvalFromFunctionNameGetter(
if (script->has_eval_from_shared()) {
Handle<SharedFunctionInfo> shared(script->eval_from_shared());
// Find the name of the function calling eval.
- result = Handle<Object>(shared->name(), isolate);
+ result = Handle<Object>(shared->Name(), isolate);
}
info.GetReturnValue().Set(Utils::ToLocal(result));
}
diff --git a/deps/v8/src/accessors.h b/deps/v8/src/accessors.h
index 70e6a9200e..1911f92dbf 100644
--- a/deps/v8/src/accessors.h
+++ b/deps/v8/src/accessors.h
@@ -33,6 +33,7 @@ class JavaScriptFrame;
V(function_name, FunctionName) \
V(function_length, FunctionLength) \
V(function_prototype, FunctionPrototype) \
+ V(reconfigure_to_data_property, ReconfigureToDataProperty) \
V(script_column_offset, ScriptColumnOffset) \
V(script_compilation_type, ScriptCompilationType) \
V(script_context_data, ScriptContextData) \
@@ -48,6 +49,15 @@ class JavaScriptFrame;
V(script_source_mapping_url, ScriptSourceMappingUrl) \
V(string_length, StringLength)
+#define SIDE_EFFECT_FREE_ACCESSOR_INFO_LIST(V) \
+ V(ArrayLength) \
+ V(BoundFunctionLength) \
+ V(BoundFunctionName) \
+ V(FunctionName) \
+ V(FunctionLength) \
+ V(FunctionPrototype) \
+ V(StringLength)
+
#define ACCESSOR_SETTER_LIST(V) \
V(ArrayLengthSetter) \
V(ErrorStackSetter) \
@@ -73,6 +83,16 @@ class Accessors : public AllStatic {
ACCESSOR_SETTER_LIST(ACCESSOR_SETTER_DECLARATION)
#undef ACCESSOR_SETTER_DECLARATION
+ static constexpr int kAccessorInfoCount =
+#define COUNT_ACCESSOR(...) +1
+ ACCESSOR_INFO_LIST(COUNT_ACCESSOR);
+#undef COUNT_ACCESSOR
+
+ static constexpr int kAccessorSetterCount =
+#define COUNT_ACCESSOR(...) +1
+ ACCESSOR_SETTER_LIST(COUNT_ACCESSOR);
+#undef COUNT_ACCESSOR
+
static void ModuleNamespaceEntryGetter(
v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info);
diff --git a/deps/v8/src/allocation.cc b/deps/v8/src/allocation.cc
index 5493b34789..f63c2f292f 100644
--- a/deps/v8/src/allocation.cc
+++ b/deps/v8/src/allocation.cc
@@ -69,7 +69,7 @@ const int kAllocationTries = 2;
void* Malloced::New(size_t size) {
void* result = AllocWithRetry(size);
if (result == nullptr) {
- V8::FatalProcessOutOfMemory("Malloced operator new");
+ V8::FatalProcessOutOfMemory(nullptr, "Malloced operator new");
}
return result;
}
@@ -115,7 +115,7 @@ void* AlignedAlloc(size_t size, size_t alignment) {
if (!OnCriticalMemoryPressure(size + alignment)) break;
}
if (result == nullptr) {
- V8::FatalProcessOutOfMemory("AlignedAlloc");
+ V8::FatalProcessOutOfMemory(nullptr, "AlignedAlloc");
}
return result;
}
diff --git a/deps/v8/src/allocation.h b/deps/v8/src/allocation.h
index 9bb47c8f05..13dc3e508f 100644
--- a/deps/v8/src/allocation.h
+++ b/deps/v8/src/allocation.h
@@ -14,13 +14,16 @@
namespace v8 {
namespace internal {
+class Isolate;
+
// This file defines memory allocation functions. If a first attempt at an
// allocation fails, these functions call back into the embedder, then attempt
// the allocation a second time. The embedder callback must not reenter V8.
// Called when allocation routines fail to allocate, even with a possible retry.
// This function should not return, but should terminate the current processing.
-V8_EXPORT_PRIVATE void FatalProcessOutOfMemory(const char* message);
+[[noreturn]] V8_EXPORT_PRIVATE void FatalProcessOutOfMemory(
+ Isolate* isolate, const char* message);
// Superclass for classes managed with new & delete.
class V8_EXPORT_PRIVATE Malloced {
@@ -38,13 +41,13 @@ T* NewArray(size_t size) {
if (result == nullptr) {
V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
result = new (std::nothrow) T[size];
- if (result == nullptr) FatalProcessOutOfMemory("NewArray");
+ if (result == nullptr) FatalProcessOutOfMemory(nullptr, "NewArray");
}
return result;
}
-template <typename T,
- typename = typename std::enable_if<IS_TRIVIALLY_COPYABLE(T)>::type>
+template <typename T, typename = typename std::enable_if<
+ base::is_trivially_copyable<T>::value>::type>
T* NewArray(size_t size, T default_val) {
T* result = reinterpret_cast<T*>(NewArray<uint8_t>(sizeof(T) * size));
for (size_t i = 0; i < size; ++i) result[i] = default_val;
diff --git a/deps/v8/src/api-arguments-inl.h b/deps/v8/src/api-arguments-inl.h
index b8336f97c4..1cf9662b94 100644
--- a/deps/v8/src/api-arguments-inl.h
+++ b/deps/v8/src/api-arguments-inl.h
@@ -13,56 +13,63 @@
namespace v8 {
namespace internal {
-#define FOR_EACH_CALLBACK(F) \
- F(Query, query, Object, v8::Integer) \
- F(Deleter, deleter, Object, v8::Boolean)
-
-#define PREPARE_CALLBACK_INFO(ISOLATE, F, RETURN_VALUE, API_RETURN_TYPE) \
- if (ISOLATE->needs_side_effect_check() && \
- !PerformSideEffectCheck(ISOLATE, FUNCTION_ADDR(F))) { \
- return RETURN_VALUE(); \
- } \
- VMState<EXTERNAL> state(ISOLATE); \
- ExternalCallbackScope call_scope(ISOLATE, FUNCTION_ADDR(F)); \
+#define FOR_EACH_CALLBACK(F) \
+ F(Query, query, Object, v8::Integer, interceptor) \
+ F(Deleter, deleter, Object, v8::Boolean, Handle<Object>())
+
+#define DCHECK_NAME_COMPATIBLE(interceptor, name) \
+ DCHECK(interceptor->is_named()); \
+ DCHECK(!name->IsPrivate()); \
+ DCHECK_IMPLIES(name->IsSymbol(), interceptor->can_intercept_symbols());
+
+#define PREPARE_CALLBACK_INFO(ISOLATE, F, RETURN_VALUE, API_RETURN_TYPE, \
+ CALLBACK_INFO) \
+ if (ISOLATE->debug_execution_mode() == DebugInfo::kSideEffects && \
+ !ISOLATE->debug()->PerformSideEffectCheckForCallback(CALLBACK_INFO)) { \
+ return RETURN_VALUE(); \
+ } \
+ VMState<EXTERNAL> state(ISOLATE); \
+ ExternalCallbackScope call_scope(ISOLATE, FUNCTION_ADDR(F)); \
PropertyCallbackInfo<API_RETURN_TYPE> callback_info(begin());
-#define CREATE_NAMED_CALLBACK(Function, type, ReturnType, ApiReturnType) \
- Handle<ReturnType> PropertyCallbackArguments::CallNamed##Function( \
+#define CREATE_NAMED_CALLBACK(FUNCTION, TYPE, RETURN_TYPE, API_RETURN_TYPE, \
+ INFO_FOR_SIDE_EFFECT) \
+ Handle<RETURN_TYPE> PropertyCallbackArguments::CallNamed##FUNCTION( \
Handle<InterceptorInfo> interceptor, Handle<Name> name) { \
- DCHECK(interceptor->is_named()); \
- DCHECK(!name->IsPrivate()); \
- DCHECK_IMPLIES(name->IsSymbol(), interceptor->can_intercept_symbols()); \
+ DCHECK_NAME_COMPATIBLE(interceptor, name); \
Isolate* isolate = this->isolate(); \
RuntimeCallTimerScope timer( \
- isolate, RuntimeCallCounterId::kNamed##Function##Callback); \
- DCHECK(!name->IsPrivate()); \
- GenericNamedProperty##Function##Callback f = \
- ToCData<GenericNamedProperty##Function##Callback>( \
- interceptor->type()); \
- PREPARE_CALLBACK_INFO(isolate, f, Handle<ReturnType>, ApiReturnType); \
+ isolate, RuntimeCallCounterId::kNamed##FUNCTION##Callback); \
+ GenericNamedProperty##FUNCTION##Callback f = \
+ ToCData<GenericNamedProperty##FUNCTION##Callback>( \
+ interceptor->TYPE()); \
+ PREPARE_CALLBACK_INFO(isolate, f, Handle<RETURN_TYPE>, API_RETURN_TYPE, \
+ INFO_FOR_SIDE_EFFECT); \
LOG(isolate, \
- ApiNamedPropertyAccess("interceptor-named-" #type, holder(), *name)); \
+ ApiNamedPropertyAccess("interceptor-named-" #TYPE, holder(), *name)); \
f(v8::Utils::ToLocal(name), callback_info); \
- return GetReturnValue<ReturnType>(isolate); \
+ return GetReturnValue<RETURN_TYPE>(isolate); \
}
FOR_EACH_CALLBACK(CREATE_NAMED_CALLBACK)
#undef CREATE_NAMED_CALLBACK
-#define CREATE_INDEXED_CALLBACK(Function, type, ReturnType, ApiReturnType) \
- Handle<ReturnType> PropertyCallbackArguments::CallIndexed##Function( \
- Handle<InterceptorInfo> interceptor, uint32_t index) { \
- DCHECK(!interceptor->is_named()); \
- Isolate* isolate = this->isolate(); \
- RuntimeCallTimerScope timer( \
- isolate, RuntimeCallCounterId::kIndexed##Function##Callback); \
- IndexedProperty##Function##Callback f = \
- ToCData<IndexedProperty##Function##Callback>(interceptor->type()); \
- PREPARE_CALLBACK_INFO(isolate, f, Handle<ReturnType>, ApiReturnType); \
- LOG(isolate, ApiIndexedPropertyAccess("interceptor-indexed-" #type, \
- holder(), index)); \
- f(index, callback_info); \
- return GetReturnValue<ReturnType>(isolate); \
+#define CREATE_INDEXED_CALLBACK(FUNCTION, TYPE, RETURN_TYPE, API_RETURN_TYPE, \
+ INFO_FOR_SIDE_EFFECT) \
+ Handle<RETURN_TYPE> PropertyCallbackArguments::CallIndexed##FUNCTION( \
+ Handle<InterceptorInfo> interceptor, uint32_t index) { \
+ DCHECK(!interceptor->is_named()); \
+ Isolate* isolate = this->isolate(); \
+ RuntimeCallTimerScope timer( \
+ isolate, RuntimeCallCounterId::kIndexed##FUNCTION##Callback); \
+ IndexedProperty##FUNCTION##Callback f = \
+ ToCData<IndexedProperty##FUNCTION##Callback>(interceptor->TYPE()); \
+ PREPARE_CALLBACK_INFO(isolate, f, Handle<RETURN_TYPE>, API_RETURN_TYPE, \
+ INFO_FOR_SIDE_EFFECT); \
+ LOG(isolate, ApiIndexedPropertyAccess("interceptor-indexed-" #TYPE, \
+ holder(), index)); \
+ f(index, callback_info); \
+ return GetReturnValue<RETURN_TYPE>(isolate); \
}
FOR_EACH_CALLBACK(CREATE_INDEXED_CALLBACK)
@@ -70,11 +77,44 @@ FOR_EACH_CALLBACK(CREATE_INDEXED_CALLBACK)
#undef FOR_EACH_CALLBACK
#undef CREATE_INDEXED_CALLBACK
+Handle<Object> FunctionCallbackArguments::Call(CallHandlerInfo* handler) {
+ Isolate* isolate = this->isolate();
+ LOG(isolate, ApiObjectAccess("call", holder()));
+ RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kFunctionCallback);
+ v8::FunctionCallback f =
+ v8::ToCData<v8::FunctionCallback>(handler->callback());
+ if (isolate->debug_execution_mode() == DebugInfo::kSideEffects &&
+ !isolate->debug()->PerformSideEffectCheckForCallback(handle(handler))) {
+ return Handle<Object>();
+ }
+ VMState<EXTERNAL> state(isolate);
+ ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
+ FunctionCallbackInfo<v8::Value> info(begin(), argv_, argc_);
+ f(info);
+ return GetReturnValue<Object>(isolate);
+}
+
+Handle<JSObject> PropertyCallbackArguments::CallNamedEnumerator(
+ Handle<InterceptorInfo> interceptor) {
+ DCHECK(interceptor->is_named());
+ LOG(isolate(), ApiObjectAccess("interceptor-named-enumerator", holder()));
+ RuntimeCallTimerScope timer(isolate(),
+ RuntimeCallCounterId::kNamedEnumeratorCallback);
+ return CallPropertyEnumerator(interceptor);
+}
+
+Handle<JSObject> PropertyCallbackArguments::CallIndexedEnumerator(
+ Handle<InterceptorInfo> interceptor) {
+ DCHECK(!interceptor->is_named());
+ LOG(isolate(), ApiObjectAccess("interceptor-indexed-enumerator", holder()));
+ RuntimeCallTimerScope timer(isolate(),
+ RuntimeCallCounterId::kIndexedEnumeratorCallback);
+ return CallPropertyEnumerator(interceptor);
+}
+
Handle<Object> PropertyCallbackArguments::CallNamedGetter(
Handle<InterceptorInfo> interceptor, Handle<Name> name) {
- DCHECK(interceptor->is_named());
- DCHECK_IMPLIES(name->IsSymbol(), interceptor->can_intercept_symbols());
- DCHECK(!name->IsPrivate());
+ DCHECK_NAME_COMPATIBLE(interceptor, name);
Isolate* isolate = this->isolate();
RuntimeCallTimerScope timer(isolate,
RuntimeCallCounterId::kNamedGetterCallback);
@@ -82,13 +122,12 @@ Handle<Object> PropertyCallbackArguments::CallNamedGetter(
ApiNamedPropertyAccess("interceptor-named-getter", holder(), *name));
GenericNamedPropertyGetterCallback f =
ToCData<GenericNamedPropertyGetterCallback>(interceptor->getter());
- return BasicCallNamedGetterCallback(f, name);
+ return BasicCallNamedGetterCallback(f, name, interceptor);
}
Handle<Object> PropertyCallbackArguments::CallNamedDescriptor(
Handle<InterceptorInfo> interceptor, Handle<Name> name) {
- DCHECK(interceptor->is_named());
- DCHECK_IMPLIES(name->IsSymbol(), interceptor->can_intercept_symbols());
+ DCHECK_NAME_COMPATIBLE(interceptor, name);
Isolate* isolate = this->isolate();
RuntimeCallTimerScope timer(isolate,
RuntimeCallCounterId::kNamedDescriptorCallback);
@@ -97,14 +136,15 @@ Handle<Object> PropertyCallbackArguments::CallNamedDescriptor(
GenericNamedPropertyDescriptorCallback f =
ToCData<GenericNamedPropertyDescriptorCallback>(
interceptor->descriptor());
- return BasicCallNamedGetterCallback(f, name);
+ return BasicCallNamedGetterCallback(f, name, interceptor);
}
Handle<Object> PropertyCallbackArguments::BasicCallNamedGetterCallback(
- GenericNamedPropertyGetterCallback f, Handle<Name> name) {
+ GenericNamedPropertyGetterCallback f, Handle<Name> name,
+ Handle<Object> info) {
DCHECK(!name->IsPrivate());
Isolate* isolate = this->isolate();
- PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value);
+ PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value, info);
f(v8::Utils::ToLocal(name), callback_info);
return GetReturnValue<Object>(isolate);
}
@@ -112,20 +152,15 @@ Handle<Object> PropertyCallbackArguments::BasicCallNamedGetterCallback(
Handle<Object> PropertyCallbackArguments::CallNamedSetter(
Handle<InterceptorInfo> interceptor, Handle<Name> name,
Handle<Object> value) {
- DCHECK_IMPLIES(name->IsSymbol(), interceptor->can_intercept_symbols());
+ DCHECK_NAME_COMPATIBLE(interceptor, name);
GenericNamedPropertySetterCallback f =
ToCData<GenericNamedPropertySetterCallback>(interceptor->setter());
- return CallNamedSetterCallback(f, name, value);
-}
-
-Handle<Object> PropertyCallbackArguments::CallNamedSetterCallback(
- GenericNamedPropertySetterCallback f, Handle<Name> name,
- Handle<Object> value) {
- DCHECK(!name->IsPrivate());
Isolate* isolate = this->isolate();
RuntimeCallTimerScope timer(isolate,
RuntimeCallCounterId::kNamedSetterCallback);
- PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value);
+ Handle<Object> side_effect_check_not_supported;
+ PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value,
+ side_effect_check_not_supported);
LOG(isolate,
ApiNamedPropertyAccess("interceptor-named-set", holder(), *name));
f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), callback_info);
@@ -135,15 +170,15 @@ Handle<Object> PropertyCallbackArguments::CallNamedSetterCallback(
Handle<Object> PropertyCallbackArguments::CallNamedDefiner(
Handle<InterceptorInfo> interceptor, Handle<Name> name,
const v8::PropertyDescriptor& desc) {
- DCHECK(interceptor->is_named());
- DCHECK(!name->IsPrivate());
- DCHECK_IMPLIES(name->IsSymbol(), interceptor->can_intercept_symbols());
+ DCHECK_NAME_COMPATIBLE(interceptor, name);
Isolate* isolate = this->isolate();
RuntimeCallTimerScope timer(isolate,
RuntimeCallCounterId::kNamedDefinerCallback);
GenericNamedPropertyDefinerCallback f =
ToCData<GenericNamedPropertyDefinerCallback>(interceptor->definer());
- PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value);
+ Handle<Object> side_effect_check_not_supported;
+ PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value,
+ side_effect_check_not_supported);
LOG(isolate,
ApiNamedPropertyAccess("interceptor-named-define", holder(), *name));
f(v8::Utils::ToLocal(name), desc, callback_info);
@@ -158,7 +193,9 @@ Handle<Object> PropertyCallbackArguments::CallIndexedSetter(
RuntimeCallCounterId::kIndexedSetterCallback);
IndexedPropertySetterCallback f =
ToCData<IndexedPropertySetterCallback>(interceptor->setter());
- PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value);
+ Handle<Object> side_effect_check_not_supported;
+ PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value,
+ side_effect_check_not_supported);
LOG(isolate,
ApiIndexedPropertyAccess("interceptor-indexed-set", holder(), index));
f(index, v8::Utils::ToLocal(value), callback_info);
@@ -174,7 +211,9 @@ Handle<Object> PropertyCallbackArguments::CallIndexedDefiner(
RuntimeCallCounterId::kIndexedDefinerCallback);
IndexedPropertyDefinerCallback f =
ToCData<IndexedPropertyDefinerCallback>(interceptor->definer());
- PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value);
+ Handle<Object> side_effect_check_not_supported;
+ PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value,
+ side_effect_check_not_supported);
LOG(isolate,
ApiIndexedPropertyAccess("interceptor-indexed-define", holder(), index));
f(index, desc, callback_info);
@@ -191,7 +230,7 @@ Handle<Object> PropertyCallbackArguments::CallIndexedGetter(
ApiIndexedPropertyAccess("interceptor-indexed-getter", holder(), index));
IndexedPropertyGetterCallback f =
ToCData<IndexedPropertyGetterCallback>(interceptor->getter());
- return BasicCallIndexedGetterCallback(f, index);
+ return BasicCallIndexedGetterCallback(f, index, interceptor);
}
Handle<Object> PropertyCallbackArguments::CallIndexedDescriptor(
@@ -204,13 +243,13 @@ Handle<Object> PropertyCallbackArguments::CallIndexedDescriptor(
holder(), index));
IndexedPropertyDescriptorCallback f =
ToCData<IndexedPropertyDescriptorCallback>(interceptor->descriptor());
- return BasicCallIndexedGetterCallback(f, index);
+ return BasicCallIndexedGetterCallback(f, index, interceptor);
}
Handle<Object> PropertyCallbackArguments::BasicCallIndexedGetterCallback(
- IndexedPropertyGetterCallback f, uint32_t index) {
+ IndexedPropertyGetterCallback f, uint32_t index, Handle<Object> info) {
Isolate* isolate = this->isolate();
- PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value);
+ PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value, info);
f(index, callback_info);
return GetReturnValue<Object>(isolate);
}
@@ -222,7 +261,7 @@ Handle<JSObject> PropertyCallbackArguments::CallPropertyEnumerator(
v8::ToCData<IndexedPropertyEnumeratorCallback>(interceptor->enumerator());
// TODO(cbruni): assert same type for indexed and named callback.
Isolate* isolate = this->isolate();
- PREPARE_CALLBACK_INFO(isolate, f, Handle<JSObject>, v8::Array);
+ PREPARE_CALLBACK_INFO(isolate, f, Handle<JSObject>, v8::Array, interceptor);
f(callback_info);
return GetReturnValue<JSObject>(isolate);
}
@@ -238,10 +277,10 @@ Handle<Object> PropertyCallbackArguments::CallAccessorGetter(
LOG(isolate, ApiNamedPropertyAccess("accessor-getter", holder(), *name));
AccessorNameGetterCallback f =
ToCData<AccessorNameGetterCallback>(info->getter());
- return BasicCallNamedGetterCallback(f, name);
+ return BasicCallNamedGetterCallback(f, name, info);
}
-void PropertyCallbackArguments::CallAccessorSetter(
+Handle<Object> PropertyCallbackArguments::CallAccessorSetter(
Handle<AccessorInfo> accessor_info, Handle<Name> name,
Handle<Object> value) {
Isolate* isolate = this->isolate();
@@ -249,9 +288,12 @@ void PropertyCallbackArguments::CallAccessorSetter(
RuntimeCallCounterId::kAccessorSetterCallback);
AccessorNameSetterCallback f =
ToCData<AccessorNameSetterCallback>(accessor_info->setter());
- PREPARE_CALLBACK_INFO(isolate, f, void, void);
+ Handle<Object> side_effect_check_not_supported;
+ PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, void,
+ side_effect_check_not_supported);
LOG(isolate, ApiNamedPropertyAccess("accessor-setter", holder(), *name));
f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), callback_info);
+ return GetReturnValue<Object>(isolate);
}
#undef PREPARE_CALLBACK_INFO
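The dispatch helpers above now thread the relevant AccessorInfo/InterceptorInfo handle through PREPARE_CALLBACK_INFO, so the debugger's side-effect check can take that object into account; setter and definer paths pass an empty handle (side_effect_check_not_supported), signalling that no whitelisting information is available for them. A minimal embedder-side sketch of what this plumbing supports, assuming the SideEffectType parameter added to the accessor APIs later in this patch:

#include "v8.h"

// Sketch only: a getter declared free of side effects, so the debugger may
// invoke it while evaluating with side-effect checks enabled.
void VersionGetter(v8::Local<v8::Name> name,
                   const v8::PropertyCallbackInfo<v8::Value>& info) {
  info.GetReturnValue().Set(
      v8::String::NewFromUtf8(info.GetIsolate(), "1.0",
                              v8::NewStringType::kNormal)
          .ToLocalChecked());
}

void InstallVersionProperty(v8::Local<v8::Context> context,
                            v8::Local<v8::Object> target) {
  v8::Isolate* isolate = context->GetIsolate();
  v8::Local<v8::Name> key =
      v8::String::NewFromUtf8(isolate, "version", v8::NewStringType::kNormal)
          .ToLocalChecked();
  bool ok = target
                ->SetLazyDataProperty(context, key, VersionGetter,
                                      v8::Local<v8::Value>(), v8::DontEnum,
                                      v8::SideEffectType::kHasNoSideEffect)
                .FromMaybe(false);
  (void)ok;
}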
diff --git a/deps/v8/src/api-arguments.cc b/deps/v8/src/api-arguments.cc
deleted file mode 100644
index 502b8cbdca..0000000000
--- a/deps/v8/src/api-arguments.cc
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/api-arguments.h"
-#include "src/api-arguments-inl.h"
-
-#include "src/debug/debug.h"
-#include "src/objects-inl.h"
-#include "src/tracing/trace-event.h"
-#include "src/vm-state-inl.h"
-
-namespace v8 {
-namespace internal {
-
-Handle<Object> FunctionCallbackArguments::Call(CallHandlerInfo* handler) {
- Isolate* isolate = this->isolate();
- LOG(isolate, ApiObjectAccess("call", holder()));
- RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kFunctionCallback);
- v8::FunctionCallback f =
- v8::ToCData<v8::FunctionCallback>(handler->callback());
- if (isolate->needs_side_effect_check() &&
- !isolate->debug()->PerformSideEffectCheckForCallback(FUNCTION_ADDR(f))) {
- return Handle<Object>();
- }
- VMState<EXTERNAL> state(isolate);
- ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
- FunctionCallbackInfo<v8::Value> info(begin(), argv_, argc_);
- f(info);
- return GetReturnValue<Object>(isolate);
-}
-
-Handle<JSObject> PropertyCallbackArguments::CallNamedEnumerator(
- Handle<InterceptorInfo> interceptor) {
- DCHECK(interceptor->is_named());
- LOG(isolate(), ApiObjectAccess("interceptor-named-enumerator", holder()));
- RuntimeCallTimerScope timer(isolate(),
- RuntimeCallCounterId::kNamedEnumeratorCallback);
- return CallPropertyEnumerator(interceptor);
-}
-
-Handle<JSObject> PropertyCallbackArguments::CallIndexedEnumerator(
- Handle<InterceptorInfo> interceptor) {
- DCHECK(!interceptor->is_named());
- LOG(isolate(), ApiObjectAccess("interceptor-indexed-enumerator", holder()));
- RuntimeCallTimerScope timer(isolate(),
- RuntimeCallCounterId::kIndexedEnumeratorCallback);
- return CallPropertyEnumerator(interceptor);
-}
-
-bool PropertyCallbackArguments::PerformSideEffectCheck(Isolate* isolate,
- Address function) {
- return isolate->debug()->PerformSideEffectCheckForCallback(function);
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/api-arguments.h b/deps/v8/src/api-arguments.h
index 413a72a3ae..0abbcdcafa 100644
--- a/deps/v8/src/api-arguments.h
+++ b/deps/v8/src/api-arguments.h
@@ -6,6 +6,7 @@
#define V8_API_ARGUMENTS_H_
#include "src/api.h"
+#include "src/debug/debug.h"
#include "src/isolate.h"
#include "src/visitors.h"
@@ -15,34 +16,30 @@ namespace internal {
// Custom arguments replicate a small segment of stack that can be
// accessed through an Arguments object the same way the actual stack
// can.
-template <int kArrayLength>
class CustomArgumentsBase : public Relocatable {
- public:
- virtual inline void IterateInstance(RootVisitor* v) {
- v->VisitRootPointers(Root::kRelocatable, nullptr, values_,
- values_ + kArrayLength);
- }
-
protected:
- inline Object** begin() { return values_; }
explicit inline CustomArgumentsBase(Isolate* isolate)
: Relocatable(isolate) {}
- Object* values_[kArrayLength];
};
template <typename T>
-class CustomArguments : public CustomArgumentsBase<T::kArgsLength> {
+class CustomArguments : public CustomArgumentsBase {
public:
static const int kReturnValueOffset = T::kReturnValueIndex;
- typedef CustomArgumentsBase<T::kArgsLength> Super;
~CustomArguments() {
this->begin()[kReturnValueOffset] =
reinterpret_cast<Object*>(kHandleZapValue);
}
+ virtual inline void IterateInstance(RootVisitor* v) {
+ v->VisitRootPointers(Root::kRelocatable, nullptr, values_,
+ values_ + T::kArgsLength);
+ }
+
protected:
- explicit inline CustomArguments(Isolate* isolate) : Super(isolate) {}
+ explicit inline CustomArguments(Isolate* isolate)
+ : CustomArgumentsBase(isolate) {}
template <typename V>
Handle<V> GetReturnValue(Isolate* isolate);
@@ -50,6 +47,9 @@ class CustomArguments : public CustomArgumentsBase<T::kArgsLength> {
inline Isolate* isolate() {
return reinterpret_cast<Isolate*>(this->begin()[T::kIsolateIndex]);
}
+
+ inline Object** begin() { return values_; }
+ Object* values_[T::kArgsLength];
};
template <typename T>
@@ -103,8 +103,9 @@ class PropertyCallbackArguments
// -------------------------------------------------------------------------
// Accessor Callbacks
// Also used for AccessorSetterCallback.
- inline void CallAccessorSetter(Handle<AccessorInfo> info, Handle<Name> name,
- Handle<Object> value);
+ inline Handle<Object> CallAccessorSetter(Handle<AccessorInfo> info,
+ Handle<Name> name,
+ Handle<Object> value);
// Also used for AccessorGetterCallback, AccessorNameGetterCallback.
inline Handle<Object> CallAccessorGetter(Handle<AccessorInfo> info,
Handle<Name> name);
@@ -118,9 +119,6 @@ class PropertyCallbackArguments
inline Handle<Object> CallNamedSetter(Handle<InterceptorInfo> interceptor,
Handle<Name> name,
Handle<Object> value);
- inline Handle<Object> CallNamedSetterCallback(
- GenericNamedPropertySetterCallback callback, Handle<Name> name,
- Handle<Object> value);
inline Handle<Object> CallNamedDefiner(Handle<InterceptorInfo> interceptor,
Handle<Name> name,
const v8::PropertyDescriptor& desc);
@@ -128,7 +126,8 @@ class PropertyCallbackArguments
Handle<Name> name);
inline Handle<Object> CallNamedDescriptor(Handle<InterceptorInfo> interceptor,
Handle<Name> name);
- Handle<JSObject> CallNamedEnumerator(Handle<InterceptorInfo> interceptor);
+ inline Handle<JSObject> CallNamedEnumerator(
+ Handle<InterceptorInfo> interceptor);
// -------------------------------------------------------------------------
// Indexed Interceptor Callbacks
@@ -145,7 +144,8 @@ class PropertyCallbackArguments
uint32_t index);
inline Handle<Object> CallIndexedDescriptor(
Handle<InterceptorInfo> interceptor, uint32_t index);
- Handle<JSObject> CallIndexedEnumerator(Handle<InterceptorInfo> interceptor);
+ inline Handle<JSObject> CallIndexedEnumerator(
+ Handle<InterceptorInfo> interceptor);
private:
/*
@@ -160,16 +160,15 @@ class PropertyCallbackArguments
Handle<InterceptorInfo> interceptor);
inline Handle<Object> BasicCallIndexedGetterCallback(
- IndexedPropertyGetterCallback f, uint32_t index);
+ IndexedPropertyGetterCallback f, uint32_t index, Handle<Object> info);
inline Handle<Object> BasicCallNamedGetterCallback(
- GenericNamedPropertyGetterCallback f, Handle<Name> name);
+ GenericNamedPropertyGetterCallback f, Handle<Name> name,
+ Handle<Object> info);
inline JSObject* holder() {
return JSObject::cast(this->begin()[T::kHolderIndex]);
}
- bool PerformSideEffectCheck(Isolate* isolate, Address function);
-
// Don't copy PropertyCallbackArguments, because they would both have the
// same prev_ pointer.
DISALLOW_COPY_AND_ASSIGN(PropertyCallbackArguments);
@@ -216,7 +215,7 @@ class FunctionCallbackArguments
* and used if it's been set to anything inside the callback.
* New style callbacks always use the return value.
*/
- Handle<Object> Call(CallHandlerInfo* handler);
+ inline Handle<Object> Call(CallHandlerInfo* handler);
private:
inline JSObject* holder() {
diff --git a/deps/v8/src/api-natives.cc b/deps/v8/src/api-natives.cc
index 488b99fd25..981f592f5a 100644
--- a/deps/v8/src/api-natives.cc
+++ b/deps/v8/src/api-natives.cc
@@ -626,7 +626,7 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(isolate, obj,
maybe_name);
// To simplify things, API functions always have shared name.
- DCHECK(shared->has_shared_name());
+ DCHECK(shared->HasSharedName());
Handle<JSFunction> result =
isolate->factory()->NewFunctionFromSharedFunctionInfo(
diff --git a/deps/v8/src/api-natives.h b/deps/v8/src/api-natives.h
index 398f198ae5..f73e7cee7e 100644
--- a/deps/v8/src/api-natives.h
+++ b/deps/v8/src/api-natives.h
@@ -21,15 +21,15 @@ class ApiNatives {
public:
static const int kInitialFunctionCacheSize = 256;
- MUST_USE_RESULT static MaybeHandle<JSFunction> InstantiateFunction(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSFunction> InstantiateFunction(
Handle<FunctionTemplateInfo> data,
MaybeHandle<Name> maybe_name = MaybeHandle<Name>());
- MUST_USE_RESULT static MaybeHandle<JSObject> InstantiateObject(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSObject> InstantiateObject(
Handle<ObjectTemplateInfo> data,
Handle<JSReceiver> new_target = Handle<JSReceiver>());
- MUST_USE_RESULT static MaybeHandle<JSObject> InstantiateRemoteObject(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSObject> InstantiateRemoteObject(
Handle<ObjectTemplateInfo> data);
enum ApiInstanceType {
@@ -41,7 +41,7 @@ class ApiNatives {
static Handle<JSFunction> CreateApiFunction(
Isolate* isolate, Handle<FunctionTemplateInfo> obj,
Handle<Object> prototype, ApiInstanceType instance_type,
- MaybeHandle<Name> maybe_name = MaybeHandle<Name>());
+ MaybeHandle<Name> name = MaybeHandle<Name>());
static void AddDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
Handle<Name> name, Handle<Object> value,
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index f5be62058a..8b177d041d 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -69,6 +69,7 @@
#include "src/snapshot/natives.h"
#include "src/snapshot/snapshot.h"
#include "src/startup-data-util.h"
+#include "src/string-hasher.h"
#include "src/tracing/trace-event.h"
#include "src/trap-handler/trap-handler.h"
#include "src/unicode-cache-inl.h"
@@ -307,20 +308,23 @@ static ScriptOrigin GetScriptOriginForScript(i::Isolate* isolate,
// --- E x c e p t i o n B e h a v i o r ---
-
-void i::FatalProcessOutOfMemory(const char* location) {
- i::V8::FatalProcessOutOfMemory(location, false);
+void i::FatalProcessOutOfMemory(i::Isolate* isolate, const char* location) {
+ i::V8::FatalProcessOutOfMemory(isolate, location, false);
}
// When V8 cannot allocate memory FatalProcessOutOfMemory is called. The default
// OOM error handler is called and execution is stopped.
-void i::V8::FatalProcessOutOfMemory(const char* location, bool is_heap_oom) {
- i::Isolate* isolate = i::Isolate::Current();
+void i::V8::FatalProcessOutOfMemory(i::Isolate* isolate, const char* location,
+ bool is_heap_oom) {
char last_few_messages[Heap::kTraceRingBufferSize + 1];
char js_stacktrace[Heap::kStacktraceBufferSize + 1];
i::HeapStats heap_stats;
if (isolate == nullptr) {
+ isolate = Isolate::Current();
+ }
+
+ if (isolate == nullptr) {
// On a background thread -> we cannot retrieve memory information from the
// Isolate. Write easy-to-recognize values on the stack.
memset(last_few_messages, 0x0BADC0DE, Heap::kTraceRingBufferSize + 1);
@@ -329,7 +333,7 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool is_heap_oom) {
// Note that the embedder's oom handler won't be called in this case. We
// just crash.
FATAL("API fatal error handler returned after process out of memory");
- return;
+ UNREACHABLE();
}
memset(last_few_messages, 0, Heap::kTraceRingBufferSize + 1);
@@ -393,7 +397,7 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool is_heap_oom) {
PrintF("\n<--- Last few GCs --->\n%s\n", first_newline);
PrintF("\n<--- JS stacktrace --->\n%s\n", js_stacktrace);
}
- Utils::ReportOOMFailure(location, is_heap_oom);
+ Utils::ReportOOMFailure(isolate, location, is_heap_oom);
// If the fatal error handler returns, we stop execution.
FATAL("API fatal error handler returned after process out of memory");
}
@@ -415,8 +419,8 @@ void Utils::ReportApiFailure(const char* location, const char* message) {
isolate->SignalFatalError();
}
-void Utils::ReportOOMFailure(const char* location, bool is_heap_oom) {
- i::Isolate* isolate = i::Isolate::Current();
+void Utils::ReportOOMFailure(i::Isolate* isolate, const char* location,
+ bool is_heap_oom) {
OOMErrorCallback oom_callback = isolate->oom_behavior();
if (oom_callback == nullptr) {
// TODO(wfh): Remove this fallback once Blink is setting OOM handler. See
@@ -704,7 +708,7 @@ StartupData SnapshotCreator::CreateBlob(
// context even after we have disposed of the context.
isolate->heap()->CollectAllAvailableGarbage(
i::GarbageCollectionReason::kSnapshotCreator);
- isolate->heap()->CompactWeakFixedArrays();
+ isolate->heap()->CompactFixedArraysOfWeakCells();
i::DisallowHeapAllocation no_gc_from_here_on;
@@ -728,15 +732,33 @@ StartupData SnapshotCreator::CreateBlob(
i::SerializedHandleChecker handle_checker(isolate, &contexts);
CHECK(handle_checker.CheckGlobalAndEternalHandles());
- // Complete in-object slack tracking for all functions.
i::HeapIterator heap_iterator(isolate->heap());
while (i::HeapObject* current_obj = heap_iterator.next()) {
- if (!current_obj->IsJSFunction()) continue;
- i::JSFunction* fun = i::JSFunction::cast(current_obj);
- fun->CompleteInobjectSlackTrackingIfActive();
+ if (current_obj->IsJSFunction()) {
+ i::JSFunction* fun = i::JSFunction::cast(current_obj);
+
+ // Complete in-object slack tracking for all functions.
+ fun->CompleteInobjectSlackTrackingIfActive();
+
+ // Also, clear out feedback vectors.
+ fun->feedback_cell()->set_value(isolate->heap()->undefined_value());
+ }
+
+ // Clear out re-compilable data from all shared function infos. Any
+ // JSFunctions using these SFIs will have their code pointers reset by the
+ // partial serializer.
+ if (current_obj->IsSharedFunctionInfo() &&
+ function_code_handling == FunctionCodeHandling::kClear) {
+ i::SharedFunctionInfo* shared = i::SharedFunctionInfo::cast(current_obj);
+ if (shared->CanFlushCompiled()) {
+ shared->FlushCompiled();
+ }
+ DCHECK(shared->HasCodeObject() || shared->HasBuiltinId() ||
+ shared->IsApiFunction());
+ }
}
- i::StartupSerializer startup_serializer(isolate, function_code_handling);
+ i::StartupSerializer startup_serializer(isolate);
startup_serializer.SerializeStrongReferences();
// Serialize each context with a new partial serializer.
@@ -1221,13 +1243,18 @@ static i::Handle<i::FixedArray> EmbedderDataFor(Context* context,
if (!Utils::ApiCheck(can_grow, location, "Index too large")) {
return i::Handle<i::FixedArray>();
}
- int new_size = i::Max(index, data->length() << 1) + 1;
+ int new_size = index + 1;
int grow_by = new_size - data->length();
data = isolate->factory()->CopyFixedArrayAndGrow(data, grow_by);
env->set_embedder_data(*data);
return data;
}
+uint32_t Context::GetNumberOfEmbedderDataFields() {
+ i::Handle<i::Context> context = Utils::OpenHandle(this);
+ CHECK(context->IsNativeContext());
+ return static_cast<uint32_t>(context->embedder_data()->length());
+}
v8::Local<v8::Value> Context::SlowGetEmbedderData(int index) {
const char* location = "v8::Context::GetEmbedderData()";
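Besides switching the embedder-data array to grow exactly to index + 1, this hunk adds Context::GetNumberOfEmbedderDataFields(). A small usage sketch, relying only on the accessor shown here plus the long-standing GetEmbedderData(int):

// Sketch: walk the embedder data slots of a native context.
void DumpEmbedderData(v8::Local<v8::Context> context) {
  uint32_t fields = context->GetNumberOfEmbedderDataFields();
  for (uint32_t i = 0; i < fields; ++i) {
    v8::Local<v8::Value> slot = context->GetEmbedderData(static_cast<int>(i));
    (void)slot;  // inspect or serialize the slot here
  }
}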
@@ -1375,7 +1402,8 @@ void FunctionTemplate::Inherit(v8::Local<FunctionTemplate> value) {
static Local<FunctionTemplate> FunctionTemplateNew(
i::Isolate* isolate, FunctionCallback callback, v8::Local<Value> data,
v8::Local<Signature> signature, int length, bool do_not_cache,
- v8::Local<Private> cached_property_name = v8::Local<Private>()) {
+ v8::Local<Private> cached_property_name = v8::Local<Private>(),
+ SideEffectType side_effect_type = SideEffectType::kHasSideEffect) {
i::Handle<i::Struct> struct_obj =
isolate->factory()->NewStruct(i::FUNCTION_TEMPLATE_INFO_TYPE, i::TENURED);
i::Handle<i::FunctionTemplateInfo> obj =
@@ -1388,7 +1416,7 @@ static Local<FunctionTemplate> FunctionTemplateNew(
}
obj->set_serial_number(i::Smi::FromInt(next_serial_number));
if (callback != 0) {
- Utils::ToLocal(obj)->SetCallHandler(callback, data);
+ Utils::ToLocal(obj)->SetCallHandler(callback, data, side_effect_type);
}
obj->set_length(length);
obj->set_undetectable(false);
@@ -1413,8 +1441,8 @@ Local<FunctionTemplate> FunctionTemplate::New(
// function templates when the isolate is created for serialization.
LOG_API(i_isolate, FunctionTemplate, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
- auto templ =
- FunctionTemplateNew(i_isolate, callback, data, signature, length, false);
+ auto templ = FunctionTemplateNew(i_isolate, callback, data, signature, length,
+ false, Local<Private>(), side_effect_type);
if (behavior == ConstructorBehavior::kThrow) templ->RemovePrototype();
return templ;
}
@@ -1442,7 +1470,7 @@ Local<FunctionTemplate> FunctionTemplate::NewWithCache(
LOG_API(i_isolate, FunctionTemplate, NewWithCache);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
return FunctionTemplateNew(i_isolate, callback, data, signature, length,
- false, cache_property);
+ false, cache_property, side_effect_type);
}
Local<Signature> Signature::New(Isolate* isolate,
@@ -1470,10 +1498,8 @@ void FunctionTemplate::SetCallHandler(FunctionCallback callback,
i::Isolate* isolate = info->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScope scope(isolate);
- i::Handle<i::Struct> struct_obj =
- isolate->factory()->NewStruct(i::TUPLE3_TYPE, i::TENURED);
- i::Handle<i::CallHandlerInfo> obj =
- i::Handle<i::CallHandlerInfo>::cast(struct_obj);
+ i::Handle<i::CallHandlerInfo> obj = isolate->factory()->NewCallHandlerInfo(
+ side_effect_type == SideEffectType::kHasNoSideEffect);
SET_FIELD_WRAPPED(obj, set_callback, callback);
SET_FIELD_WRAPPED(obj, set_js_callback, obj->redirected_callback());
if (data.IsEmpty()) {
@@ -1781,6 +1807,9 @@ static i::Handle<i::InterceptorInfo> CreateInterceptorInfo(
static_cast<int>(PropertyHandlerFlags::kAllCanRead));
obj->set_non_masking(static_cast<int>(flags) &
static_cast<int>(PropertyHandlerFlags::kNonMasking));
+ obj->set_has_no_side_effect(
+ static_cast<int>(flags) &
+ static_cast<int>(PropertyHandlerFlags::kHasNoSideEffect));
if (data.IsEmpty()) {
data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
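CreateInterceptorInfo now copies the new PropertyHandlerFlags::kHasNoSideEffect bit onto the InterceptorInfo, which is what the side-effect checks in api-arguments-inl.h consult for interceptors. A hedged sketch of the embedder-facing side, assuming the NamedPropertyHandlerConfiguration constructor that takes a flags argument:

// Sketch: a read-only named interceptor that debug-evaluate may call.
void NamedGetter(v8::Local<v8::Name> name,
                 const v8::PropertyCallbackInfo<v8::Value>& info) {
  info.GetReturnValue().Set(v8::Number::New(info.GetIsolate(), 42));
}

v8::Local<v8::ObjectTemplate> MakeReadOnlyTemplate(v8::Isolate* isolate) {
  v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
  templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
      NamedGetter, nullptr, nullptr, nullptr, nullptr, v8::Local<v8::Value>(),
      v8::PropertyHandlerFlags::kHasNoSideEffect));
  return templ;
}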
@@ -1940,7 +1969,6 @@ void ObjectTemplate::SetHandler(
cons->set_indexed_property_handler(*obj);
}
-
void ObjectTemplate::SetCallAsFunctionHandler(FunctionCallback callback,
Local<Value> data) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
@@ -1948,10 +1976,7 @@ void ObjectTemplate::SetCallAsFunctionHandler(FunctionCallback callback,
i::HandleScope scope(isolate);
auto cons = EnsureConstructor(isolate, this);
EnsureNotInstantiated(cons, "v8::ObjectTemplate::SetCallAsFunctionHandler");
- i::Handle<i::Struct> struct_obj =
- isolate->factory()->NewStruct(i::TUPLE3_TYPE, i::TENURED);
- i::Handle<i::CallHandlerInfo> obj =
- i::Handle<i::CallHandlerInfo>::cast(struct_obj);
+ i::Handle<i::CallHandlerInfo> obj = isolate->factory()->NewCallHandlerInfo();
SET_FIELD_WRAPPED(obj, set_callback, callback);
SET_FIELD_WRAPPED(obj, set_js_callback, obj->redirected_callback());
if (data.IsEmpty()) {
@@ -2374,22 +2399,14 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
source->host_defined_options);
i::MaybeHandle<i::SharedFunctionInfo> maybe_function_info =
i::Compiler::GetSharedFunctionInfoForScript(
- str, script_details, source->resource_options, nullptr, &script_data,
+ str, script_details, source->resource_options, nullptr, script_data,
options, no_cache_reason, i::NOT_NATIVES_CODE);
- has_pending_exception = !maybe_function_info.ToHandle(&result);
- if (has_pending_exception && script_data != nullptr) {
- // This case won't happen during normal operation; we have compiled
- // successfully and produced cached data, and but the second compilation
- // of the same source code fails.
- delete script_data;
- script_data = nullptr;
- }
- RETURN_ON_FAILED_EXECUTION(UnboundScript);
-
if (options == kConsumeCodeCache) {
source->cached_data->rejected = script_data->rejected();
}
delete script_data;
+ has_pending_exception = !maybe_function_info.ToHandle(&result);
+ RETURN_ON_FAILED_EXECUTION(UnboundScript);
RETURN_ESCAPED(ToApiHandle<UnboundScript>(result));
}
@@ -2483,6 +2500,10 @@ MaybeLocal<Function> ScriptCompiler::CompileFunctionInContext(
Function);
TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.ScriptCompiler");
+ DCHECK(options == CompileOptions::kConsumeCodeCache ||
+ options == CompileOptions::kEagerCompile ||
+ options == CompileOptions::kNoCompileOptions);
+
i::Handle<i::Context> context = Utils::OpenHandle(*v8_context);
i::Handle<i::SharedFunctionInfo> outer_info(context->closure()->shared(),
isolate);
@@ -2511,25 +2532,30 @@ MaybeLocal<Function> ScriptCompiler::CompileFunctionInContext(
extension);
}
- i::Handle<i::Object> name_obj;
- int line_offset = 0;
- int column_offset = 0;
- if (!source->resource_name.IsEmpty()) {
- name_obj = Utils::OpenHandle(*(source->resource_name));
- }
- if (!source->resource_line_offset.IsEmpty()) {
- line_offset = static_cast<int>(source->resource_line_offset->Value());
- }
- if (!source->resource_column_offset.IsEmpty()) {
- column_offset = static_cast<int>(source->resource_column_offset->Value());
+ i::Compiler::ScriptDetails script_details = GetScriptDetails(
+ isolate, source->resource_name, source->resource_line_offset,
+ source->resource_column_offset, source->source_map_url,
+ source->host_defined_options);
+
+ i::ScriptData* script_data = nullptr;
+ if (options == kConsumeCodeCache) {
+ DCHECK(source->cached_data);
+ // ScriptData takes care of pointer-aligning the data.
+ script_data = new i::ScriptData(source->cached_data->data,
+ source->cached_data->length);
}
i::Handle<i::JSFunction> result;
has_pending_exception =
!i::Compiler::GetWrappedFunction(
Utils::OpenHandle(*source->source_string), arguments_list, context,
- line_offset, column_offset, name_obj, source->resource_options)
+ script_details, source->resource_options, script_data, options,
+ no_cache_reason)
.ToHandle(&result);
+ if (options == kConsumeCodeCache) {
+ source->cached_data->rejected = script_data->rejected();
+ }
+ delete script_data;
RETURN_ON_FAILED_EXECUTION(Function);
RETURN_ESCAPED(Utils::CallableToLocal(result));
}
@@ -2603,37 +2629,18 @@ ScriptCompiler::CachedData* ScriptCompiler::CreateCodeCache(
i::Handle<i::SharedFunctionInfo> shared =
i::Handle<i::SharedFunctionInfo>::cast(
Utils::OpenHandle(*unbound_script));
- i::Isolate* isolate = shared->GetIsolate();
- TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute");
- base::ElapsedTimer timer;
- if (i::FLAG_profile_deserialization) {
- timer.Start();
- }
- i::HistogramTimerScope histogram_timer(
- isolate->counters()->compile_serialize());
- i::RuntimeCallTimerScope runtimeTimer(
- isolate, i::RuntimeCallCounterId::kCompileSerialize);
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileSerialize");
-
+ i::Handle<i::String> source_str = Utils::OpenHandle(*source);
DCHECK(shared->is_toplevel());
- i::Handle<i::Script> script(i::Script::cast(shared->script()));
- // TODO(7110): Enable serialization of Asm modules once the AsmWasmData is
- // context independent.
- if (script->ContainsAsmModule()) return nullptr;
- if (isolate->debug()->is_loaded()) return nullptr;
-
- i::ScriptData* script_data =
- i::CodeSerializer::Serialize(isolate, shared, Utils::OpenHandle(*source));
- CachedData* result = new CachedData(
- script_data->data(), script_data->length(), CachedData::BufferOwned);
- script_data->ReleaseDataOwnership();
- delete script_data;
+ return i::CodeSerializer::Serialize(shared, source_str);
+}
- if (i::FLAG_profile_deserialization) {
- i::PrintF("[Serializing took %0.3f ms]\n",
- timer.Elapsed().InMillisecondsF());
- }
- return result;
+ScriptCompiler::CachedData* ScriptCompiler::CreateCodeCacheForFunction(
+ Local<Function> function, Local<String> source) {
+ i::Handle<i::SharedFunctionInfo> shared(
+ i::Handle<i::JSFunction>::cast(Utils::OpenHandle(*function))->shared());
+ i::Handle<i::String> source_str = Utils::OpenHandle(*source);
+ CHECK(shared->is_wrapped());
+ return i::CodeSerializer::Serialize(shared, source_str);
}
MaybeLocal<Script> Script::Compile(Local<Context> context, Local<String> source,
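With serialization folded into i::CodeSerializer::Serialize, CreateCodeCache becomes a thin wrapper and the new CreateCodeCacheForFunction covers functions produced by CompileFunctionInContext (which, per the earlier hunk, can now also consume a code cache). A sketch of the producing side, error handling elided and the parameter list assumed to match this version's v8.h:

// Sketch: compile a wrapped function, then serialize a code cache for it.
v8::ScriptCompiler::CachedData* CacheWrappedFunction(
    v8::Local<v8::Context> context, v8::Local<v8::String> body) {
  v8::ScriptCompiler::Source source(body);
  v8::Local<v8::Function> fn;
  if (!v8::ScriptCompiler::CompileFunctionInContext(context, &source, 0,
                                                    nullptr, 0, nullptr)
           .ToLocal(&fn)) {
    return nullptr;  // compilation failed
  }
  // The caller takes ownership of the returned CachedData.
  return v8::ScriptCompiler::CreateCodeCacheForFunction(fn, body);
}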
@@ -2889,34 +2896,34 @@ int Message::ErrorLevel() const {
return self->error_level();
}
-Maybe<int> Message::GetStartColumn(Local<Context> context) const {
+int Message::GetStartColumn() const {
auto self = Utils::OpenHandle(this);
i::Isolate* isolate = self->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
EscapableHandleScope handle_scope(reinterpret_cast<Isolate*>(isolate));
auto msg = i::Handle<i::JSMessageObject>::cast(self);
- return Just(msg->GetColumnNumber());
+ return msg->GetColumnNumber();
}
-
-int Message::GetStartColumn() const {
- auto context = ContextFromHeapObject(Utils::OpenHandle(this));
- const int default_value = kNoColumnInfo;
- return GetStartColumn(context).FromMaybe(default_value);
+Maybe<int> Message::GetStartColumn(Local<Context> context) const {
+ return Just(GetStartColumn());
}
-
-Maybe<int> Message::GetEndColumn(Local<Context> context) const {
+int Message::GetEndColumn() const {
auto self = Utils::OpenHandle(this);
i::Isolate* isolate = self->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
EscapableHandleScope handle_scope(reinterpret_cast<Isolate*>(isolate));
auto msg = i::Handle<i::JSMessageObject>::cast(self);
const int column_number = msg->GetColumnNumber();
- if (column_number == -1) return Just(-1);
+ if (column_number == -1) return -1;
const int start = self->start_position();
const int end = self->end_position();
- return Just(column_number + (end - start));
+ return column_number + (end - start);
+}
+
+Maybe<int> Message::GetEndColumn(Local<Context> context) const {
+ return Just(GetEndColumn());
}
@@ -3468,6 +3475,7 @@ bool Value::IsNumber() const {
return Utils::OpenHandle(this)->IsNumber();
}
+bool Value::IsBigInt() const { return Utils::OpenHandle(this)->IsBigInt(); }
bool Value::IsProxy() const { return Utils::OpenHandle(this)->IsJSProxy(); }
@@ -3486,6 +3494,7 @@ bool Value::IsWebAssemblyCompiledModule() const {
}
VALUE_IS_SPECIFIC_TYPE(ArgumentsObject, JSArgumentsObject)
+VALUE_IS_SPECIFIC_TYPE(BigIntObject, BigIntWrapper)
VALUE_IS_SPECIFIC_TYPE(BooleanObject, BooleanWrapper)
VALUE_IS_SPECIFIC_TYPE(NumberObject, NumberWrapper)
VALUE_IS_SPECIFIC_TYPE(StringObject, StringWrapper)
@@ -3574,10 +3583,6 @@ bool Value::IsSetIterator() const {
bool Value::IsPromise() const { return Utils::OpenHandle(this)->IsJSPromise(); }
-bool Value::IsModuleNamespaceObject() const {
- return Utils::OpenHandle(this)->IsJSModuleNamespace();
-}
-
MaybeLocal<String> Value::ToString(Local<Context> context) const {
auto obj = Utils::OpenHandle(this);
if (obj->IsString()) return ToApiHandle<String>(obj);
@@ -3622,6 +3627,16 @@ Local<v8::Object> Value::ToObject(Isolate* isolate) const {
RETURN_TO_LOCAL_UNCHECKED(ToObject(isolate->GetCurrentContext()), Object);
}
+MaybeLocal<BigInt> Value::ToBigInt(Local<Context> context) const {
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ if (obj->IsBigInt()) return ToApiHandle<BigInt>(obj);
+ PREPARE_FOR_EXECUTION(context, Object, ToBigInt, BigInt);
+ Local<BigInt> result;
+ has_pending_exception =
+ !ToLocal<BigInt>(i::BigInt::FromObject(isolate, obj), &result);
+ RETURN_ON_FAILED_EXECUTION(BigInt);
+ RETURN_ESCAPED(result);
+}
MaybeLocal<Boolean> Value::ToBoolean(Local<Context> context) const {
auto obj = Utils::OpenHandle(this);
@@ -3789,6 +3804,10 @@ void v8::Uint32::CheckCast(v8::Value* that) {
"Could not convert to 32-bit unsigned integer");
}
+void v8::BigInt::CheckCast(v8::Value* that) {
+ Utils::ApiCheck(that->IsBigInt(), "v8::BigInt::Cast",
+ "Could not convert to BigInt");
+}
void v8::Array::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
@@ -3915,6 +3934,11 @@ void v8::NumberObject::CheckCast(v8::Value* that) {
"Could not convert to NumberObject");
}
+void v8::BigIntObject::CheckCast(v8::Value* that) {
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ Utils::ApiCheck(obj->IsBigIntWrapper(), "v8::BigIntObject::Cast()",
+ "Could not convert to BigIntObject");
+}
void v8::BooleanObject::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
@@ -4675,13 +4699,12 @@ Maybe<bool> v8::Object::Has(Local<Context> context, uint32_t index) {
}
template <typename Getter, typename Setter, typename Data>
-static Maybe<bool> ObjectSetAccessor(Local<Context> context, Object* self,
- Local<Name> name, Getter getter,
- Setter setter, Data data,
- AccessControl settings,
- PropertyAttribute attributes,
- bool is_special_data_property,
- bool replace_on_access) {
+static Maybe<bool> ObjectSetAccessor(
+ Local<Context> context, Object* self, Local<Name> name, Getter getter,
+ Setter setter, Data data, AccessControl settings,
+ PropertyAttribute attributes, bool is_special_data_property,
+ bool replace_on_access,
+ SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
ENTER_V8_NO_SCRIPT(isolate, context, Object, SetAccessor, Nothing<bool>(),
i::HandleScope);
@@ -4692,6 +4715,8 @@ static Maybe<bool> ObjectSetAccessor(Local<Context> context, Object* self,
i::Handle<i::AccessorInfo> info =
MakeAccessorInfo(isolate, name, getter, setter, data, settings, signature,
is_special_data_property, replace_on_access);
+ info->set_has_no_side_effect(getter_side_effect_type ==
+ SideEffectType::kHasNoSideEffect);
if (info.is_null()) return Nothing<bool>();
bool fast = obj->HasFastProperties();
i::Handle<i::Object> result;
@@ -4717,7 +4742,8 @@ Maybe<bool> Object::SetAccessor(Local<Context> context, Local<Name> name,
SideEffectType getter_side_effect_type) {
return ObjectSetAccessor(context, this, name, getter, setter,
data.FromMaybe(Local<Value>()), settings, attribute,
- i::FLAG_disable_old_api_accessors, false);
+ i::FLAG_disable_old_api_accessors, false,
+ getter_side_effect_type);
}
@@ -4746,7 +4772,7 @@ Maybe<bool> Object::SetNativeDataProperty(
v8::Local<Value> data, PropertyAttribute attributes,
SideEffectType getter_side_effect_type) {
return ObjectSetAccessor(context, this, name, getter, setter, data, DEFAULT,
- attributes, true, false);
+ attributes, true, false, getter_side_effect_type);
}
Maybe<bool> Object::SetLazyDataProperty(
@@ -4755,7 +4781,8 @@ Maybe<bool> Object::SetLazyDataProperty(
PropertyAttribute attributes, SideEffectType getter_side_effect_type) {
return ObjectSetAccessor(context, this, name, getter,
static_cast<AccessorNameSetterCallback>(nullptr),
- data, DEFAULT, attributes, true, true);
+ data, DEFAULT, attributes, true, true,
+ getter_side_effect_type);
}
Maybe<bool> v8::Object::HasOwnProperty(Local<Context> context,
@@ -5025,8 +5052,9 @@ MaybeLocal<Function> Function::New(Local<Context> context,
i::Isolate* isolate = Utils::OpenHandle(*context)->GetIsolate();
LOG_API(isolate, Function, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- auto templ = FunctionTemplateNew(isolate, callback, data, Local<Signature>(),
- length, true);
+ auto templ =
+ FunctionTemplateNew(isolate, callback, data, Local<Signature>(), length,
+ true, Local<Private>(), side_effect_type);
if (behavior == ConstructorBehavior::kThrow) templ->RemovePrototype();
return templ->GetFunction(context);
}
@@ -5091,7 +5119,7 @@ void Function::SetName(v8::Local<v8::String> name) {
auto self = Utils::OpenHandle(this);
if (!self->IsJSFunction()) return;
auto func = i::Handle<i::JSFunction>::cast(self);
- func->shared()->set_name(*Utils::OpenHandle(*name));
+ func->shared()->SetName(*Utils::OpenHandle(*name));
}
@@ -5108,7 +5136,7 @@ Local<Value> Function::GetName() const {
}
if (self->IsJSFunction()) {
auto func = i::Handle<i::JSFunction>::cast(self);
- return Utils::ToLocal(handle(func->shared()->name(), isolate));
+ return Utils::ToLocal(handle(func->shared()->Name(), isolate));
}
return ToApiHandle<Primitive>(isolate->factory()->undefined_value());
}
@@ -5183,7 +5211,7 @@ int Function::GetScriptLineNumber() const {
auto func = i::Handle<i::JSFunction>::cast(self);
if (func->shared()->script()->IsScript()) {
i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
- return i::Script::GetLineNumber(script, func->shared()->start_position());
+ return i::Script::GetLineNumber(script, func->shared()->StartPosition());
}
return kLineOffsetNotFound;
}
@@ -5197,7 +5225,7 @@ int Function::GetScriptColumnNumber() const {
auto func = i::Handle<i::JSFunction>::cast(self);
if (func->shared()->script()->IsScript()) {
i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
- return i::Script::GetColumnNumber(script, func->shared()->start_position());
+ return i::Script::GetColumnNumber(script, func->shared()->StartPosition());
}
return kLineOffsetNotFound;
}
@@ -5556,6 +5584,7 @@ static int Utf8Length(i::String* str, i::Isolate* isolate) {
int String::Utf8Length() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
+ str = i::String::Flatten(str);
i::Isolate* isolate = str->GetIsolate();
return v8::Utf8Length(*str, isolate);
}
@@ -5769,9 +5798,7 @@ int String::WriteUtf8(char* buffer,
i::Isolate* isolate = str->GetIsolate();
LOG_API(isolate, String, WriteUtf8);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- if (options & HINT_MANY_WRITES_EXPECTED) {
- str = i::String::Flatten(str); // Flatten the string for efficiency.
- }
+ str = i::String::Flatten(str); // Flatten the string for efficiency.
const int string_length = str->length();
bool write_null = !(options & NO_NULL_TERMINATION);
bool replace_invalid_utf8 = (options & REPLACE_INVALID_UTF8);
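Utf8Length() and the Write/WriteUtf8 paths now flatten the string unconditionally rather than only under HINT_MANY_WRITES_EXPECTED; the hint remains accepted but no longer controls flattening. The usual copy-out pattern is unchanged for embedders:

#include <string>

// Sketch: copy a v8::String into a UTF-8 std::string sized via Utf8Length().
std::string ToStdString(v8::Local<v8::String> str) {
  std::string out(str->Utf8Length(), '\0');
  if (out.empty()) return out;
  str->WriteUtf8(&out[0], static_cast<int>(out.size()), nullptr,
                 v8::String::NO_NULL_TERMINATION);
  return out;
}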
@@ -5823,11 +5850,7 @@ static inline int WriteHelper(const String* string,
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
DCHECK(start >= 0 && length >= -1);
i::Handle<i::String> str = Utils::OpenHandle(string);
- if (options & String::HINT_MANY_WRITES_EXPECTED) {
- // Flatten the string for efficiency. This applies whether we are
- // using StringCharacterStream or Get(i) to access the characters.
- str = i::String::Flatten(str);
- }
+ str = i::String::Flatten(str);
int end = start + length;
if ((length == -1) || (length > str->length() - start) )
end = str->length();
@@ -6088,6 +6111,10 @@ bool V8::RegisterDefaultSignalHandler() {
return v8::internal::trap_handler::RegisterDefaultSignalHandler();
}
+bool V8::EnableWebAssemblyTrapHandler(bool use_v8_signal_handler) {
+ return v8::internal::trap_handler::EnableTrapHandler(use_v8_signal_handler);
+}
+
void v8::V8::SetEntropySource(EntropySource entropy_source) {
base::RandomNumberGenerator::SetEntropySource(entropy_source);
}
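The new V8::EnableWebAssemblyTrapHandler complements RegisterDefaultSignalHandler above: it switches WebAssembly to trap-based out-of-bounds handling and lets the embedder decide whether V8 installs its own signal handler. Illustrative call, using only the signature shown in this hunk:

// Sketch: pass false instead if the embedder installs and chains its own
// SIGSEGV handler.
bool trap_handler_on =
    v8::V8::EnableWebAssemblyTrapHandler(/*use_v8_signal_handler=*/true);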
@@ -6136,6 +6163,10 @@ HeapObjectStatistics::HeapObjectStatistics()
HeapCodeStatistics::HeapCodeStatistics()
: code_and_metadata_size_(0), bytecode_and_metadata_size_(0) {}
+bool v8::V8::InitializeICU(const char* icu_data_file) {
+ return i::InitializeICU(icu_data_file);
+}
+
bool v8::V8::InitializeICUDefaultLocation(const char* exec_path,
const char* icu_data_file) {
return i::InitializeICUDefaultLocation(exec_path, icu_data_file);
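InitializeICU(icu_data_file) is exposed next to the existing InitializeICUDefaultLocation; the former takes an explicit data bundle, the latter derives the location from the executable path. A startup sketch (the bundle path is purely hypothetical):

// Sketch: prefer an explicit ICU bundle, fall back to the default location.
void InitIcu(const char* exec_path) {
  if (!v8::V8::InitializeICU("./icudtl.dat")) {  // hypothetical path
    v8::V8::InitializeICUDefaultLocation(exec_path);
  }
}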
@@ -6604,8 +6635,7 @@ inline int StringLength(const uint16_t* string) {
return length;
}
-
-MUST_USE_RESULT
+V8_WARN_UNUSED_RESULT
inline i::MaybeHandle<i::String> NewString(i::Factory* factory,
v8::NewStringType type,
i::Vector<const char> string) {
@@ -6615,8 +6645,7 @@ inline i::MaybeHandle<i::String> NewString(i::Factory* factory,
return factory->NewStringFromUtf8(string);
}
-
-MUST_USE_RESULT
+V8_WARN_UNUSED_RESULT
inline i::MaybeHandle<i::String> NewString(i::Factory* factory,
v8::NewStringType type,
i::Vector<const uint8_t> string) {
@@ -6626,8 +6655,7 @@ inline i::MaybeHandle<i::String> NewString(i::Factory* factory,
return factory->NewStringFromOneByte(string);
}
-
-MUST_USE_RESULT
+V8_WARN_UNUSED_RESULT
inline i::MaybeHandle<i::String> NewString(i::Factory* factory,
v8::NewStringType type,
i::Vector<const uint16_t> string) {
@@ -6867,6 +6895,25 @@ double v8::NumberObject::ValueOf() const {
return jsvalue->value()->Number();
}
+Local<v8::Value> v8::BigIntObject::New(Isolate* isolate, int64_t value) {
+ CHECK(i::FLAG_harmony_bigint);
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ LOG_API(i_isolate, BigIntObject, New);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
+ i::Handle<i::Object> bigint = i::BigInt::FromInt64(i_isolate, value);
+ i::Handle<i::Object> obj =
+ i::Object::ToObject(i_isolate, bigint).ToHandleChecked();
+ return Utils::ToLocal(obj);
+}
+
+Local<v8::BigInt> v8::BigIntObject::ValueOf() const {
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
+ i::Isolate* isolate = jsvalue->GetIsolate();
+ LOG_API(isolate, BigIntObject, BigIntValue);
+ return Utils::ToLocal(
+ i::Handle<i::BigInt>(i::BigInt::cast(jsvalue->value())));
+}
Local<v8::Value> v8::BooleanObject::New(Isolate* isolate, bool value) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
@@ -7644,17 +7691,23 @@ v8::ArrayBuffer::Contents v8::ArrayBuffer::Externalize() {
Utils::ApiCheck(!self->is_external(), "v8_ArrayBuffer_Externalize",
"ArrayBuffer already externalized");
self->set_is_external(true);
- if (self->has_guard_region()) {
+
+ // We need to capture the contents before releasing the allocation from the
+ // Wasm tracker, because otherwise we will not correctly capture the
+ // allocation data.
+ const v8::ArrayBuffer::Contents contents = GetContents();
+ if (self->is_wasm_memory()) {
// Since this is being externalized, the Wasm Allocation Tracker can no
// longer track it.
//
// TODO(eholk): Find a way to track this across externalization
- isolate->wasm_engine()->allocation_tracker()->ReleaseAddressSpace(
- self->allocation_length());
+ isolate->wasm_engine()->memory_tracker()->ReleaseAllocation(
+ self->backing_store());
}
isolate->heap()->UnregisterArrayBuffer(*self);
- return GetContents();
+ // A regular copy is good enough. No move semantics needed.
+ return contents;
}
@@ -7664,7 +7717,7 @@ v8::ArrayBuffer::Contents v8::ArrayBuffer::GetContents() {
Contents contents;
contents.allocation_base_ = self->allocation_base();
contents.allocation_length_ = self->allocation_length();
- contents.allocation_mode_ = self->has_guard_region()
+ contents.allocation_mode_ = self->is_wasm_memory()
? Allocator::AllocationMode::kReservation
: Allocator::AllocationMode::kNormal;
contents.data_ = self->backing_store();
@@ -7702,7 +7755,7 @@ Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, size_t byte_length) {
// TODO(jbroman): It may be useful in the future to provide a MaybeLocal
// version that throws an exception or otherwise does not crash.
if (!i::JSArrayBuffer::SetupAllocatingData(obj, i_isolate, byte_length)) {
- i::FatalProcessOutOfMemory("v8::ArrayBuffer::New");
+ i::FatalProcessOutOfMemory(i_isolate, "v8::ArrayBuffer::New");
}
return Utils::ToLocal(obj);
}
@@ -7867,16 +7920,23 @@ v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::Externalize() {
Utils::ApiCheck(!self->is_external(), "v8_SharedArrayBuffer_Externalize",
"SharedArrayBuffer already externalized");
self->set_is_external(true);
- if (self->has_guard_region()) {
+
+ // We need to capture the contents before releasing the allocation from the
+ // Wasm tracker, because otherwise we will not correctly capture the
+ // allocation data.
+ const v8::SharedArrayBuffer::Contents contents = GetContents();
+ if (self->is_wasm_memory()) {
// Since this is being externalized, the Wasm Allocation Tracker can no
// longer track it.
//
// TODO(eholk): Find a way to track this across externalization
- isolate->wasm_engine()->allocation_tracker()->ReleaseAddressSpace(
- self->allocation_length());
+ isolate->wasm_engine()->memory_tracker()->ReleaseAllocation(
+ self->backing_store());
}
isolate->heap()->UnregisterArrayBuffer(*self);
- return GetContents();
+
+ // A regular copy is good enough. No move semantics needed.
+ return contents;
}
@@ -7887,7 +7947,7 @@ v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::GetContents() {
contents.allocation_base_ = self->allocation_base();
contents.allocation_length_ = self->allocation_length();
contents.allocation_mode_ =
- self->has_guard_region()
+ self->is_wasm_memory()
? ArrayBufferAllocator::Allocator::AllocationMode::kReservation
: ArrayBufferAllocator::Allocator::AllocationMode::kNormal;
contents.data_ = self->backing_store();
@@ -7914,7 +7974,7 @@ Local<SharedArrayBuffer> v8::SharedArrayBuffer::New(Isolate* isolate,
// version that throws an exception or otherwise does not crash.
if (!i::JSArrayBuffer::SetupAllocatingData(obj, i_isolate, byte_length, true,
i::SharedFlag::kShared)) {
- i::FatalProcessOutOfMemory("v8::SharedArrayBuffer::New");
+ i::FatalProcessOutOfMemory(i_isolate, "v8::SharedArrayBuffer::New");
}
return Utils::ToLocalShared(obj);
}
@@ -8041,6 +8101,13 @@ Local<Integer> v8::Integer::NewFromUnsigned(Isolate* isolate, uint32_t value) {
return Utils::IntegerToLocal(result);
}
+Local<BigInt> v8::BigInt::New(Isolate* isolate, int64_t value) {
+ CHECK(i::FLAG_harmony_bigint);
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(internal_isolate);
+ i::Handle<i::BigInt> result = i::BigInt::FromInt64(internal_isolate, value);
+ return Utils::ToLocal(result);
+}
void Isolate::ReportExternalAllocationLimitReached() {
i::Heap* heap = reinterpret_cast<i::Isolate*>(this)->heap();
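Together with Value::IsBigInt/ToBigInt and the BigIntObject wrapper earlier in this file, this gives the API its first BigInt surface; everything is still gated on --harmony-bigint, as the CHECKs show. A short sketch using only entry points added by this patch:

// Sketch: create a BigInt, box it, and unbox it again (requires the
// --harmony-bigint flag in this V8 version).
void BigIntRoundTrip(v8::Isolate* isolate) {
  v8::Local<v8::BigInt> big = v8::BigInt::New(isolate, 1234567890123LL);
  v8::Local<v8::Value> boxed = v8::BigIntObject::New(isolate, 42);
  if (boxed->IsBigIntObject()) {
    v8::Local<v8::BigInt> unboxed = boxed.As<v8::BigIntObject>()->ValueOf();
    (void)unboxed;
  }
  (void)big;
}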
@@ -8067,6 +8134,10 @@ CpuProfiler* Isolate::GetCpuProfiler() {
return reinterpret_cast<CpuProfiler*>(cpu_profiler);
}
+void Isolate::SetIdle(bool is_idle) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->SetIdle(is_idle);
+}
bool Isolate::InContext() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
@@ -8745,19 +8816,14 @@ void Isolate::SetRAILMode(RAILMode rail_mode) {
}
void Isolate::IncreaseHeapLimitForDebugging() {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- isolate->heap()->IncreaseHeapLimitForDebugging();
+ // No-op.
}
void Isolate::RestoreOriginalHeapLimit() {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- isolate->heap()->RestoreOriginalHeapLimit();
+ // No-op.
}
-bool Isolate::IsHeapLimitIncreasedForDebugging() {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- return isolate->heap()->IsHeapLimitIncreasedForDebugging();
-}
+bool Isolate::IsHeapLimitIncreasedForDebugging() { return false; }
void Isolate::SetJitCodeEventHandler(JitCodeEventOptions options,
JitCodeEventHandler event_handler) {
@@ -8806,6 +8872,18 @@ CALLBACK_SETTER(WasmInstanceCallback, ExtensionCallback, wasm_instance_callback)
CALLBACK_SETTER(WasmCompileStreamingCallback, ApiImplementationCallback,
wasm_compile_streaming_callback)
+void Isolate::AddNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
+ void* data) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->heap()->AddNearHeapLimitCallback(callback, data);
+}
+
+void Isolate::RemoveNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
+ size_t heap_limit) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->heap()->RemoveNearHeapLimitCallback(callback, heap_limit);
+}
+
bool Isolate::IsDead() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
return isolate->IsDead();
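The heap-limit-for-debugging methods above become no-ops; their replacement is this callback pair, which lets the embedder (or the inspector) raise the limit on demand as the heap approaches it. Sketch below; the callback signature returning the new limit is an assumption about this version's v8.h:

// Sketch: grant extra headroom near the limit instead of hitting a hard OOM.
size_t NearHeapLimit(void* data, size_t current_heap_limit,
                     size_t initial_heap_limit) {
  return current_heap_limit * 2;  // assumed contract: return the limit to use
}

void InstallHeapLimitCallback(v8::Isolate* isolate) {
  isolate->AddNearHeapLimitCallback(NearHeapLimit, nullptr);
}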
@@ -9054,6 +9132,16 @@ int debug::GetContextId(Local<Context> context) {
return (value->IsSmi()) ? i::Smi::ToInt(value) : 0;
}
+void debug::SetInspector(Isolate* isolate,
+ v8_inspector::V8Inspector* inspector) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i_isolate->set_inspector(inspector);
+}
+
+v8_inspector::V8Inspector* debug::GetInspector(Isolate* isolate) {
+ return reinterpret_cast<i::Isolate*>(isolate)->inspector();
+}
+
Local<Context> debug::GetDebugContext(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
@@ -9118,9 +9206,7 @@ void debug::SetBreakPointsActive(Isolate* v8_isolate, bool is_active) {
void debug::SetOutOfMemoryCallback(Isolate* isolate,
OutOfMemoryCallback callback, void* data) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
- i_isolate->heap()->SetOutOfMemoryCallback(callback, data);
+ // No-op.
}
void debug::PrepareStep(Isolate* v8_isolate, StepAction action) {
@@ -9367,8 +9453,8 @@ bool debug::Script::SetBreakpoint(v8::Local<v8::String> condition,
i::Handle<i::Script> script = Utils::OpenHandle(this);
i::Isolate* isolate = script->GetIsolate();
int offset = GetSourceOffset(*location);
- if (!isolate->debug()->SetBreakpoint(script, Utils::OpenHandle(*condition),
- &offset, id)) {
+ if (!isolate->debug()->SetBreakPointForScript(
+ script, Utils::OpenHandle(*condition), &offset, id)) {
return false;
}
*location = GetSourceLocation(offset);
@@ -9425,6 +9511,25 @@ std::pair<int, int> debug::WasmScript::GetFunctionRange(
static_cast<int>(func.code.end_offset()));
}
+uint32_t debug::WasmScript::GetFunctionHash(int function_index) {
+ i::DisallowHeapAllocation no_gc;
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+ DCHECK_EQ(i::Script::TYPE_WASM, script->type());
+ i::WasmCompiledModule* compiled_module =
+ i::WasmCompiledModule::cast(script->wasm_compiled_module());
+ i::wasm::WasmModule* module = compiled_module->shared()->module();
+ DCHECK_LE(0, function_index);
+ DCHECK_GT(module->functions.size(), function_index);
+ i::wasm::WasmFunction& func = module->functions[function_index];
+ i::SeqOneByteString* module_bytes = compiled_module->shared()->module_bytes();
+ i::wasm::ModuleWireBytes wire_bytes(
+ module_bytes->GetFlatContent().ToOneByteVector());
+ i::Vector<const i::byte> function_bytes = wire_bytes.GetFunctionBytes(&func);
+ // TODO(herhut): Maybe also take module, name and signature into account.
+ return i::StringHasher::HashSequentialString(function_bytes.start(),
+ function_bytes.length(), 0);
+}
+
debug::WasmDisassembly debug::WasmScript::DisassembleFunction(
int function_index) const {
i::DisallowHeapAllocation no_gc;
@@ -9480,15 +9585,15 @@ MaybeLocal<UnboundScript> debug::CompileInspectorScript(Isolate* v8_isolate,
Local<String> source) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
PREPARE_FOR_DEBUG_INTERFACE_EXECUTION_WITH_ISOLATE(isolate, UnboundScript);
- i::ScriptData* script_data = nullptr;
i::Handle<i::String> str = Utils::OpenHandle(*source);
i::Handle<i::SharedFunctionInfo> result;
{
ScriptOriginOptions origin_options;
+ i::ScriptData* script_data = nullptr;
i::MaybeHandle<i::SharedFunctionInfo> maybe_function_info =
i::Compiler::GetSharedFunctionInfoForScript(
str, i::Compiler::ScriptDetails(), origin_options, nullptr,
- &script_data, ScriptCompiler::kNoCompileOptions,
+ script_data, ScriptCompiler::kNoCompileOptions,
ScriptCompiler::kNoCacheBecauseInspector,
i::FLAG_expose_inspector_scripts ? i::NOT_NATIVES_CODE
: i::INSPECTOR_CODE);
@@ -9526,20 +9631,21 @@ int debug::EstimatedValueSize(Isolate* v8_isolate, v8::Local<v8::Value> value) {
return i::Handle<i::HeapObject>::cast(object)->Size();
}
-v8::MaybeLocal<v8::Array> v8::Object::PreviewEntries(bool* is_key_value) {
- if (IsMap()) {
+v8::MaybeLocal<v8::Array> debug::EntriesPreview(Isolate* v8_isolate,
+ v8::Local<v8::Value> value,
+ bool* is_key_value) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
+ if (value->IsMap()) {
*is_key_value = true;
- return Map::Cast(this)->AsArray();
+ return value.As<Map>()->AsArray();
}
- if (IsSet()) {
+ if (value->IsSet()) {
*is_key_value = false;
- return Set::Cast(this)->AsArray();
+ return value.As<Set>()->AsArray();
}
- i::Handle<i::JSReceiver> object = Utils::OpenHandle(this);
- i::Isolate* isolate = object->GetIsolate();
- Isolate* v8_isolate = reinterpret_cast<Isolate*>(isolate);
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
+ i::Handle<i::Object> object = Utils::OpenHandle(*value);
if (object->IsJSWeakCollection()) {
*is_key_value = object->IsJSWeakMap();
return Utils::ToLocal(i::JSWeakCollection::GetEntries(
@@ -9591,9 +9697,8 @@ Local<Function> debug::GetBuiltin(Isolate* v8_isolate, Builtin builtin) {
}
i::Handle<i::String> name = isolate->factory()->empty_string();
- i::Handle<i::Code> code(isolate->builtins()->builtin(builtin_id));
i::NewFunctionArgs args = i::NewFunctionArgs::ForBuiltinWithoutPrototype(
- name, code, builtin_id, i::LanguageMode::kSloppy);
+ name, builtin_id, i::LanguageMode::kSloppy);
i::Handle<i::JSFunction> fun = isolate->factory()->NewFunction(args);
fun->shared()->DontAdaptArguments();
@@ -9668,12 +9773,14 @@ v8::Local<debug::GeneratorObject> debug::GeneratorObject::Cast(
}
MaybeLocal<v8::Value> debug::EvaluateGlobal(v8::Isolate* isolate,
- v8::Local<v8::String> source) {
+ v8::Local<v8::String> source,
+ bool throw_on_side_effect) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
PREPARE_FOR_DEBUG_INTERFACE_EXECUTION_WITH_ISOLATE(internal_isolate, Value);
Local<Value> result;
has_pending_exception = !ToLocal<Value>(
- i::DebugEvaluate::Global(internal_isolate, Utils::OpenHandle(*source)),
+ i::DebugEvaluate::Global(internal_isolate, Utils::OpenHandle(*source),
+ throw_on_side_effect),
&result);
RETURN_ON_FAILED_EXECUTION(Value);
RETURN_ESCAPED(result);
@@ -9755,6 +9862,35 @@ int64_t debug::GetNextRandomInt64(v8::Isolate* v8_isolate) {
->NextInt64();
}
+int debug::GetDebuggingId(v8::Local<v8::Function> function) {
+ i::JSReceiver* callable = *v8::Utils::OpenHandle(*function);
+ if (!callable->IsJSFunction()) return i::SharedFunctionInfo::kNoDebuggingId;
+ i::JSFunction* fun = i::JSFunction::cast(callable);
+ i::SharedFunctionInfo* shared = fun->shared();
+ int id = shared->debugging_id();
+ if (id == i::SharedFunctionInfo::kNoDebuggingId) {
+ id = shared->GetHeap()->NextDebuggingId();
+ shared->set_debugging_id(id);
+ }
+ DCHECK_NE(i::SharedFunctionInfo::kNoDebuggingId, id);
+ return id;
+}
+
+bool debug::SetFunctionBreakpoint(v8::Local<v8::Function> function,
+ v8::Local<v8::String> condition,
+ BreakpointId* id) {
+ i::Handle<i::JSReceiver> callable = Utils::OpenHandle(*function);
+ if (!callable->IsJSFunction()) return false;
+ i::Handle<i::JSFunction> jsfunction =
+ i::Handle<i::JSFunction>::cast(callable);
+ i::Isolate* isolate = jsfunction->GetIsolate();
+ i::Handle<i::String> condition_string =
+ condition.IsEmpty() ? isolate->factory()->empty_string()
+ : Utils::OpenHandle(*condition);
+ return isolate->debug()->SetBreakpointForFunction(jsfunction,
+ condition_string, id);
+}
+
Local<String> CpuProfileNode::GetFunctionName() const {
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
i::Isolate* isolate = node->isolate();
@@ -10061,15 +10197,7 @@ CpuProfile* CpuProfiler::StopProfiling(Local<String> title) {
void CpuProfiler::SetIdle(bool is_idle) {
i::CpuProfiler* profiler = reinterpret_cast<i::CpuProfiler*>(this);
i::Isolate* isolate = profiler->isolate();
- if (!isolate->is_profiling()) return;
- v8::StateTag state = isolate->current_vm_state();
- DCHECK(state == v8::EXTERNAL || state == v8::IDLE);
- if (isolate->js_entry_sp() != nullptr) return;
- if (is_idle) {
- isolate->set_current_vm_state(v8::IDLE);
- } else if (state == v8::IDLE) {
- isolate->set_current_vm_state(v8::EXTERNAL);
- }
+ isolate->SetIdle(is_idle);
}
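CpuProfiler::SetIdle keeps its signature but now simply forwards to the new Isolate::SetIdle added earlier in this file, so idleness can be reported without holding a profiler:

// Sketch: report event-loop idleness directly on the isolate.
void OnLoopStateChange(v8::Isolate* isolate, bool idle) {
  isolate->SetIdle(idle);
}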
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index e67f4f7d47..d2e25ae4f7 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -9,7 +9,7 @@
#include "src/contexts.h"
#include "src/debug/debug-interface.h"
#include "src/detachable-vector.h"
-#include "src/factory.h"
+#include "src/heap/factory.h"
#include "src/isolate.h"
#include "src/objects/js-collection.h"
@@ -124,7 +124,8 @@ class Utils {
if (!condition) Utils::ReportApiFailure(location, message);
return condition;
}
- static void ReportOOMFailure(const char* location, bool is_heap_oom);
+ static void ReportOOMFailure(v8::internal::Isolate* isolate,
+ const char* location, bool is_heap_oom);
static inline Local<Context> ToLocal(
v8::internal::Handle<v8::internal::Context> obj);
@@ -202,6 +203,8 @@ class Utils {
v8::internal::Handle<v8::internal::Object> obj);
static inline Local<Uint32> Uint32ToLocal(
v8::internal::Handle<v8::internal::Object> obj);
+ static inline Local<BigInt> ToLocal(
+ v8::internal::Handle<v8::internal::BigInt> obj);
static inline Local<FunctionTemplate> ToLocal(
v8::internal::Handle<v8::internal::FunctionTemplateInfo> obj);
static inline Local<ObjectTemplate> ToLocal(
@@ -333,6 +336,7 @@ MAKE_TO_LOCAL(StackFrameToLocal, StackFrameInfo, StackFrame)
MAKE_TO_LOCAL(NumberToLocal, Object, Number)
MAKE_TO_LOCAL(IntegerToLocal, Object, Integer)
MAKE_TO_LOCAL(Uint32ToLocal, Object, Uint32)
+MAKE_TO_LOCAL(ToLocal, BigInt, BigInt);
MAKE_TO_LOCAL(ExternalToLocal, JSObject, External)
MAKE_TO_LOCAL(CallableToLocal, JSReceiver, Function)
MAKE_TO_LOCAL(ToLocalPrimitive, Object, Primitive)
@@ -630,7 +634,6 @@ void HandleScopeImplementer::EnterMicrotaskContext(Handle<Context> context) {
}
void HandleScopeImplementer::LeaveMicrotaskContext() {
- DCHECK(microtask_context_);
microtask_context_ = nullptr;
entered_context_count_during_microtasks_ = 0;
}
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index 4d7d9895ce..280e4ddfae 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -73,7 +73,8 @@ Address RelocInfo::target_address() {
Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_) ||
- rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE);
+ IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
+ IsOffHeapTarget(rmode_));
if (Assembler::IsMovW(Memory::int32_at(pc_))) {
return reinterpret_cast<Address>(pc_);
} else {
@@ -125,6 +126,12 @@ Address RelocInfo::target_external_reference() {
return Assembler::target_address_at(pc_, constant_pool_);
}
+void RelocInfo::set_target_external_reference(
+ Address target, ICacheFlushMode icache_flush_mode) {
+ DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
+ Assembler::set_target_address_at(pc_, constant_pool_, target,
+ icache_flush_mode);
+}
Address RelocInfo::target_internal_reference() {
DCHECK(rmode_ == INTERNAL_REFERENCE);
@@ -137,6 +144,12 @@ Address RelocInfo::target_internal_reference_address() {
return reinterpret_cast<Address>(pc_);
}
+void RelocInfo::set_wasm_code_table_entry(Address target,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(rmode_ == RelocInfo::WASM_CODE_TABLE_ENTRY);
+ Assembler::set_target_address_at(pc_, constant_pool_, target,
+ icache_flush_mode);
+}
Address RelocInfo::target_runtime_entry(Assembler* origin) {
DCHECK(IsRuntimeEntry(rmode_));
@@ -151,6 +164,11 @@ void RelocInfo::set_target_runtime_entry(Address target,
set_target_address(target, write_barrier_mode, icache_flush_mode);
}
+Address RelocInfo::target_off_heap_target() {
+ DCHECK(IsOffHeapTarget(rmode_));
+ return Assembler::target_address_at(pc_, constant_pool_);
+}
+
void RelocInfo::WipeOut() {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
@@ -175,6 +193,8 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
visitor->VisitInternalReference(host(), this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(host(), this);
+ } else if (RelocInfo::IsOffHeapTarget(mode)) {
+ visitor->VisitOffHeapTarget(host(), this);
}
}
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 1011db4b80..87acd59e84 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -5081,7 +5081,7 @@ void Assembler::GrowBuffer() {
// Some internal data structures overflow for very large buffers,
// they must ensure that kMaximalBufferSize is not too large.
if (desc.buffer_size > kMaximalBufferSize) {
- V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
+ V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
}
// Set up new buffer.
@@ -5144,14 +5144,6 @@ void Assembler::dq(uint64_t value) {
pc_ += sizeof(uint64_t);
}
-
-void Assembler::emit_code_stub_address(Code* stub) {
- CheckBuffer();
- *reinterpret_cast<uint32_t*>(pc_) =
- reinterpret_cast<uint32_t>(stub->instruction_start());
- pc_ += sizeof(uint32_t);
-}
-
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
if (RelocInfo::IsNone(rmode) ||
// Don't record external references unless the heap will be serialized.
@@ -5175,7 +5167,8 @@ void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
}
ConstantPoolEntry entry(position, value,
sharing_ok || (rmode == RelocInfo::CODE_TARGET &&
- IsCodeTargetSharingAllowed()));
+ IsCodeTargetSharingAllowed()),
+ rmode);
bool shared = false;
if (sharing_ok) {
@@ -5183,7 +5176,8 @@ void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
for (size_t i = 0; i < pending_32_bit_constants_.size(); i++) {
ConstantPoolEntry& current_entry = pending_32_bit_constants_[i];
if (!current_entry.sharing_ok()) continue;
- if (entry.value() == current_entry.value()) {
+ if (entry.value() == current_entry.value() &&
+ entry.rmode() == current_entry.rmode()) {
entry.set_merged_index(i);
shared = true;
break;
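With the relocation mode now stored on each ConstantPoolEntry, two pending entries are merged only when both the raw value and the rmode match. A minimal standalone sketch of that sharing test, using simplified stand-in types rather than the assembler's actual classes:

#include <cstdint>
#include <vector>

// Simplified stand-in for ConstantPoolEntry: a payload plus its reloc mode.
struct Entry {
  intptr_t value;
  int rmode;        // e.g. CODE_TARGET, EXTERNAL_REFERENCE, ...
  bool sharing_ok;
};

// Returns the index of an existing entry the new one may share, or -1.
// Both the payload and the relocation mode must agree; identical bits with
// different semantics (say, a code target vs. an external reference) must
// not be folded into one pool slot.
int FindShareable(const std::vector<Entry>& pending, const Entry& entry) {
  for (size_t i = 0; i < pending.size(); ++i) {
    if (!pending[i].sharing_ok) continue;
    if (pending[i].value == entry.value && pending[i].rmode == entry.rmode) {
      return static_cast<int>(i);
    }
  }
  return -1;
}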
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 32baa0ae8d..bbe0ba753e 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -162,8 +162,8 @@ class Register : public RegisterBase<Register, kRegAfterLast> {
explicit constexpr Register(int code) : RegisterBase(code) {}
};
-static_assert(IS_TRIVIALLY_COPYABLE(Register) &&
- sizeof(Register) == sizeof(int),
+ASSERT_TRIVIALLY_COPYABLE(Register);
+static_assert(sizeof(Register) == sizeof(int),
"Register can efficiently be passed by value");
// r7: context register
@@ -218,8 +218,8 @@ class SwVfpRegister : public RegisterBase<SwVfpRegister, kSwVfpAfterLast> {
explicit constexpr SwVfpRegister(int code) : RegisterBase(code) {}
};
-static_assert(IS_TRIVIALLY_COPYABLE(SwVfpRegister) &&
- sizeof(SwVfpRegister) == sizeof(int),
+ASSERT_TRIVIALLY_COPYABLE(SwVfpRegister);
+static_assert(sizeof(SwVfpRegister) == sizeof(int),
"SwVfpRegister can efficiently be passed by value");
typedef SwVfpRegister FloatRegister;
@@ -256,8 +256,8 @@ class DwVfpRegister : public RegisterBase<DwVfpRegister, kDoubleAfterLast> {
explicit constexpr DwVfpRegister(int code) : RegisterBase(code) {}
};
-static_assert(IS_TRIVIALLY_COPYABLE(DwVfpRegister) &&
- sizeof(DwVfpRegister) == sizeof(int),
+ASSERT_TRIVIALLY_COPYABLE(DwVfpRegister);
+static_assert(sizeof(DwVfpRegister) == sizeof(int),
"DwVfpRegister can efficiently be passed by value");
typedef DwVfpRegister DoubleRegister;
@@ -1491,9 +1491,6 @@ class Assembler : public AssemblerBase {
void dq(uint64_t data);
void dp(uintptr_t data) { dd(data); }
- // Emits the address of the code stub's first instruction.
- void emit_code_stub_address(Code* stub);
-
// Read/patch instructions
Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
void instr_at_put(int pos, Instr instr) {
@@ -1820,15 +1817,14 @@ class UseScratchRegisterScope {
return reg;
}
+ // Check if we have registers available to acquire.
+ bool CanAcquire() const { return *assembler_->GetScratchRegisterList() != 0; }
+ bool CanAcquireD() const { return CanAcquireVfp<DwVfpRegister>(); }
+
private:
friend class Assembler;
friend class TurboAssembler;
- // Check if we have registers available to acquire.
- // These methods are kept private intentionally to restrict their usage to the
- // assemblers. Choosing to emit a difference instruction sequence depending on
- // the availability of scratch registers is generally their job.
- bool CanAcquire() const { return *assembler_->GetScratchRegisterList() != 0; }
template <typename T>
bool CanAcquireVfp() const;
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index 2695bafc1b..814d341f7e 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -141,42 +141,30 @@ void MathPowStub::Generate(MacroAssembler* masm) {
const Register scratch2 = r4;
Label call_runtime, done, int_exponent;
- if (exponent_type() == TAGGED) {
- // Base is already in double_base.
- __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
- __ vldr(double_exponent,
- FieldMemOperand(exponent, HeapNumber::kValueOffset));
- }
-
- if (exponent_type() != INTEGER) {
- // Detect integer exponents stored as double.
- __ TryDoubleToInt32Exact(scratch, double_exponent, double_scratch);
- __ b(eq, &int_exponent);
+ // Detect integer exponents stored as double.
+ __ TryDoubleToInt32Exact(scratch, double_exponent, double_scratch);
+ __ b(eq, &int_exponent);
- __ push(lr);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(0, 2);
- __ MovToFloatParameters(double_base, double_exponent);
- __ CallCFunction(
- ExternalReference::power_double_double_function(isolate()), 0, 2);
- }
- __ pop(lr);
- __ MovFromFloatResult(double_result);
- __ b(&done);
+ __ push(lr);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(0, 2);
+ __ MovToFloatParameters(double_base, double_exponent);
+ __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
+ 0, 2);
}
+ __ pop(lr);
+ __ MovFromFloatResult(double_result);
+ __ b(&done);
// Calculate power with integer exponent.
__ bind(&int_exponent);
// Get two copies of exponent in the registers scratch and exponent.
- if (exponent_type() == INTEGER) {
- __ mov(scratch, exponent);
- } else {
- // Exponent has previously been stored into scratch as untagged integer.
- __ mov(exponent, scratch);
- }
+ // Exponent has previously been stored into scratch as untagged integer.
+ __ mov(exponent, scratch);
+
__ vmov(double_scratch, double_base); // Back up base.
__ vmov(double_result, Double(1.0), scratch2);
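The stub now always receives untagged doubles and takes the integer path only when the exponent is exactly representable as a 32-bit integer. The same control flow written as ordinary C++, as an illustrative sketch of the algorithm rather than the generated code:

#include <cmath>
#include <cstdint>

double MathPowSketch(double base, double exponent) {
  // Integer fast path: mirrors TryDoubleToInt32Exact / the int_exponent label.
  if (exponent >= INT32_MIN && exponent <= INT32_MAX &&
      exponent == std::trunc(exponent)) {
    int32_t int_exp = static_cast<int32_t>(exponent);
    uint32_t e = int_exp < 0
                     ? static_cast<uint32_t>(-static_cast<int64_t>(int_exp))
                     : static_cast<uint32_t>(int_exp);
    double result = 1.0;
    double b = base;
    while (e != 0) {           // exponentiation by squaring
      if (e & 1) result *= b;
      b *= b;
      e >>= 1;
    }
    return int_exp < 0 ? 1.0 / result : result;
  }
  // Otherwise call out to C, like the power_double_double external reference.
  return std::pow(base, exponent);
}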
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index b3e880e048..ff439f8259 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -9,6 +9,7 @@
#include "src/arm/assembler-arm-inl.h"
#include "src/arm/simulator-arm.h"
#include "src/codegen.h"
+#include "src/isolate.h"
#include "src/macro-assembler.h"
namespace v8 {
diff --git a/deps/v8/src/arm/frame-constants-arm.h b/deps/v8/src/arm/frame-constants-arm.h
index 1230a26956..3d69f6dfd1 100644
--- a/deps/v8/src/arm/frame-constants-arm.h
+++ b/deps/v8/src/arm/frame-constants-arm.h
@@ -10,36 +10,38 @@ namespace internal {
class EntryFrameConstants : public AllStatic {
public:
- static const int kCallerFPOffset =
+ static constexpr int kCallerFPOffset =
-(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
};
class ExitFrameConstants : public TypedFrameConstants {
public:
- static const int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
- static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+ static constexpr int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+ static constexpr int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
DEFINE_TYPED_FRAME_SIZES(2);
// The caller fields are below the frame pointer on the stack.
- static const int kCallerFPOffset = 0 * kPointerSize;
+ static constexpr int kCallerFPOffset = 0 * kPointerSize;
// The calling JS function is below FP.
- static const int kCallerPCOffset = 1 * kPointerSize;
+ static constexpr int kCallerPCOffset = 1 * kPointerSize;
// FP-relative displacement of the caller's SP. It points just
// below the saved PC.
- static const int kCallerSPDisplacement = 2 * kPointerSize;
+ static constexpr int kCallerSPDisplacement = 2 * kPointerSize;
};
class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
- static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
- static const int kLastParameterOffset = +2 * kPointerSize;
- static const int kFunctionOffset = StandardFrameConstants::kFunctionOffset;
+ static constexpr int kLocal0Offset =
+ StandardFrameConstants::kExpressionsOffset;
+ static constexpr int kLastParameterOffset = +2 * kPointerSize;
+ static constexpr int kFunctionOffset =
+ StandardFrameConstants::kFunctionOffset;
// Caller SP-relative.
- static const int kParam0Offset = -2 * kPointerSize;
- static const int kReceiverOffset = -1 * kPointerSize;
+ static constexpr int kParam0Offset = -2 * kPointerSize;
+ static constexpr int kReceiverOffset = -1 * kPointerSize;
};
} // namespace internal
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 3a96b640a2..e363e0ecfe 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -1370,30 +1370,15 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
- Label skip_hook, call_hook;
-
- ExternalReference debug_is_active =
- ExternalReference::debug_is_active_address(isolate());
- mov(r4, Operand(debug_is_active));
- ldrsb(r4, MemOperand(r4));
- cmp(r4, Operand(0));
- b(eq, &skip_hook);
+ Label skip_hook;
ExternalReference debug_hook_avtive =
ExternalReference::debug_hook_on_function_call_address(isolate());
mov(r4, Operand(debug_hook_avtive));
ldrsb(r4, MemOperand(r4));
cmp(r4, Operand(0));
- b(ne, &call_hook);
-
- ldr(r4, FieldMemOperand(fun, JSFunction::kSharedFunctionInfoOffset));
- ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kDebugInfoOffset));
- JumpIfSmi(r4, &skip_hook);
- ldr(r4, FieldMemOperand(r4, DebugInfo::kFlagsOffset));
- tst(r4, Operand(Smi::FromInt(DebugInfo::kBreakAtEntry)));
b(eq, &skip_hook);
- bind(&call_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -1730,12 +1715,20 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
-void MacroAssembler::JumpToInstructionStream(const InstructionStream* stream) {
- int32_t bytes_address = reinterpret_cast<int32_t>(stream->bytes());
- mov(kOffHeapTrampolineRegister, Operand(bytes_address, RelocInfo::NONE));
+void MacroAssembler::JumpToInstructionStream(Address entry) {
+ mov(kOffHeapTrampolineRegister,
+ Operand(reinterpret_cast<int32_t>(entry), RelocInfo::OFF_HEAP_TARGET));
Jump(kOffHeapTrampolineRegister);
}
+void MacroAssembler::LoadWeakValue(Register out, Register in,
+ Label* target_if_cleared) {
+ cmp(in, Operand(kClearedWeakHeapObject));
+ b(eq, target_if_cleared);
+
+ and_(out, in, Operand(~kWeakHeapObjectMask));
+}
+
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
DCHECK_GT(value, 0);
@@ -1892,6 +1885,20 @@ void MacroAssembler::AssertFixedArray(Register object) {
}
}
+void MacroAssembler::AssertConstructor(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ tst(object, Operand(kSmiTagMask));
+ Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor);
+ push(object);
+ ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
+ ldrb(object, FieldMemOperand(object, Map::kBitFieldOffset));
+ tst(object, Operand(Map::IsConstructorBit::kMask));
+ pop(object);
+ Check(ne, AbortReason::kOperandIsNotAConstructor);
+ }
+}
+
void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@@ -1958,7 +1965,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
void TurboAssembler::CheckFor32DRegs(Register scratch) {
- mov(scratch, Operand(ExternalReference::cpu_features()));
+ mov(scratch, Operand(ExternalReference::cpu_features(isolate())));
ldr(scratch, MemOperand(scratch));
tst(scratch, Operand(1u << VFP32DREGS));
}
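The new LoadWeakValue helper decodes an in-place weak reference: a cleared reference compares equal to a sentinel value, while a live one carries the object pointer under a tag bit that has to be masked off. A plain-C++ sketch of that decoding, with illustrative constants that stand in for V8's actual encoding:

#include <cstdint>
#include <optional>

// Assumed values for illustration only; the real kWeakHeapObjectMask and
// kClearedWeakHeapObject come from V8's object headers.
constexpr uintptr_t kWeakTagMaskSketch = 0x2;
constexpr uintptr_t kClearedWeakSketch = 0x3;

// Mirrors the generated code: compare against the cleared sentinel and
// bail out (here: return nullopt), otherwise strip the weak tag bit.
std::optional<uintptr_t> LoadWeakValueSketch(uintptr_t maybe_weak) {
  if (maybe_weak == kClearedWeakSketch) return std::nullopt;  // target_if_cleared
  return maybe_weak & ~kWeakTagMaskSketch;
}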
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 50ce6dc005..af8b449de6 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -28,7 +28,7 @@ constexpr Register kInterpreterDispatchTableRegister = r8;
constexpr Register kJavaScriptCallArgCountRegister = r0;
constexpr Register kJavaScriptCallCodeStartRegister = r2;
constexpr Register kJavaScriptCallNewTargetRegister = r3;
-constexpr Register kOffHeapTrampolineRegister = r4;
+constexpr Register kOffHeapTrampolineRegister = r6;
constexpr Register kRuntimeCallFunctionRegister = r1;
constexpr Register kRuntimeCallArgCountRegister = r0;
@@ -799,7 +799,11 @@ class MacroAssembler : public TurboAssembler {
bool builtin_exit_frame = false);
// Generates a trampoline to jump to the off-heap instruction stream.
- void JumpToInstructionStream(const InstructionStream* stream);
+ void JumpToInstructionStream(Address entry);
+
+ // ---------------------------------------------------------------------------
+ // In-place weak references.
+ void LoadWeakValue(Register out, Register in, Label* target_if_cleared);
// ---------------------------------------------------------------------------
// StatsCounter support
@@ -833,6 +837,9 @@ class MacroAssembler : public TurboAssembler {
// Abort execution if argument is not a FixedArray, enabled via --debug-code.
void AssertFixedArray(Register object);
+ // Abort execution if argument is not a Constructor, enabled via --debug-code.
+ void AssertConstructor(Register object);
+
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
diff --git a/deps/v8/src/arm64/assembler-arm64-inl.h b/deps/v8/src/arm64/assembler-arm64-inl.h
index 0c31400d9c..52f552270a 100644
--- a/deps/v8/src/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/assembler-arm64-inl.h
@@ -613,7 +613,8 @@ Address RelocInfo::target_address() {
Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_) ||
- rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE);
+ IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
+ IsOffHeapTarget(rmode_));
return Assembler::target_pointer_address_at(pc_);
}
@@ -655,6 +656,12 @@ Address RelocInfo::target_external_reference() {
return Assembler::target_address_at(pc_, constant_pool_);
}
+void RelocInfo::set_target_external_reference(
+ Address target, ICacheFlushMode icache_flush_mode) {
+ DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
+ Assembler::set_target_address_at(pc_, constant_pool_, target,
+ icache_flush_mode);
+}
Address RelocInfo::target_internal_reference() {
DCHECK(rmode_ == INTERNAL_REFERENCE);
@@ -667,6 +674,12 @@ Address RelocInfo::target_internal_reference_address() {
return reinterpret_cast<Address>(pc_);
}
+void RelocInfo::set_wasm_code_table_entry(Address target,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(rmode_ == RelocInfo::WASM_CODE_TABLE_ENTRY);
+ Assembler::set_target_address_at(pc_, constant_pool_, target,
+ icache_flush_mode);
+}
Address RelocInfo::target_runtime_entry(Assembler* origin) {
DCHECK(IsRuntimeEntry(rmode_));
@@ -682,6 +695,11 @@ void RelocInfo::set_target_runtime_entry(Address target,
}
}
+Address RelocInfo::target_off_heap_target() {
+ DCHECK(IsOffHeapTarget(rmode_));
+ return Assembler::target_address_at(pc_, constant_pool_);
+}
+
void RelocInfo::WipeOut() {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
@@ -706,6 +724,8 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
visitor->VisitInternalReference(host(), this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(host(), this);
+ } else if (RelocInfo::IsOffHeapTarget(mode)) {
+ visitor->VisitOffHeapTarget(host(), this);
}
}
diff --git a/deps/v8/src/arm64/assembler-arm64.cc b/deps/v8/src/arm64/assembler-arm64.cc
index 52c2e4643f..fe81147d76 100644
--- a/deps/v8/src/arm64/assembler-arm64.cc
+++ b/deps/v8/src/arm64/assembler-arm64.cc
@@ -1749,7 +1749,6 @@ void Assembler::stlr(const Register& rt, const Register& rn) {
void Assembler::stlxr(const Register& rs, const Register& rt,
const Register& rn) {
- DCHECK(rs.Is32Bits());
DCHECK(rn.Is64Bits());
DCHECK(!rs.Is(rt) && !rs.Is(rn));
LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? STLXR_w : STLXR_x;
@@ -4698,7 +4697,7 @@ void Assembler::GrowBuffer() {
// Some internal data structures overflow for very large buffers,
// they must ensure that kMaximalBufferSize is not too large.
if (desc.buffer_size > kMaximalBufferSize) {
- V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
+ V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
}
byte* buffer = reinterpret_cast<byte*>(buffer_);
diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h
index c956c072b7..94122e3939 100644
--- a/deps/v8/src/arm64/assembler-arm64.h
+++ b/deps/v8/src/arm64/assembler-arm64.h
@@ -39,8 +39,8 @@ namespace internal {
#define ALLOCATABLE_GENERAL_REGISTERS(R) \
R(x0) R(x1) R(x2) R(x3) R(x4) R(x5) R(x6) R(x7) \
R(x8) R(x9) R(x10) R(x11) R(x12) R(x13) R(x14) R(x15) \
- R(x18) R(x19) R(x20) R(x21) R(x22) R(x23) R(x24) R(x27) \
- R(x28)
+ R(x18) R(x19) R(x20) R(x21) R(x22) R(x23) R(x24) R(x25) \
+ R(x27) R(x28)
#define FLOAT_REGISTERS(V) \
V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) \
@@ -60,6 +60,8 @@ namespace internal {
V(q16) V(q17) V(q18) V(q19) V(q20) V(q21) V(q22) V(q23) \
V(q24) V(q25) V(q26) V(q27) V(q28) V(q29) V(q30) V(q31)
+// Register d29 could be allocated, but we keep an even length list here, in
+// order to make stack alignment easier for save and restore.
#define ALLOCATABLE_DOUBLE_REGISTERS(R) \
R(d0) R(d1) R(d2) R(d3) R(d4) R(d5) R(d6) R(d7) \
R(d8) R(d9) R(d10) R(d11) R(d12) R(d13) R(d14) R(d16) \
@@ -202,7 +204,6 @@ class CPURegister : public RegisterBase<CPURegister, kRegAfterLast> {
bool IsSameSizeAndType(const CPURegister& other) const;
- // V8 compatibility.
bool is(const CPURegister& other) const { return Is(other); }
bool is_valid() const { return IsValid(); }
@@ -237,8 +238,7 @@ class CPURegister : public RegisterBase<CPURegister, kRegAfterLast> {
}
};
-static_assert(IS_TRIVIALLY_COPYABLE(CPURegister),
- "CPURegister can efficiently be passed by value");
+ASSERT_TRIVIALLY_COPYABLE(CPURegister);
class Register : public CPURegister {
public:
@@ -256,25 +256,6 @@ class Register : public CPURegister {
static Register XRegFromCode(unsigned code);
static Register WRegFromCode(unsigned code);
- // Start of V8 compatibility section ---------------------
- // These memebers are necessary for compilation.
- // A few of them may be unused for now.
-
- // We allow crankshaft to use the following registers:
- // - x0 to x15
- // - x18 to x24
- // - x27 (also context)
- //
- // TODO(all): Register x25 is currently free and could be available for
- // crankshaft, but we don't use it as we might use it as a per function
- // literal pool pointer in the future.
- //
- // TODO(all): Consider storing cp in x25 to have only two ranges.
- // We split allocatable registers in three ranges called
- // - "low range"
- // - "high range"
- // - "context"
-
static Register from_code(int code) {
// Always return an X register.
return Register::Create(code, kXRegSizeInBits);
@@ -286,14 +267,11 @@ class Register : public CPURegister {
return Register::Create<code, kXRegSizeInBits>();
}
- // End of V8 compatibility section -----------------------
- //
private:
constexpr explicit Register(const CPURegister& r) : CPURegister(r) {}
};
-static_assert(IS_TRIVIALLY_COPYABLE(Register),
- "Register can efficiently be passed by value");
+ASSERT_TRIVIALLY_COPYABLE(Register);
constexpr bool kPadArguments = true;
constexpr bool kSimpleFPAliasing = true;
@@ -405,19 +383,13 @@ class VRegister : public CPURegister {
unsigned LaneSizeInBits() const { return LaneSizeInBytes() * 8; }
- // Start of V8 compatibility section ---------------------
static constexpr int kMaxNumRegisters = kNumberOfVRegisters;
STATIC_ASSERT(kMaxNumRegisters == kDoubleAfterLast);
- // Crankshaft can use all the V registers except:
- // - d15 which is used to keep the 0 double value
- // - d30 which is used in crankshaft as a double scratch register
- // - d31 which is used in the MacroAssembler as a double scratch register
static VRegister from_code(int code) {
// Always return a D register.
return VRegister::Create(code, kDRegSizeInBits);
}
- // End of V8 compatibility section -----------------------
private:
int lane_count_;
@@ -430,8 +402,7 @@ class VRegister : public CPURegister {
}
};
-static_assert(IS_TRIVIALLY_COPYABLE(VRegister),
- "VRegister can efficiently be passed by value");
+ASSERT_TRIVIALLY_COPYABLE(VRegister);
// No*Reg is used to indicate an unused argument, or an error case. Note that
// these all compare equal (using the Is() method). The Register and VRegister
@@ -439,8 +410,6 @@ static_assert(IS_TRIVIALLY_COPYABLE(VRegister),
constexpr Register NoReg = Register::no_reg();
constexpr VRegister NoVReg = VRegister::no_reg();
constexpr CPURegister NoCPUReg = CPURegister::no_reg();
-
-// v8 compatibility.
constexpr Register no_reg = NoReg;
#define DEFINE_REGISTER(register_class, name, ...) \
@@ -491,11 +460,9 @@ ALIAS_REGISTER(Register, padreg, x31);
// Keeps the 0 double value.
ALIAS_REGISTER(VRegister, fp_zero, d15);
// MacroAssembler fixed V Registers.
-ALIAS_REGISTER(VRegister, fp_fixed1, d27);
-ALIAS_REGISTER(VRegister, fp_fixed2, d28);
-ALIAS_REGISTER(VRegister, fp_fixed3, d29); // same as Crankshaft scratch.
-// Crankshaft double scratch register.
-ALIAS_REGISTER(VRegister, crankshaft_fp_scratch, d29);
+ALIAS_REGISTER(VRegister, fp_fixed1, d28);
+ALIAS_REGISTER(VRegister, fp_fixed2, d29);
+
// MacroAssembler scratch V registers.
ALIAS_REGISTER(VRegister, fp_scratch, d30);
ALIAS_REGISTER(VRegister, fp_scratch1, d30);
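The note about keeping the allocatable double-register list even-length relates to stack alignment: AArch64 requires SP to remain 16-byte aligned, and each D register occupies 8 bytes, so saving an even number of them keeps the save area a multiple of 16. A quick compile-time check of that arithmetic (illustrative only):

#include <cstddef>

constexpr size_t kDRegSizeInBytes = 8;   // one double register
constexpr size_t kStackAlignment = 16;   // AArch64 SP alignment requirement

constexpr bool SaveAreaKeepsAlignment(size_t num_d_registers) {
  return (num_d_registers * kDRegSizeInBytes) % kStackAlignment == 0;
}

static_assert(SaveAreaKeepsAlignment(2), "even count stays 16-byte aligned");
static_assert(!SaveAreaKeepsAlignment(3), "odd count would misalign SP");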
diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc
index 07d020880d..c1cb3025a1 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.cc
+++ b/deps/v8/src/arm64/code-stubs-arm64.cc
@@ -122,36 +122,29 @@ void MathPowStub::Generate(MacroAssembler* masm) {
Label done;
// Unpack the inputs.
- if (exponent_type() == TAGGED) {
- __ JumpIfSmi(exponent_tagged, &exponent_is_smi);
- __ Ldr(exponent_double,
- FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
- }
// Handle double (heap number) exponents.
- if (exponent_type() != INTEGER) {
- // Detect integer exponents stored as doubles and handle those in the
- // integer fast-path.
- __ TryRepresentDoubleAsInt64(exponent_integer, exponent_double,
- scratch0_double, &exponent_is_integer);
-
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ Mov(saved_lr, lr);
- __ CallCFunction(
- ExternalReference::power_double_double_function(isolate()), 0, 2);
- __ Mov(lr, saved_lr);
- __ B(&done);
- }
+ // Detect integer exponents stored as doubles and handle those in the
+ // integer fast-path.
+ __ TryRepresentDoubleAsInt64(exponent_integer, exponent_double,
+ scratch0_double, &exponent_is_integer);
- // Handle SMI exponents.
- __ Bind(&exponent_is_smi);
- // x10 base_tagged The tagged base (input).
- // x11 exponent_tagged The tagged exponent (input).
- // d1 base_double The base as a double.
- __ SmiUntag(exponent_integer, exponent_tagged);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ Mov(saved_lr, lr);
+ __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
+ 0, 2);
+ __ Mov(lr, saved_lr);
+ __ B(&done);
}
+ // Handle SMI exponents.
+ __ Bind(&exponent_is_smi);
+ // x10 base_tagged The tagged base (input).
+ // x11 exponent_tagged The tagged exponent (input).
+ // d1 base_double The base as a double.
+ __ SmiUntag(exponent_integer, exponent_tagged);
+
__ Bind(&exponent_is_integer);
// x10 base_tagged The tagged base (input).
// x11 exponent_tagged The tagged exponent (input).
diff --git a/deps/v8/src/arm64/codegen-arm64.cc b/deps/v8/src/arm64/codegen-arm64.cc
index 1016e3707a..ad77033280 100644
--- a/deps/v8/src/arm64/codegen-arm64.cc
+++ b/deps/v8/src/arm64/codegen-arm64.cc
@@ -8,6 +8,7 @@
#include "src/arm64/macro-assembler-arm64-inl.h"
#include "src/arm64/simulator-arm64.h"
#include "src/codegen.h"
+#include "src/isolate.h"
#include "src/macro-assembler.h"
namespace v8 {
diff --git a/deps/v8/src/arm64/frame-constants-arm64.h b/deps/v8/src/arm64/frame-constants-arm64.h
index 00ac99d1be..662f426033 100644
--- a/deps/v8/src/arm64/frame-constants-arm64.h
+++ b/deps/v8/src/arm64/frame-constants-arm64.h
@@ -31,31 +31,33 @@ namespace internal {
//
class EntryFrameConstants : public AllStatic {
public:
- static const int kCallerFPOffset = -3 * kPointerSize;
- static const int kFixedFrameSize = 6 * kPointerSize;
+ static constexpr int kCallerFPOffset = -3 * kPointerSize;
+ static constexpr int kFixedFrameSize = 6 * kPointerSize;
};
class ExitFrameConstants : public TypedFrameConstants {
public:
- static const int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
- static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
- static const int kPaddingOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(2);
+ static constexpr int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+ static constexpr int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+ static constexpr int kPaddingOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(2);
DEFINE_TYPED_FRAME_SIZES(3);
- static const int kLastExitFrameField = kPaddingOffset;
+ static constexpr int kLastExitFrameField = kPaddingOffset;
- static const int kConstantPoolOffset = 0; // Not used
+ static constexpr int kConstantPoolOffset = 0; // Not used
};
class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
- static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
+ static constexpr int kLocal0Offset =
+ StandardFrameConstants::kExpressionsOffset;
// There are two words on the stack (saved fp and saved lr) between fp and
// the arguments.
- static const int kLastParameterOffset = 2 * kPointerSize;
+ static constexpr int kLastParameterOffset = 2 * kPointerSize;
- static const int kFunctionOffset = StandardFrameConstants::kFunctionOffset;
+ static constexpr int kFunctionOffset =
+ StandardFrameConstants::kFunctionOffset;
};
} // namespace internal
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
index 267bc2151b..5bbf71e28c 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -1625,6 +1625,21 @@ void MacroAssembler::AssertFixedArray(Register object) {
}
}
+void MacroAssembler::AssertConstructor(Register object) {
+ if (emit_debug_code()) {
+ AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotAConstructor);
+
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+
+ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+ Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
+ Tst(temp, Operand(Map::IsConstructorBit::kMask));
+
+ Check(ne, AbortReason::kOperandIsNotAConstructor);
+ }
+}
+
void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotAFunction);
@@ -1759,9 +1774,9 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
-void MacroAssembler::JumpToInstructionStream(const InstructionStream* stream) {
- uint64_t bytes_address = reinterpret_cast<uint64_t>(stream->bytes());
- Mov(kOffHeapTrampolineRegister, bytes_address);
+void MacroAssembler::JumpToInstructionStream(Address entry) {
+ Mov(kOffHeapTrampolineRegister,
+ Operand(reinterpret_cast<uint64_t>(entry), RelocInfo::OFF_HEAP_TARGET));
Br(kOffHeapTrampolineRegister);
}
@@ -2144,28 +2159,14 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
- Label skip_hook, call_hook;
-
- ExternalReference debug_is_active =
- ExternalReference::debug_is_active_address(isolate());
- Mov(x4, Operand(debug_is_active));
- Ldrsb(x4, MemOperand(x4));
- Cbz(x4, &skip_hook);
+ Label skip_hook;
ExternalReference debug_hook_active =
ExternalReference::debug_hook_on_function_call_address(isolate());
Mov(x4, Operand(debug_hook_active));
Ldrsb(x4, MemOperand(x4));
- Cbnz(x4, &call_hook);
-
- Ldr(x4, FieldMemOperand(fun, JSFunction::kSharedFunctionInfoOffset));
- Ldr(x4, FieldMemOperand(x4, SharedFunctionInfo::kDebugInfoOffset));
- JumpIfSmi(x4, &skip_hook);
- Ldr(x4, FieldMemOperand(x4, DebugInfo::kFlagsOffset));
- Tst(x4, Operand(Smi::FromInt(DebugInfo::kBreakAtEntry)));
- B(eq, &skip_hook);
+ Cbz(x4, &skip_hook);
- bind(&call_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -2503,6 +2504,12 @@ void MacroAssembler::LeaveExitFrame(bool restore_doubles,
Pop(fp, lr);
}
+void MacroAssembler::LoadWeakValue(Register out, Register in,
+ Label* target_if_cleared) {
+ CompareAndBranch(in, Operand(kClearedWeakHeapObject), eq, target_if_cleared);
+
+ and_(out, in, Operand(~kWeakHeapObjectMask));
+}
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h
index c72cb39536..6b1b8957cb 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64.h
@@ -1713,6 +1713,9 @@ class MacroAssembler : public TurboAssembler {
// Abort execution if argument is not a FixedArray, enabled via --debug-code.
void AssertFixedArray(Register object);
+ // Abort execution if argument is not a Constructor, enabled via --debug-code.
+ void AssertConstructor(Register object);
+
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
@@ -1771,7 +1774,7 @@ class MacroAssembler : public TurboAssembler {
bool builtin_exit_frame = false);
// Generates a trampoline to jump to the off-heap instruction stream.
- void JumpToInstructionStream(const InstructionStream* stream);
+ void JumpToInstructionStream(Address entry);
// Registers used through the invocation chain are hard-coded.
// We force passing the parameters to ensure the contracts are correctly
@@ -1922,6 +1925,10 @@ class MacroAssembler : public TurboAssembler {
}
// ---------------------------------------------------------------------------
+ // In-place weak references.
+ void LoadWeakValue(Register out, Register in, Label* target_if_cleared);
+
+ // ---------------------------------------------------------------------------
// StatsCounter support
void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
diff --git a/deps/v8/src/arm64/simulator-arm64.cc b/deps/v8/src/arm64/simulator-arm64.cc
index 5c72cf1c90..290be13cd6 100644
--- a/deps/v8/src/arm64/simulator-arm64.cc
+++ b/deps/v8/src/arm64/simulator-arm64.cc
@@ -2083,6 +2083,8 @@ Simulator::TransactionSize Simulator::get_transaction_size(unsigned size) {
return TransactionSize::HalfWord;
case 4:
return TransactionSize::Word;
+ case 8:
+ return TransactionSize::DoubleWord;
default:
UNREACHABLE();
}
@@ -2127,6 +2129,10 @@ void Simulator::VisitLoadStoreAcquireRelease(Instruction* instr) {
case LDAXR_w:
set_wreg_no_log(rt, MemoryRead<uint32_t>(address));
break;
+ case LDAR_x:
+ case LDAXR_x:
+ set_xreg_no_log(rt, MemoryRead<uint64_t>(address));
+ break;
default:
UNIMPLEMENTED();
}
@@ -2150,6 +2156,9 @@ void Simulator::VisitLoadStoreAcquireRelease(Instruction* instr) {
case STLXR_w:
MemoryWrite<uint32_t>(address, wreg(rt));
break;
+ case STLXR_x:
+ MemoryWrite<uint64_t>(address, xreg(rt));
+ break;
default:
UNIMPLEMENTED();
}
@@ -2171,6 +2180,9 @@ void Simulator::VisitLoadStoreAcquireRelease(Instruction* instr) {
case STLR_w:
MemoryWrite<uint32_t>(address, wreg(rt));
break;
+ case STLR_x:
+ MemoryWrite<uint64_t>(address, xreg(rt));
+ break;
default:
UNIMPLEMENTED();
}
diff --git a/deps/v8/src/arm64/simulator-arm64.h b/deps/v8/src/arm64/simulator-arm64.h
index 18fa4d44ec..8cd1e02b6f 100644
--- a/deps/v8/src/arm64/simulator-arm64.h
+++ b/deps/v8/src/arm64/simulator-arm64.h
@@ -2196,6 +2196,7 @@ class Simulator : public DecoderVisitor, public SimulatorBase {
Byte = 1,
HalfWord = 2,
Word = 4,
+ DoubleWord = 8,
};
TransactionSize get_transaction_size(unsigned size);
diff --git a/deps/v8/src/asmjs/asm-js.cc b/deps/v8/src/asmjs/asm-js.cc
index fc56b7e23a..3d835fb2cb 100644
--- a/deps/v8/src/asmjs/asm-js.cc
+++ b/deps/v8/src/asmjs/asm-js.cc
@@ -10,16 +10,16 @@
#include "src/ast/ast.h"
#include "src/base/optional.h"
#include "src/base/platform/elapsed-timer.h"
-#include "src/compilation-info.h"
#include "src/compiler.h"
#include "src/execution.h"
-#include "src/factory.h"
#include "src/handles.h"
+#include "src/heap/factory.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/scanner-character-streams.h"
#include "src/parsing/scanner.h"
+#include "src/unoptimized-compilation-info.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-js.h"
@@ -65,21 +65,20 @@ bool AreStdlibMembersValid(Isolate* isolate, Handle<JSReceiver> stdlib,
Handle<Object> value = JSReceiver::GetDataProperty(stdlib, name);
if (!value->IsNaN()) return false;
}
-#define STDLIB_MATH_FUNC(fname, FName, ignore1, ignore2) \
- if (members.Contains(wasm::AsmJsParser::StandardMember::kMath##FName)) { \
- members.Remove(wasm::AsmJsParser::StandardMember::kMath##FName); \
- Handle<Name> name(isolate->factory()->InternalizeOneByteString( \
- STATIC_CHAR_VECTOR(#fname))); \
- Handle<Object> value = StdlibMathMember(isolate, stdlib, name); \
- if (!value->IsJSFunction()) return false; \
- SharedFunctionInfo* shared = Handle<JSFunction>::cast(value)->shared(); \
- if (shared->HasLazyDeserializationBuiltinId()) { \
- if (shared->lazy_deserialization_builtin_id() != Builtins::kMath##FName) \
- return false; \
- } else if (shared->code() != \
- isolate->builtins()->builtin(Builtins::kMath##FName)) { \
- return false; \
- } \
+#define STDLIB_MATH_FUNC(fname, FName, ignore1, ignore2) \
+ if (members.Contains(wasm::AsmJsParser::StandardMember::kMath##FName)) { \
+ members.Remove(wasm::AsmJsParser::StandardMember::kMath##FName); \
+ Handle<Name> name(isolate->factory()->InternalizeOneByteString( \
+ STATIC_CHAR_VECTOR(#fname))); \
+ Handle<Object> value = StdlibMathMember(isolate, stdlib, name); \
+ if (!value->IsJSFunction()) return false; \
+ SharedFunctionInfo* shared = Handle<JSFunction>::cast(value)->shared(); \
+ if (!shared->HasBuiltinId() || \
+ shared->builtin_id() != Builtins::kMath##FName) { \
+ return false; \
+ } \
+ DCHECK_EQ(shared->GetCode(), \
+ isolate->builtins()->builtin(Builtins::kMath##FName)); \
}
STDLIB_MATH_FUNCTION_LIST(STDLIB_MATH_FUNC)
#undef STDLIB_MATH_FUNC
@@ -185,12 +184,12 @@ void ReportInstantiationFailure(Handle<Script> script, int position,
// [2] FinalizeJobImpl: The module is handed to WebAssembly which decodes it
// into an internal representation and eventually compiles it to machine
// code.
-class AsmJsCompilationJob final : public CompilationJob {
+class AsmJsCompilationJob final : public UnoptimizedCompilationJob {
public:
explicit AsmJsCompilationJob(ParseInfo* parse_info, FunctionLiteral* literal,
AccountingAllocator* allocator)
- : CompilationJob(parse_info->stack_limit(), parse_info,
- &compilation_info_, "AsmJs", State::kReadyToExecute),
+ : UnoptimizedCompilationJob(parse_info->stack_limit(), parse_info,
+ &compilation_info_),
allocator_(allocator),
zone_(allocator, ZONE_NAME),
compilation_info_(&zone_, parse_info, literal),
@@ -203,16 +202,16 @@ class AsmJsCompilationJob final : public CompilationJob {
translate_zone_size_(0) {}
protected:
- Status PrepareJobImpl(Isolate* isolate) final;
Status ExecuteJobImpl() final;
- Status FinalizeJobImpl(Isolate* isolate) final;
+ Status FinalizeJobImpl(Handle<SharedFunctionInfo> shared_info,
+ Isolate* isolate) final;
private:
void RecordHistograms(Isolate* isolate);
AccountingAllocator* allocator_;
Zone zone_;
- CompilationInfo compilation_info_;
+ UnoptimizedCompilationInfo compilation_info_;
wasm::ZoneBuffer* module_;
wasm::ZoneBuffer* asm_offsets_;
wasm::AsmJsParser::StdlibSet stdlib_uses_;
@@ -226,12 +225,7 @@ class AsmJsCompilationJob final : public CompilationJob {
DISALLOW_COPY_AND_ASSIGN(AsmJsCompilationJob);
};
-CompilationJob::Status AsmJsCompilationJob::PrepareJobImpl(Isolate* isolate) {
- UNREACHABLE(); // Prepare should always be skipped.
- return SUCCEEDED;
-}
-
-CompilationJob::Status AsmJsCompilationJob::ExecuteJobImpl() {
+UnoptimizedCompilationJob::Status AsmJsCompilationJob::ExecuteJobImpl() {
// Step 1: Translate asm.js module to WebAssembly module.
size_t compile_zone_start = compilation_info()->zone()->allocation_size();
base::ElapsedTimer translate_timer;
@@ -276,7 +270,8 @@ CompilationJob::Status AsmJsCompilationJob::ExecuteJobImpl() {
return SUCCEEDED;
}
-CompilationJob::Status AsmJsCompilationJob::FinalizeJobImpl(Isolate* isolate) {
+UnoptimizedCompilationJob::Status AsmJsCompilationJob::FinalizeJobImpl(
+ Handle<SharedFunctionInfo> shared_info, Isolate* isolate) {
// Step 2: Compile and decode the WebAssembly module.
base::ElapsedTimer compile_timer;
compile_timer.Start();
@@ -302,7 +297,6 @@ CompilationJob::Status AsmJsCompilationJob::FinalizeJobImpl(Isolate* isolate) {
result->set(kWasmDataCompiledModule, *compiled);
result->set(kWasmDataUsesBitSet, *uses_bitset);
compilation_info()->SetAsmWasmData(result);
- compilation_info()->SetCode(BUILTIN_CODE(isolate, InstantiateAsmJs));
RecordHistograms(isolate);
ReportCompilationSuccess(parse_info()->script(),
@@ -329,9 +323,9 @@ void AsmJsCompilationJob::RecordHistograms(Isolate* isolate) {
translation_throughput);
}
-CompilationJob* AsmJs::NewCompilationJob(ParseInfo* parse_info,
- FunctionLiteral* literal,
- AccountingAllocator* allocator) {
+UnoptimizedCompilationJob* AsmJs::NewCompilationJob(
+ ParseInfo* parse_info, FunctionLiteral* literal,
+ AccountingAllocator* allocator) {
return new AsmJsCompilationJob(parse_info, literal, allocator);
}
@@ -350,7 +344,7 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(Isolate* isolate,
Handle<Script> script(Script::cast(shared->script()));
// TODO(mstarzinger): The position currently points to the module definition
// but should instead point to the instantiation site (more intuitive).
- int position = shared->start_position();
+ int position = shared->StartPosition();
// Check that all used stdlib members are valid.
bool stdlib_use_of_typed_array_present = false;
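Under the rewritten STDLIB_MATH_FUNC check, a stdlib Math member is accepted only if its SharedFunctionInfo names the expected builtin id; the old comparison against possibly lazily-deserialized code objects is gone. The same predicate expressed with simplified stand-in types (a sketch, not the actual macro expansion):

// Stand-in for the fields the macro consults on SharedFunctionInfo.
struct SharedInfoSketch {
  bool has_builtin_id;
  int builtin_id;
};

// A member such as Math.fround is still valid only when it remains the
// unmodified builtin, i.e. its shared info carries the expected builtin id.
bool IsValidStdlibMathMember(const SharedInfoSketch& shared,
                             int expected_builtin_id) {
  return shared.has_builtin_id && shared.builtin_id == expected_builtin_id;
}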
diff --git a/deps/v8/src/asmjs/asm-js.h b/deps/v8/src/asmjs/asm-js.h
index 9d2ab752ed..1a87ce99b4 100644
--- a/deps/v8/src/asmjs/asm-js.h
+++ b/deps/v8/src/asmjs/asm-js.h
@@ -13,19 +13,18 @@ namespace v8 {
namespace internal {
class AccountingAllocator;
-class CompilationInfo;
-class CompilationJob;
class FunctionLiteral;
class JSArrayBuffer;
class ParseInfo;
class SharedFunctionInfo;
+class UnoptimizedCompilationJob;
// Interface to compile and instantiate for asm.js modules.
class AsmJs {
public:
- static CompilationJob* NewCompilationJob(ParseInfo* parse_info,
- FunctionLiteral* literal,
- AccountingAllocator* allocator);
+ static UnoptimizedCompilationJob* NewCompilationJob(
+ ParseInfo* parse_info, FunctionLiteral* literal,
+ AccountingAllocator* allocator);
static MaybeHandle<Object> InstantiateAsmWasm(Isolate* isolate,
Handle<SharedFunctionInfo>,
Handle<FixedArray> wasm_data,
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index c566f35acb..48d10418c0 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -34,112 +34,19 @@
#include "src/assembler.h"
-#include <math.h>
-#include <string.h>
-#include <cmath>
-
-#include "src/api.h"
#include "src/assembler-inl.h"
-#include "src/base/cpu.h"
-#include "src/base/functional.h"
-#include "src/base/ieee754.h"
-#include "src/base/lazy-instance.h"
-#include "src/base/platform/platform.h"
-#include "src/base/utils/random-number-generator.h"
-#include "src/codegen.h"
-#include "src/compiler/code-assembler.h"
-#include "src/counters.h"
-#include "src/debug/debug.h"
+#include "src/code-stubs.h"
#include "src/deoptimizer.h"
#include "src/disassembler.h"
-#include "src/elements.h"
-#include "src/execution.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/interpreter/bytecodes.h"
-#include "src/interpreter/interpreter.h"
+#include "src/instruction-stream.h"
#include "src/isolate.h"
#include "src/ostreams.h"
-#include "src/regexp/jsregexp.h"
-#include "src/regexp/regexp-macro-assembler.h"
-#include "src/regexp/regexp-stack.h"
-#include "src/register-configuration.h"
-#include "src/runtime/runtime.h"
#include "src/simulator.h" // For flushing instruction cache.
#include "src/snapshot/serializer-common.h"
-#include "src/string-search.h"
-#include "src/wasm/wasm-external-refs.h"
-
-// Include native regexp-macro-assembler.
-#ifndef V8_INTERPRETED_REGEXP
-#if V8_TARGET_ARCH_IA32
-#include "src/regexp/ia32/regexp-macro-assembler-ia32.h" // NOLINT
-#elif V8_TARGET_ARCH_X64
-#include "src/regexp/x64/regexp-macro-assembler-x64.h" // NOLINT
-#elif V8_TARGET_ARCH_ARM64
-#include "src/regexp/arm64/regexp-macro-assembler-arm64.h" // NOLINT
-#elif V8_TARGET_ARCH_ARM
-#include "src/regexp/arm/regexp-macro-assembler-arm.h" // NOLINT
-#elif V8_TARGET_ARCH_PPC
-#include "src/regexp/ppc/regexp-macro-assembler-ppc.h" // NOLINT
-#elif V8_TARGET_ARCH_MIPS
-#include "src/regexp/mips/regexp-macro-assembler-mips.h" // NOLINT
-#elif V8_TARGET_ARCH_MIPS64
-#include "src/regexp/mips64/regexp-macro-assembler-mips64.h" // NOLINT
-#elif V8_TARGET_ARCH_S390
-#include "src/regexp/s390/regexp-macro-assembler-s390.h" // NOLINT
-#else // Unknown architecture.
-#error "Unknown architecture."
-#endif // Target architecture.
-#endif // V8_INTERPRETED_REGEXP
-
-#ifdef V8_INTL_SUPPORT
-#include "src/intl.h"
-#endif // V8_INTL_SUPPORT
namespace v8 {
namespace internal {
-// -----------------------------------------------------------------------------
-// Common double constants.
-
-struct DoubleConstant BASE_EMBEDDED {
-double min_int;
-double one_half;
-double minus_one_half;
-double negative_infinity;
-uint64_t the_hole_nan;
-double uint32_bias;
-};
-
-static DoubleConstant double_constants;
-
-static struct V8_ALIGNED(16) {
- uint32_t a;
- uint32_t b;
- uint32_t c;
- uint32_t d;
-} float_absolute_constant = {0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF};
-
-static struct V8_ALIGNED(16) {
- uint32_t a;
- uint32_t b;
- uint32_t c;
- uint32_t d;
-} float_negate_constant = {0x80000000, 0x80000000, 0x80000000, 0x80000000};
-
-static struct V8_ALIGNED(16) {
- uint64_t a;
- uint64_t b;
-} double_absolute_constant = {uint64_t{0x7FFFFFFFFFFFFFFF},
- uint64_t{0x7FFFFFFFFFFFFFFF}};
-
-static struct V8_ALIGNED(16) {
- uint64_t a;
- uint64_t b;
-} double_negate_constant = {uint64_t{0x8000000000000000},
- uint64_t{0x8000000000000000}};
-
const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
// -----------------------------------------------------------------------------
@@ -295,10 +202,15 @@ const int kLastChunkTagBits = 1;
const int kLastChunkTagMask = 1;
const int kLastChunkTag = 1;
-void RelocInfo::set_wasm_context_reference(Address address,
- ICacheFlushMode icache_flush_mode) {
- DCHECK(IsWasmContextReference(rmode_));
- set_embedded_address(address, icache_flush_mode);
+// static
+bool RelocInfo::OffHeapTargetIsCodedSpecially() {
+#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_ARM64) || \
+ defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_IA32)
+ return false;
+#elif defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
+ defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_S390)
+ return true;
+#endif
}
void RelocInfo::set_global_handle(Address address,
@@ -324,22 +236,6 @@ Address RelocInfo::global_handle() const {
return embedded_address();
}
-uint32_t RelocInfo::wasm_function_table_size_reference() const {
- DCHECK(IsWasmFunctionTableSizeReference(rmode_));
- return embedded_size();
-}
-
-Address RelocInfo::wasm_context_reference() const {
- DCHECK(IsWasmContextReference(rmode_));
- return embedded_address();
-}
-
-void RelocInfo::update_wasm_function_table_size_reference(
- uint32_t old_size, uint32_t new_size, ICacheFlushMode icache_flush_mode) {
- DCHECK(IsWasmFunctionTableSizeReference(rmode_));
- set_embedded_size(new_size, icache_flush_mode);
-}
-
void RelocInfo::set_target_address(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
@@ -556,7 +452,7 @@ void RelocIterator::next() {
RelocIterator::RelocIterator(Code* code, int mode_mask)
: mode_mask_(mode_mask) {
rinfo_.host_ = code;
- rinfo_.pc_ = code->instruction_start();
+ rinfo_.pc_ = code->raw_instruction_start();
rinfo_.data_ = 0;
rinfo_.constant_pool_ = code->constant_pool();
// Relocation info is read backwards.
@@ -625,6 +521,8 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "internal reference";
case INTERNAL_REFERENCE_ENCODED:
return "encoded internal reference";
+ case OFF_HEAP_TARGET:
+ return "off heap target";
case DEOPT_SCRIPT_OFFSET:
return "deopt script offset";
case DEOPT_INLINING_ID:
@@ -637,14 +535,12 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "constant pool";
case VENEER_POOL:
return "veneer pool";
- case WASM_CONTEXT_REFERENCE:
- return "wasm context reference";
- case WASM_FUNCTION_TABLE_SIZE_REFERENCE:
- return "wasm function table size reference";
case WASM_GLOBAL_HANDLE:
return "global handle";
case WASM_CALL:
return "internal wasm call";
+ case WASM_CODE_TABLE_ENTRY:
+ return "wasm code table entry";
case JS_TO_WASM_CALL:
return "js to wasm call";
case NUMBER_OF_MODES:
@@ -724,8 +620,14 @@ void RelocInfo::Verify(Isolate* isolate) {
Address target = target_internal_reference();
Address pc = target_internal_reference_address();
Code* code = Code::cast(isolate->FindCodeObject(pc));
- CHECK(target >= code->instruction_start());
- CHECK(target <= code->instruction_end());
+ CHECK(target >= code->InstructionStart());
+ CHECK(target <= code->InstructionEnd());
+ break;
+ }
+ case OFF_HEAP_TARGET: {
+ Address addr = target_off_heap_target();
+ CHECK_NOT_NULL(addr);
+ CHECK_NOT_NULL(InstructionStream::TryLookupCode(isolate, addr));
break;
}
case RUNTIME_ENTRY:
@@ -737,11 +639,10 @@ void RelocInfo::Verify(Isolate* isolate) {
case DEOPT_ID:
case CONST_POOL:
case VENEER_POOL:
- case WASM_CONTEXT_REFERENCE:
- case WASM_FUNCTION_TABLE_SIZE_REFERENCE:
case WASM_GLOBAL_HANDLE:
case WASM_CALL:
case JS_TO_WASM_CALL:
+ case WASM_CODE_TABLE_ENTRY:
case NONE:
break;
case NUMBER_OF_MODES:
@@ -752,896 +653,6 @@ void RelocInfo::Verify(Isolate* isolate) {
}
#endif // VERIFY_HEAP
-// Implementation of ExternalReference
-
-static ExternalReference::Type BuiltinCallTypeForResultSize(int result_size) {
- switch (result_size) {
- case 1:
- return ExternalReference::BUILTIN_CALL;
- case 2:
- return ExternalReference::BUILTIN_CALL_PAIR;
- }
- UNREACHABLE();
-}
-
-void ExternalReference::SetUp() {
- double_constants.min_int = kMinInt;
- double_constants.one_half = 0.5;
- double_constants.minus_one_half = -0.5;
- double_constants.the_hole_nan = kHoleNanInt64;
- double_constants.negative_infinity = -V8_INFINITY;
- double_constants.uint32_bias =
- static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1;
-}
-
-ExternalReference::ExternalReference(Address address, Isolate* isolate)
- : address_(Redirect(isolate, address)) {}
-
-ExternalReference::ExternalReference(
- ApiFunction* fun, Type type = ExternalReference::BUILTIN_CALL,
- Isolate* isolate = nullptr)
- : address_(Redirect(isolate, fun->address(), type)) {}
-
-ExternalReference::ExternalReference(Runtime::FunctionId id, Isolate* isolate)
- : ExternalReference(Runtime::FunctionForId(id), isolate) {}
-
-ExternalReference::ExternalReference(const Runtime::Function* f,
- Isolate* isolate)
- : address_(Redirect(isolate, f->entry,
- BuiltinCallTypeForResultSize(f->result_size))) {}
-
-ExternalReference ExternalReference::isolate_address(Isolate* isolate) {
- return ExternalReference(isolate);
-}
-
-ExternalReference ExternalReference::builtins_address(Isolate* isolate) {
- return ExternalReference(isolate->builtins()->builtins_table_address());
-}
-
-ExternalReference ExternalReference::handle_scope_implementer_address(
- Isolate* isolate) {
- return ExternalReference(isolate->handle_scope_implementer_address());
-}
-
-ExternalReference ExternalReference::pending_microtask_count_address(
- Isolate* isolate) {
- return ExternalReference(isolate->pending_microtask_count_address());
-}
-
-ExternalReference ExternalReference::interpreter_dispatch_table_address(
- Isolate* isolate) {
- return ExternalReference(isolate->interpreter()->dispatch_table_address());
-}
-
-ExternalReference ExternalReference::interpreter_dispatch_counters(
- Isolate* isolate) {
- return ExternalReference(
- isolate->interpreter()->bytecode_dispatch_counters_table());
-}
-
-ExternalReference ExternalReference::bytecode_size_table_address(
- Isolate* isolate) {
- return ExternalReference(
- interpreter::Bytecodes::bytecode_size_table_address());
-}
-
-ExternalReference::ExternalReference(StatsCounter* counter)
- : address_(reinterpret_cast<Address>(counter->GetInternalPointer())) {}
-
-ExternalReference::ExternalReference(IsolateAddressId id, Isolate* isolate)
- : address_(isolate->get_address_from_id(id)) {}
-
-ExternalReference::ExternalReference(const SCTableReference& table_ref)
- : address_(table_ref.address()) {}
-
-ExternalReference ExternalReference::
- incremental_marking_record_write_function(Isolate* isolate) {
- return ExternalReference(Redirect(
- isolate,
- FUNCTION_ADDR(IncrementalMarking::RecordWriteFromCode)));
-}
-
-ExternalReference ExternalReference::store_buffer_overflow_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(
- isolate,
- FUNCTION_ADDR(StoreBuffer::StoreBufferOverflow)));
-}
-
-ExternalReference ExternalReference::delete_handle_scope_extensions(
- Isolate* isolate) {
- return ExternalReference(Redirect(
- isolate,
- FUNCTION_ADDR(HandleScope::DeleteExtensions)));
-}
-
-ExternalReference ExternalReference::get_date_field_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(JSDate::GetField)));
-}
-
-ExternalReference ExternalReference::date_cache_stamp(Isolate* isolate) {
- return ExternalReference(isolate->date_cache()->stamp_address());
-}
-
-void ExternalReference::set_redirector(
- Isolate* isolate, ExternalReferenceRedirector* redirector) {
- // We can't stack them.
- DCHECK_NULL(isolate->external_reference_redirector());
- isolate->set_external_reference_redirector(
- reinterpret_cast<ExternalReferenceRedirectorPointer*>(redirector));
-}
-
-ExternalReference ExternalReference::stress_deopt_count(Isolate* isolate) {
- return ExternalReference(isolate->stress_deopt_count_address());
-}
-
-ExternalReference ExternalReference::force_slow_path(Isolate* isolate) {
- return ExternalReference(isolate->force_slow_path_address());
-}
-
-ExternalReference ExternalReference::new_deoptimizer_function(
- Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(Deoptimizer::New)));
-}
-
-ExternalReference ExternalReference::compute_output_frames_function(
- Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(Deoptimizer::ComputeOutputFrames)));
-}
-
-ExternalReference ExternalReference::wasm_f32_trunc(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(wasm::f32_trunc_wrapper)));
-}
-ExternalReference ExternalReference::wasm_f32_floor(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(wasm::f32_floor_wrapper)));
-}
-ExternalReference ExternalReference::wasm_f32_ceil(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(wasm::f32_ceil_wrapper)));
-}
-ExternalReference ExternalReference::wasm_f32_nearest_int(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(wasm::f32_nearest_int_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_f64_trunc(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(wasm::f64_trunc_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_f64_floor(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(wasm::f64_floor_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_f64_ceil(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(wasm::f64_ceil_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_f64_nearest_int(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(wasm::f64_nearest_int_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_int64_to_float32(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(wasm::int64_to_float32_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_uint64_to_float32(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(wasm::uint64_to_float32_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_int64_to_float64(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(wasm::int64_to_float64_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_uint64_to_float64(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(wasm::uint64_to_float64_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_float32_to_int64(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(wasm::float32_to_int64_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_float32_to_uint64(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(wasm::float32_to_uint64_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_float64_to_int64(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(wasm::float64_to_int64_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_float64_to_uint64(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(wasm::float64_to_uint64_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_int64_div(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(wasm::int64_div_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_int64_mod(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(wasm::int64_mod_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_uint64_div(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(wasm::uint64_div_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_uint64_mod(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(wasm::uint64_mod_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_word32_ctz(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(wasm::word32_ctz_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_word64_ctz(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(wasm::word64_ctz_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_word32_popcnt(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(wasm::word32_popcnt_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_word64_popcnt(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(wasm::word64_popcnt_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_word32_rol(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(wasm::word32_rol_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_word32_ror(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(wasm::word32_ror_wrapper)));
-}
-
-static void f64_acos_wrapper(double* param) {
- WriteDoubleValue(param, base::ieee754::acos(ReadDoubleValue(param)));
-}
-
-ExternalReference ExternalReference::f64_acos_wrapper_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_acos_wrapper)));
-}
-
-static void f64_asin_wrapper(double* param) {
- WriteDoubleValue(param, base::ieee754::asin(ReadDoubleValue(param)));
-}
-
-ExternalReference ExternalReference::f64_asin_wrapper_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_asin_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_float64_pow(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(wasm::float64_pow_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_set_thread_in_wasm_flag(
- Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(wasm::set_thread_in_wasm_flag)));
-}
-
-ExternalReference ExternalReference::wasm_clear_thread_in_wasm_flag(
- Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(wasm::clear_thread_in_wasm_flag)));
-}
-
-static void f64_mod_wrapper(double* param0, double* param1) {
- WriteDoubleValue(param0,
- Modulo(ReadDoubleValue(param0), ReadDoubleValue(param1)));
-}
-
-ExternalReference ExternalReference::f64_mod_wrapper_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_mod_wrapper)));
-}
-
-ExternalReference ExternalReference::wasm_call_trap_callback_for_testing(
- Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(wasm::call_trap_callback_for_testing)));
-}
-
-ExternalReference ExternalReference::log_enter_external_function(
- Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(Logger::EnterExternal)));
-}
-
-ExternalReference ExternalReference::log_leave_external_function(
- Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(Logger::LeaveExternal)));
-}
-
-ExternalReference ExternalReference::roots_array_start(Isolate* isolate) {
- return ExternalReference(isolate->heap()->roots_array_start());
-}
-
-ExternalReference ExternalReference::allocation_sites_list_address(
- Isolate* isolate) {
- return ExternalReference(isolate->heap()->allocation_sites_list_address());
-}
-
-ExternalReference ExternalReference::address_of_stack_limit(Isolate* isolate) {
- return ExternalReference(isolate->stack_guard()->address_of_jslimit());
-}
-
-ExternalReference ExternalReference::address_of_real_stack_limit(
- Isolate* isolate) {
- return ExternalReference(isolate->stack_guard()->address_of_real_jslimit());
-}
-
-ExternalReference ExternalReference::address_of_regexp_stack_limit(
- Isolate* isolate) {
- return ExternalReference(isolate->regexp_stack()->limit_address());
-}
-
-ExternalReference ExternalReference::store_buffer_top(Isolate* isolate) {
- return ExternalReference(isolate->heap()->store_buffer_top_address());
-}
-
-ExternalReference ExternalReference::heap_is_marking_flag_address(
- Isolate* isolate) {
- return ExternalReference(isolate->heap()->IsMarkingFlagAddress());
-}
-
-ExternalReference ExternalReference::new_space_allocation_top_address(
- Isolate* isolate) {
- return ExternalReference(isolate->heap()->NewSpaceAllocationTopAddress());
-}
-
-ExternalReference ExternalReference::new_space_allocation_limit_address(
- Isolate* isolate) {
- return ExternalReference(isolate->heap()->NewSpaceAllocationLimitAddress());
-}
-
-ExternalReference ExternalReference::old_space_allocation_top_address(
- Isolate* isolate) {
- return ExternalReference(isolate->heap()->OldSpaceAllocationTopAddress());
-}
-
-ExternalReference ExternalReference::old_space_allocation_limit_address(
- Isolate* isolate) {
- return ExternalReference(isolate->heap()->OldSpaceAllocationLimitAddress());
-}
-
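These allocation-top and allocation-limit addresses are exposed (the header removed below notes they are "used for fast allocation in generated code") so compiled code can allocate inline: a bump-pointer allocation advances top and checks it against limit, falling back to a runtime call only when the space is exhausted. A standalone sketch of that fast path; the Space struct and arena are illustrative, not V8 types:

#include <cstddef>
#include <cstdint>
#include <cstdio>

struct Space {
  uintptr_t top;
  uintptr_t limit;
};

// Inline allocation fast path: bump `top`, bail out to the slow path (a
// runtime call in real generated code) when the bump would cross `limit`.
static void* TryAllocate(Space* space, std::size_t size) {
  uintptr_t result = space->top;
  if (result + size > space->limit) return nullptr;
  space->top = result + size;
  return reinterpret_cast<void*>(result);
}

int main() {
  alignas(8) static char arena[64];
  Space space{reinterpret_cast<uintptr_t>(arena),
              reinterpret_cast<uintptr_t>(arena) + sizeof(arena)};
  std::printf("%p\n", TryAllocate(&space, 16));  // succeeds
  std::printf("%p\n", TryAllocate(&space, 64));  // nullptr: would cross limit
  return 0;
}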
-ExternalReference ExternalReference::handle_scope_level_address(
- Isolate* isolate) {
- return ExternalReference(HandleScope::current_level_address(isolate));
-}
-
-ExternalReference ExternalReference::handle_scope_next_address(
- Isolate* isolate) {
- return ExternalReference(HandleScope::current_next_address(isolate));
-}
-
-ExternalReference ExternalReference::handle_scope_limit_address(
- Isolate* isolate) {
- return ExternalReference(HandleScope::current_limit_address(isolate));
-}
-
-ExternalReference ExternalReference::scheduled_exception_address(
- Isolate* isolate) {
- return ExternalReference(isolate->scheduled_exception_address());
-}
-
-ExternalReference ExternalReference::address_of_pending_message_obj(
- Isolate* isolate) {
- return ExternalReference(isolate->pending_message_obj_address());
-}
-
-ExternalReference ExternalReference::address_of_min_int() {
- return ExternalReference(reinterpret_cast<void*>(&double_constants.min_int));
-}
-
-ExternalReference ExternalReference::address_of_one_half() {
- return ExternalReference(reinterpret_cast<void*>(&double_constants.one_half));
-}
-
-ExternalReference ExternalReference::address_of_minus_one_half() {
- return ExternalReference(
- reinterpret_cast<void*>(&double_constants.minus_one_half));
-}
-
-ExternalReference ExternalReference::address_of_negative_infinity() {
- return ExternalReference(
- reinterpret_cast<void*>(&double_constants.negative_infinity));
-}
-
-ExternalReference ExternalReference::address_of_the_hole_nan() {
- return ExternalReference(
- reinterpret_cast<void*>(&double_constants.the_hole_nan));
-}
-
-ExternalReference ExternalReference::address_of_uint32_bias() {
- return ExternalReference(
- reinterpret_cast<void*>(&double_constants.uint32_bias));
-}
-
-ExternalReference ExternalReference::address_of_float_abs_constant() {
- return ExternalReference(reinterpret_cast<void*>(&float_absolute_constant));
-}
-
-ExternalReference ExternalReference::address_of_float_neg_constant() {
- return ExternalReference(reinterpret_cast<void*>(&float_negate_constant));
-}
-
-ExternalReference ExternalReference::address_of_double_abs_constant() {
- return ExternalReference(reinterpret_cast<void*>(&double_absolute_constant));
-}
-
-ExternalReference ExternalReference::address_of_double_neg_constant() {
- return ExternalReference(reinterpret_cast<void*>(&double_negate_constant));
-}
-
-ExternalReference ExternalReference::is_profiling_address(Isolate* isolate) {
- return ExternalReference(isolate->is_profiling_address());
-}
-
-ExternalReference ExternalReference::invoke_function_callback(
- Isolate* isolate) {
- Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
- ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL;
- ApiFunction thunk_fun(thunk_address);
- return ExternalReference(&thunk_fun, thunk_type, isolate);
-}
-
-ExternalReference ExternalReference::invoke_accessor_getter_callback(
- Isolate* isolate) {
- Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
- ExternalReference::Type thunk_type =
- ExternalReference::PROFILING_GETTER_CALL;
- ApiFunction thunk_fun(thunk_address);
- return ExternalReference(&thunk_fun, thunk_type, isolate);
-}
-
-#ifndef V8_INTERPRETED_REGEXP
-
-ExternalReference ExternalReference::re_check_stack_guard_state(
- Isolate* isolate) {
- Address function;
-#if V8_TARGET_ARCH_X64
- function = FUNCTION_ADDR(RegExpMacroAssemblerX64::CheckStackGuardState);
-#elif V8_TARGET_ARCH_IA32
- function = FUNCTION_ADDR(RegExpMacroAssemblerIA32::CheckStackGuardState);
-#elif V8_TARGET_ARCH_ARM64
- function = FUNCTION_ADDR(RegExpMacroAssemblerARM64::CheckStackGuardState);
-#elif V8_TARGET_ARCH_ARM
- function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState);
-#elif V8_TARGET_ARCH_PPC
- function = FUNCTION_ADDR(RegExpMacroAssemblerPPC::CheckStackGuardState);
-#elif V8_TARGET_ARCH_MIPS
- function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState);
-#elif V8_TARGET_ARCH_MIPS64
- function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState);
-#elif V8_TARGET_ARCH_S390
- function = FUNCTION_ADDR(RegExpMacroAssemblerS390::CheckStackGuardState);
-#else
- UNREACHABLE();
-#endif
- return ExternalReference(Redirect(isolate, function));
-}
-
-ExternalReference ExternalReference::re_grow_stack(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(NativeRegExpMacroAssembler::GrowStack)));
-}
-
-ExternalReference ExternalReference::re_case_insensitive_compare_uc16(
- Isolate* isolate) {
- return ExternalReference(Redirect(
- isolate,
- FUNCTION_ADDR(NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16)));
-}
-
-ExternalReference ExternalReference::re_word_character_map() {
- return ExternalReference(
- NativeRegExpMacroAssembler::word_character_map_address());
-}
-
-ExternalReference ExternalReference::address_of_static_offsets_vector(
- Isolate* isolate) {
- return ExternalReference(
- reinterpret_cast<Address>(isolate->jsregexp_static_offsets_vector()));
-}
-
-ExternalReference ExternalReference::address_of_regexp_stack_memory_address(
- Isolate* isolate) {
- return ExternalReference(
- isolate->regexp_stack()->memory_address());
-}
-
-ExternalReference ExternalReference::address_of_regexp_stack_memory_size(
- Isolate* isolate) {
- return ExternalReference(isolate->regexp_stack()->memory_size_address());
-}
-
-#endif // V8_INTERPRETED_REGEXP
-
-ExternalReference ExternalReference::ieee754_acos_function(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(base::ieee754::acos), BUILTIN_FP_CALL));
-}
-
-ExternalReference ExternalReference::ieee754_acosh_function(Isolate* isolate) {
- return ExternalReference(Redirect(
- isolate, FUNCTION_ADDR(base::ieee754::acosh), BUILTIN_FP_FP_CALL));
-}
-
-ExternalReference ExternalReference::ieee754_asin_function(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(base::ieee754::asin), BUILTIN_FP_CALL));
-}
-
-ExternalReference ExternalReference::ieee754_asinh_function(Isolate* isolate) {
- return ExternalReference(Redirect(
- isolate, FUNCTION_ADDR(base::ieee754::asinh), BUILTIN_FP_FP_CALL));
-}
-
-ExternalReference ExternalReference::ieee754_atan_function(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(base::ieee754::atan), BUILTIN_FP_CALL));
-}
-
-ExternalReference ExternalReference::ieee754_atanh_function(Isolate* isolate) {
- return ExternalReference(Redirect(
- isolate, FUNCTION_ADDR(base::ieee754::atanh), BUILTIN_FP_FP_CALL));
-}
-
-ExternalReference ExternalReference::ieee754_atan2_function(Isolate* isolate) {
- return ExternalReference(Redirect(
- isolate, FUNCTION_ADDR(base::ieee754::atan2), BUILTIN_FP_FP_CALL));
-}
-
-ExternalReference ExternalReference::ieee754_cbrt_function(Isolate* isolate) {
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(base::ieee754::cbrt),
- BUILTIN_FP_FP_CALL));
-}
-
-ExternalReference ExternalReference::ieee754_cos_function(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(base::ieee754::cos), BUILTIN_FP_CALL));
-}
-
-ExternalReference ExternalReference::ieee754_cosh_function(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(base::ieee754::cosh), BUILTIN_FP_CALL));
-}
-
-ExternalReference ExternalReference::ieee754_exp_function(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(base::ieee754::exp), BUILTIN_FP_CALL));
-}
-
-ExternalReference ExternalReference::ieee754_expm1_function(Isolate* isolate) {
- return ExternalReference(Redirect(
- isolate, FUNCTION_ADDR(base::ieee754::expm1), BUILTIN_FP_FP_CALL));
-}
-
-ExternalReference ExternalReference::ieee754_log_function(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(base::ieee754::log), BUILTIN_FP_CALL));
-}
-
-ExternalReference ExternalReference::ieee754_log1p_function(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(base::ieee754::log1p), BUILTIN_FP_CALL));
-}
-
-ExternalReference ExternalReference::ieee754_log10_function(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(base::ieee754::log10), BUILTIN_FP_CALL));
-}
-
-ExternalReference ExternalReference::ieee754_log2_function(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(base::ieee754::log2), BUILTIN_FP_CALL));
-}
-
-ExternalReference ExternalReference::ieee754_sin_function(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(base::ieee754::sin), BUILTIN_FP_CALL));
-}
-
-ExternalReference ExternalReference::ieee754_sinh_function(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(base::ieee754::sinh), BUILTIN_FP_CALL));
-}
-
-ExternalReference ExternalReference::ieee754_tan_function(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(base::ieee754::tan), BUILTIN_FP_CALL));
-}
-
-ExternalReference ExternalReference::ieee754_tanh_function(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(base::ieee754::tanh), BUILTIN_FP_CALL));
-}
-
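Each of the ieee754 redirects above is tagged BUILTIN_FP_CALL (double f(double)) or BUILTIN_FP_FP_CALL (double f(double, double)), matching the Type enum removed from assembler.h further down; a simulator redirector needs that tag to know which C signature to call the raw address through. A standalone sketch of why the tag matters; Neg and Add are placeholder callees, not V8 functions:

#include <cstdio>

enum Type { BUILTIN_FP_CALL, BUILTIN_FP_FP_CALL };

// The simulator side must cast the redirected address back to the right
// signature before calling it, which is what the Type tag makes possible.
static double CallRedirected(void* addr, Type type, double a, double b) {
  switch (type) {
    case BUILTIN_FP_CALL:
      return reinterpret_cast<double (*)(double)>(addr)(a);
    case BUILTIN_FP_FP_CALL:
      return reinterpret_cast<double (*)(double, double)>(addr)(a, b);
  }
  return 0.0;
}

static double Neg(double x) { return -x; }
static double Add(double x, double y) { return x + y; }

int main() {
  void* neg = reinterpret_cast<void*>(&Neg);
  void* add = reinterpret_cast<void*>(&Add);
  std::printf("%g\n", CallRedirected(neg, BUILTIN_FP_CALL, 2.0, 0.0));     // -2
  std::printf("%g\n", CallRedirected(add, BUILTIN_FP_FP_CALL, 2.0, 3.0));  // 5
  return 0;
}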
-void* libc_memchr(void* string, int character, size_t search_length) {
- return memchr(string, character, search_length);
-}
-
-ExternalReference ExternalReference::libc_memchr_function(Isolate* isolate) {
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(libc_memchr)));
-}
-
-void* libc_memcpy(void* dest, const void* src, size_t n) {
- return memcpy(dest, src, n);
-}
-
-ExternalReference ExternalReference::libc_memcpy_function(Isolate* isolate) {
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(libc_memcpy)));
-}
-
-void* libc_memmove(void* dest, const void* src, size_t n) {
- return memmove(dest, src, n);
-}
-
-ExternalReference ExternalReference::libc_memmove_function(Isolate* isolate) {
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(libc_memmove)));
-}
-
-void* libc_memset(void* dest, int byte, size_t n) {
- DCHECK_EQ(static_cast<char>(byte), byte);
- return memset(dest, byte, n);
-}
-
-ExternalReference ExternalReference::libc_memset_function(Isolate* isolate) {
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(libc_memset)));
-}
-
-ExternalReference ExternalReference::printf_function(Isolate* isolate) {
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(std::printf)));
-}
-
-template <typename SubjectChar, typename PatternChar>
-ExternalReference ExternalReference::search_string_raw(Isolate* isolate) {
- auto f = SearchStringRaw<SubjectChar, PatternChar>;
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f)));
-}
-
-ExternalReference ExternalReference::orderedhashmap_gethash_raw(
- Isolate* isolate) {
- auto f = OrderedHashMap::GetHash;
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f)));
-}
-
-ExternalReference ExternalReference::get_or_create_hash_raw(Isolate* isolate) {
- typedef Smi* (*GetOrCreateHash)(Isolate * isolate, Object * key);
- GetOrCreateHash f = Object::GetOrCreateHash;
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f)));
-}
-
-ExternalReference ExternalReference::jsreceiver_create_identity_hash(
- Isolate* isolate) {
- typedef Smi* (*CreateIdentityHash)(Isolate * isolate, JSReceiver * key);
- CreateIdentityHash f = JSReceiver::CreateIdentityHash;
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f)));
-}
-
-ExternalReference
-ExternalReference::copy_fast_number_jsarray_elements_to_typed_array(
- Isolate* isolate) {
- return ExternalReference(Redirect(
- isolate, FUNCTION_ADDR(CopyFastNumberJSArrayElementsToTypedArray)));
-}
-
-ExternalReference ExternalReference::copy_typed_array_elements_to_typed_array(
- Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(CopyTypedArrayElementsToTypedArray)));
-}
-
-ExternalReference ExternalReference::copy_typed_array_elements_slice(
- Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(CopyTypedArrayElementsSlice)));
-}
-
-ExternalReference ExternalReference::try_internalize_string_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(
- isolate, FUNCTION_ADDR(StringTable::LookupStringIfExists_NoAllocate)));
-}
-
-ExternalReference ExternalReference::check_object_type(Isolate* isolate) {
- return ExternalReference(Redirect(isolate, FUNCTION_ADDR(CheckObjectType)));
-}
-
-#ifdef V8_INTL_SUPPORT
-ExternalReference ExternalReference::intl_convert_one_byte_to_lower(
- Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(ConvertOneByteToLower)));
-}
-
-ExternalReference ExternalReference::intl_to_latin1_lower_table(
- Isolate* isolate) {
- uint8_t* ptr = const_cast<uint8_t*>(ToLatin1LowerTable());
- return ExternalReference(reinterpret_cast<Address>(ptr));
-}
-#endif // V8_INTL_SUPPORT
-
-// Explicit instantiations for all combinations of 1- and 2-byte strings.
-template ExternalReference
-ExternalReference::search_string_raw<const uint8_t, const uint8_t>(Isolate*);
-template ExternalReference
-ExternalReference::search_string_raw<const uint8_t, const uc16>(Isolate*);
-template ExternalReference
-ExternalReference::search_string_raw<const uc16, const uint8_t>(Isolate*);
-template ExternalReference
-ExternalReference::search_string_raw<const uc16, const uc16>(Isolate*);
-
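search_string_raw is a function template whose definition lives in this .cc file, so the explicit instantiations just above are what make the four 1-byte/2-byte combinations linkable from other translation units. A minimal standalone sketch of that mechanism; FirstCharMatches is a trivial stand-in, not the real SearchStringRaw:

#include <cstdio>

// Templated helper defined in a .cc file (stand-in body only).
template <typename SubjectChar, typename PatternChar>
bool FirstCharMatches(const SubjectChar* subject, const PatternChar* pattern) {
  return static_cast<unsigned>(subject[0]) == static_cast<unsigned>(pattern[0]);
}

// Explicit instantiations: only these combinations get emitted, mirroring the
// uint8_t/uc16 instantiations above.
template bool FirstCharMatches<char, char>(const char*, const char*);
template bool FirstCharMatches<char, char16_t>(const char*, const char16_t*);

int main() {
  std::printf("%d\n", FirstCharMatches("abc", "a"));  // 1
  return 0;
}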
-ExternalReference ExternalReference::page_flags(Page* page) {
- return ExternalReference(reinterpret_cast<Address>(page) +
- MemoryChunk::kFlagsOffset);
-}
-
-ExternalReference ExternalReference::ForDeoptEntry(Address entry) {
- return ExternalReference(entry);
-}
-
-ExternalReference ExternalReference::cpu_features() {
- DCHECK(CpuFeatures::initialized_);
- return ExternalReference(&CpuFeatures::supported_);
-}
-
-ExternalReference ExternalReference::promise_hook_or_debug_is_active_address(
- Isolate* isolate) {
- return ExternalReference(isolate->promise_hook_or_debug_is_active_address());
-}
-
-ExternalReference ExternalReference::debug_is_active_address(
- Isolate* isolate) {
- return ExternalReference(isolate->debug()->is_active_address());
-}
-
-ExternalReference ExternalReference::debug_hook_on_function_call_address(
- Isolate* isolate) {
- return ExternalReference(isolate->debug()->hook_on_function_call_address());
-}
-
-ExternalReference ExternalReference::runtime_function_table_address(
- Isolate* isolate) {
- return ExternalReference(
- const_cast<Runtime::Function*>(Runtime::RuntimeFunctionTable(isolate)));
-}
-
-ExternalReference ExternalReference::invalidate_prototype_chains_function(
- Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(JSObject::InvalidatePrototypeChains)));
-}
-
-double power_helper(Isolate* isolate, double x, double y) {
- int y_int = static_cast<int>(y);
- if (y == y_int) {
- return power_double_int(x, y_int); // Returns 1 if exponent is 0.
- }
- if (y == 0.5) {
- lazily_initialize_fast_sqrt(isolate);
- return (std::isinf(x)) ? V8_INFINITY
- : fast_sqrt(x + 0.0, isolate); // Convert -0 to +0.
- }
- if (y == -0.5) {
- lazily_initialize_fast_sqrt(isolate);
- return (std::isinf(x)) ? 0 : 1.0 / fast_sqrt(x + 0.0,
- isolate); // Convert -0 to +0.
- }
- return power_double_double(x, y);
-}
-
-// Helper function to compute x^y, where y is known to be an
-// integer. Uses binary decomposition to limit the number of
-// multiplications; see the discussion in "Hacker's Delight" by Henry
-// S. Warren, Jr., figure 11-6, page 213.
-double power_double_int(double x, int y) {
- double m = (y < 0) ? 1 / x : x;
- unsigned n = (y < 0) ? -y : y;
- double p = 1;
- while (n != 0) {
- if ((n & 1) != 0) p *= m;
- m *= m;
- if ((n & 2) != 0) p *= m;
- m *= m;
- n >>= 2;
- }
- return p;
-}
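power_double_int above consumes two exponent bits per loop iteration, but the underlying technique is plain square-and-multiply: each squaring of m advances to the next bit of n, and every set bit multiplies the current m into p. A standalone one-bit-per-iteration version of the same idea, checked against std::pow:

#include <cmath>
#include <cstdio>

// Square-and-multiply for integer exponents, one exponent bit per iteration.
static double PowInt(double x, int y) {
  double m = (y < 0) ? 1 / x : x;
  unsigned n = (y < 0) ? -static_cast<unsigned>(y) : static_cast<unsigned>(y);
  double p = 1;
  while (n != 0) {
    if (n & 1) p *= m;  // this bit of the exponent is set
    m *= m;             // square for the next bit
    n >>= 1;
  }
  return p;
}

int main() {
  std::printf("%g %g\n", PowInt(3.0, 5), std::pow(3.0, 5.0));    // 243 243
  std::printf("%g %g\n", PowInt(2.0, -3), std::pow(2.0, -3.0));  // 0.125 0.125
  return 0;
}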
-
-double power_double_double(double x, double y) {
- // The checks for special cases can be dropped in ia32 because it has already
- // been done in generated code before bailing out here.
- if (std::isnan(y) || ((x == 1 || x == -1) && std::isinf(y))) {
- return std::numeric_limits<double>::quiet_NaN();
- }
- return Pow(x, y);
-}
-
-double modulo_double_double(double x, double y) { return Modulo(x, y); }
-
-ExternalReference ExternalReference::power_double_double_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(isolate,
- FUNCTION_ADDR(power_double_double),
- BUILTIN_FP_FP_CALL));
-}
-
-ExternalReference ExternalReference::mod_two_doubles_operation(
- Isolate* isolate) {
- return ExternalReference(Redirect(
- isolate, FUNCTION_ADDR(modulo_double_double), BUILTIN_FP_FP_CALL));
-}
-
-ExternalReference ExternalReference::debug_last_step_action_address(
- Isolate* isolate) {
- return ExternalReference(isolate->debug()->last_step_action_address());
-}
-
-ExternalReference ExternalReference::debug_suspended_generator_address(
- Isolate* isolate) {
- return ExternalReference(isolate->debug()->suspended_generator_address());
-}
-
-ExternalReference ExternalReference::debug_restart_fp_address(
- Isolate* isolate) {
- return ExternalReference(isolate->debug()->restart_fp_address());
-}
-
-ExternalReference ExternalReference::fixed_typed_array_base_data_offset() {
- return ExternalReference(reinterpret_cast<void*>(
- FixedTypedArrayBase::kDataOffset - kHeapObjectTag));
-}
-
-bool operator==(ExternalReference lhs, ExternalReference rhs) {
- return lhs.address() == rhs.address();
-}
-
-bool operator!=(ExternalReference lhs, ExternalReference rhs) {
- return !(lhs == rhs);
-}
-
-size_t hash_value(ExternalReference reference) {
- return base::hash<Address>()(reference.address());
-}
-
-std::ostream& operator<<(std::ostream& os, ExternalReference reference) {
- os << static_cast<const void*>(reference.address());
- const Runtime::Function* fn = Runtime::FunctionForEntry(reference.address());
- if (fn) os << "<" << fn->name << ".entry>";
- return os;
-}
-
ConstantPoolBuilder::ConstantPoolBuilder(int ptr_reach_bits,
int double_reach_bits) {
info_[ConstantPoolEntry::INTPTR].entries.reserve(64);
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index c45ec6910d..e79f4cc869 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -37,17 +37,19 @@
#include <forward_list>
#include <iosfwd>
+#include <map>
#include "src/allocation.h"
-#include "src/builtins/builtins.h"
+#include "src/contexts.h"
#include "src/deoptimize-reason.h"
#include "src/double.h"
+#include "src/external-reference.h"
+#include "src/flags.h"
#include "src/globals.h"
#include "src/label.h"
-#include "src/log.h"
+#include "src/objects.h"
#include "src/register-configuration.h"
#include "src/reglist.h"
-#include "src/runtime/runtime.h"
namespace v8 {
@@ -364,11 +366,6 @@ class RelocInfo {
// Please note the order is important (see IsCodeTarget, IsGCRelocMode).
CODE_TARGET,
EMBEDDED_OBJECT,
- // Wasm entries are to relocate pointers into the wasm memory embedded in
- // wasm code. Everything after WASM_CONTEXT_REFERENCE (inclusive) is not
- // GC'ed.
- WASM_CONTEXT_REFERENCE,
- WASM_FUNCTION_TABLE_SIZE_REFERENCE,
WASM_GLOBAL_HANDLE,
WASM_CALL,
JS_TO_WASM_CALL,
@@ -382,6 +379,9 @@ class RelocInfo {
// Encoded internal reference, used only on MIPS, MIPS64 and PPC.
INTERNAL_REFERENCE_ENCODED,
+ // An off-heap instruction stream target. See http://goo.gl/Z2HUiM.
+ OFF_HEAP_TARGET,
+
// Marks constant and veneer pools. Only used on ARM and ARM64.
// They use a custom noncompact encoding.
CONST_POOL,
@@ -396,6 +396,9 @@ class RelocInfo {
// cannot be encoded as part of another record.
PC_JUMP,
+ // Points to a wasm code table entry.
+ WASM_CODE_TABLE_ENTRY,
+
// Pseudo-types
NUMBER_OF_MODES,
NONE, // never recorded value
@@ -458,22 +461,16 @@ class RelocInfo {
static inline bool IsInternalReferenceEncoded(Mode mode) {
return mode == INTERNAL_REFERENCE_ENCODED;
}
- static inline bool IsNone(Mode mode) { return mode == NONE; }
- static inline bool IsWasmContextReference(Mode mode) {
- return mode == WASM_CONTEXT_REFERENCE;
- }
- static inline bool IsWasmFunctionTableSizeReference(Mode mode) {
- return mode == WASM_FUNCTION_TABLE_SIZE_REFERENCE;
+ static inline bool IsOffHeapTarget(Mode mode) {
+ return mode == OFF_HEAP_TARGET;
}
+ static inline bool IsNone(Mode mode) { return mode == NONE; }
static inline bool IsWasmReference(Mode mode) {
- return IsWasmPtrReference(mode) || IsWasmSizeReference(mode);
- }
- static inline bool IsWasmSizeReference(Mode mode) {
- return IsWasmFunctionTableSizeReference(mode);
+ return IsWasmPtrReference(mode);
}
static inline bool IsWasmPtrReference(Mode mode) {
- return mode == WASM_CONTEXT_REFERENCE || mode == WASM_GLOBAL_HANDLE ||
- mode == WASM_CALL || mode == JS_TO_WASM_CALL;
+ return mode == WASM_GLOBAL_HANDLE || mode == WASM_CALL ||
+ mode == JS_TO_WASM_CALL;
}
static constexpr int ModeMask(Mode mode) { return 1 << mode; }
@@ -500,22 +497,18 @@ class RelocInfo {
// instructions).
bool IsCodedSpecially();
+ // The static pendant to IsCodedSpecially, just for off-heap targets. Used
+ // during deserialization, when we don't actually have a RelocInfo handy.
+ static bool OffHeapTargetIsCodedSpecially();
+
// If true, the pointer this relocation info refers to is an entry in the
// constant pool, otherwise the pointer is embedded in the instruction stream.
bool IsInConstantPool();
- Address wasm_context_reference() const;
- uint32_t wasm_function_table_size_reference() const;
Address global_handle() const;
Address js_to_wasm_address() const;
Address wasm_call_address() const;
- void set_wasm_context_reference(
- Address address,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
- void update_wasm_function_table_size_reference(
- uint32_t old_base, uint32_t new_base,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
void set_target_address(
Address target,
WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
@@ -542,11 +535,16 @@ class RelocInfo {
Address target,
WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
+ INLINE(Address target_off_heap_target());
INLINE(Cell* target_cell());
INLINE(Handle<Cell> target_cell_handle());
INLINE(void set_target_cell(
Cell* cell, WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
+ INLINE(void set_wasm_code_table_entry(
+ Address, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
+ INLINE(void set_target_external_reference(
+ Address, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
// Returns the address of the constant pool entry where the target address
// is held. This should only be called if IsInConstantPool returns true.
@@ -733,349 +731,6 @@ class RelocIterator: public Malloced {
DISALLOW_COPY_AND_ASSIGN(RelocIterator);
};
-//------------------------------------------------------------------------------
-// External references
-
-// An ExternalReference represents a C++ address used in the generated
-// code. All references to C++ functions and variables must be encapsulated in
-// an ExternalReference instance. This is done in order to track the origin of
-// all external references in the code so that they can be bound to the correct
-// addresses when deserializing a heap.
-class ExternalReference BASE_EMBEDDED {
- public:
- // Used in the simulator to support different native api calls.
- enum Type {
- // Builtin call.
- // Object* f(v8::internal::Arguments).
- BUILTIN_CALL, // default
-
- // Builtin call returning object pair.
- // ObjectPair f(v8::internal::Arguments).
- BUILTIN_CALL_PAIR,
-
- // Builtin that takes float arguments and returns an int.
- // int f(double, double).
- BUILTIN_COMPARE_CALL,
-
- // Builtin call that returns floating point.
- // double f(double, double).
- BUILTIN_FP_FP_CALL,
-
- // Builtin call that returns floating point.
- // double f(double).
- BUILTIN_FP_CALL,
-
- // Builtin call that returns floating point.
- // double f(double, int).
- BUILTIN_FP_INT_CALL,
-
- // Direct call to API function callback.
- // void f(v8::FunctionCallbackInfo&)
- DIRECT_API_CALL,
-
- // Call to function callback via InvokeFunctionCallback.
- // void f(v8::FunctionCallbackInfo&, v8::FunctionCallback)
- PROFILING_API_CALL,
-
- // Direct call to accessor getter callback.
- // void f(Local<Name> property, PropertyCallbackInfo& info)
- DIRECT_GETTER_CALL,
-
- // Call to accessor getter callback via InvokeAccessorGetterCallback.
- // void f(Local<Name> property, PropertyCallbackInfo& info,
- // AccessorNameGetterCallback callback)
- PROFILING_GETTER_CALL
- };
-
- static void SetUp();
-
- typedef void* ExternalReferenceRedirector(void* original, Type type);
-
- ExternalReference() : address_(nullptr) {}
-
- ExternalReference(Address address, Isolate* isolate);
-
- ExternalReference(ApiFunction* ptr, Type type, Isolate* isolate);
-
- ExternalReference(Runtime::FunctionId id, Isolate* isolate);
-
- ExternalReference(const Runtime::Function* f, Isolate* isolate);
-
- explicit ExternalReference(StatsCounter* counter);
-
- ExternalReference(IsolateAddressId id, Isolate* isolate);
-
- explicit ExternalReference(const SCTableReference& table_ref);
-
- // Isolate as an external reference.
- static ExternalReference isolate_address(Isolate* isolate);
-
- // The builtins table as an external reference, used by lazy deserialization.
- static ExternalReference builtins_address(Isolate* isolate);
-
- static ExternalReference handle_scope_implementer_address(Isolate* isolate);
- static ExternalReference pending_microtask_count_address(Isolate* isolate);
-
- // One-of-a-kind references. These references are not part of a general
- // pattern. This means that they have to be added to the
- // ExternalReferenceTable in serialize.cc manually.
-
- static ExternalReference interpreter_dispatch_table_address(Isolate* isolate);
- static ExternalReference interpreter_dispatch_counters(Isolate* isolate);
- static ExternalReference bytecode_size_table_address(Isolate* isolate);
-
- static ExternalReference incremental_marking_record_write_function(
- Isolate* isolate);
- static ExternalReference store_buffer_overflow_function(
- Isolate* isolate);
- static ExternalReference delete_handle_scope_extensions(Isolate* isolate);
-
- static ExternalReference get_date_field_function(Isolate* isolate);
- static ExternalReference date_cache_stamp(Isolate* isolate);
-
- // Deoptimization support.
- static ExternalReference new_deoptimizer_function(Isolate* isolate);
- static ExternalReference compute_output_frames_function(Isolate* isolate);
-
- static ExternalReference wasm_f32_trunc(Isolate* isolate);
- static ExternalReference wasm_f32_floor(Isolate* isolate);
- static ExternalReference wasm_f32_ceil(Isolate* isolate);
- static ExternalReference wasm_f32_nearest_int(Isolate* isolate);
- static ExternalReference wasm_f64_trunc(Isolate* isolate);
- static ExternalReference wasm_f64_floor(Isolate* isolate);
- static ExternalReference wasm_f64_ceil(Isolate* isolate);
- static ExternalReference wasm_f64_nearest_int(Isolate* isolate);
- static ExternalReference wasm_int64_to_float32(Isolate* isolate);
- static ExternalReference wasm_uint64_to_float32(Isolate* isolate);
- static ExternalReference wasm_int64_to_float64(Isolate* isolate);
- static ExternalReference wasm_uint64_to_float64(Isolate* isolate);
- static ExternalReference wasm_float32_to_int64(Isolate* isolate);
- static ExternalReference wasm_float32_to_uint64(Isolate* isolate);
- static ExternalReference wasm_float64_to_int64(Isolate* isolate);
- static ExternalReference wasm_float64_to_uint64(Isolate* isolate);
- static ExternalReference wasm_int64_div(Isolate* isolate);
- static ExternalReference wasm_int64_mod(Isolate* isolate);
- static ExternalReference wasm_uint64_div(Isolate* isolate);
- static ExternalReference wasm_uint64_mod(Isolate* isolate);
- static ExternalReference wasm_word32_ctz(Isolate* isolate);
- static ExternalReference wasm_word64_ctz(Isolate* isolate);
- static ExternalReference wasm_word32_popcnt(Isolate* isolate);
- static ExternalReference wasm_word64_popcnt(Isolate* isolate);
- static ExternalReference wasm_word32_rol(Isolate* isolate);
- static ExternalReference wasm_word32_ror(Isolate* isolate);
- static ExternalReference wasm_float64_pow(Isolate* isolate);
- static ExternalReference wasm_set_thread_in_wasm_flag(Isolate* isolate);
- static ExternalReference wasm_clear_thread_in_wasm_flag(Isolate* isolate);
-
- static ExternalReference f64_acos_wrapper_function(Isolate* isolate);
- static ExternalReference f64_asin_wrapper_function(Isolate* isolate);
- static ExternalReference f64_mod_wrapper_function(Isolate* isolate);
-
- // Trap callback function for cctest/wasm/wasm-run-utils.h
- static ExternalReference wasm_call_trap_callback_for_testing(
- Isolate* isolate);
-
- // Log support.
- static ExternalReference log_enter_external_function(Isolate* isolate);
- static ExternalReference log_leave_external_function(Isolate* isolate);
-
- // Static variable Heap::roots_array_start()
- static ExternalReference roots_array_start(Isolate* isolate);
-
- // Static variable Heap::allocation_sites_list_address()
- static ExternalReference allocation_sites_list_address(Isolate* isolate);
-
- // Static variable StackGuard::address_of_jslimit()
- V8_EXPORT_PRIVATE static ExternalReference address_of_stack_limit(
- Isolate* isolate);
-
- // Static variable StackGuard::address_of_real_jslimit()
- static ExternalReference address_of_real_stack_limit(Isolate* isolate);
-
- // Static variable RegExpStack::limit_address()
- static ExternalReference address_of_regexp_stack_limit(Isolate* isolate);
-
- // Static variables for RegExp.
- static ExternalReference address_of_static_offsets_vector(Isolate* isolate);
- static ExternalReference address_of_regexp_stack_memory_address(
- Isolate* isolate);
- static ExternalReference address_of_regexp_stack_memory_size(
- Isolate* isolate);
-
- // Write barrier.
- static ExternalReference store_buffer_top(Isolate* isolate);
- static ExternalReference heap_is_marking_flag_address(Isolate* isolate);
-
- // Used for fast allocation in generated code.
- static ExternalReference new_space_allocation_top_address(Isolate* isolate);
- static ExternalReference new_space_allocation_limit_address(Isolate* isolate);
- static ExternalReference old_space_allocation_top_address(Isolate* isolate);
- static ExternalReference old_space_allocation_limit_address(Isolate* isolate);
-
- static ExternalReference mod_two_doubles_operation(Isolate* isolate);
- static ExternalReference power_double_double_function(Isolate* isolate);
-
- static ExternalReference handle_scope_next_address(Isolate* isolate);
- static ExternalReference handle_scope_limit_address(Isolate* isolate);
- static ExternalReference handle_scope_level_address(Isolate* isolate);
-
- static ExternalReference scheduled_exception_address(Isolate* isolate);
- static ExternalReference address_of_pending_message_obj(Isolate* isolate);
-
- // Static variables containing common double constants.
- static ExternalReference address_of_min_int();
- static ExternalReference address_of_one_half();
- static ExternalReference address_of_minus_one_half();
- static ExternalReference address_of_negative_infinity();
- static ExternalReference address_of_the_hole_nan();
- static ExternalReference address_of_uint32_bias();
-
- // Static variables containing simd constants.
- static ExternalReference address_of_float_abs_constant();
- static ExternalReference address_of_float_neg_constant();
- static ExternalReference address_of_double_abs_constant();
- static ExternalReference address_of_double_neg_constant();
-
- // IEEE 754 functions.
- static ExternalReference ieee754_acos_function(Isolate* isolate);
- static ExternalReference ieee754_acosh_function(Isolate* isolate);
- static ExternalReference ieee754_asin_function(Isolate* isolate);
- static ExternalReference ieee754_asinh_function(Isolate* isolate);
- static ExternalReference ieee754_atan_function(Isolate* isolate);
- static ExternalReference ieee754_atanh_function(Isolate* isolate);
- static ExternalReference ieee754_atan2_function(Isolate* isolate);
- static ExternalReference ieee754_cbrt_function(Isolate* isolate);
- static ExternalReference ieee754_cos_function(Isolate* isolate);
- static ExternalReference ieee754_cosh_function(Isolate* isolate);
- static ExternalReference ieee754_exp_function(Isolate* isolate);
- static ExternalReference ieee754_expm1_function(Isolate* isolate);
- static ExternalReference ieee754_log_function(Isolate* isolate);
- static ExternalReference ieee754_log1p_function(Isolate* isolate);
- static ExternalReference ieee754_log10_function(Isolate* isolate);
- static ExternalReference ieee754_log2_function(Isolate* isolate);
- static ExternalReference ieee754_sin_function(Isolate* isolate);
- static ExternalReference ieee754_sinh_function(Isolate* isolate);
- static ExternalReference ieee754_tan_function(Isolate* isolate);
- static ExternalReference ieee754_tanh_function(Isolate* isolate);
-
- static ExternalReference libc_memchr_function(Isolate* isolate);
- static ExternalReference libc_memcpy_function(Isolate* isolate);
- static ExternalReference libc_memmove_function(Isolate* isolate);
- static ExternalReference libc_memset_function(Isolate* isolate);
-
- static ExternalReference printf_function(Isolate* isolate);
-
- static ExternalReference try_internalize_string_function(Isolate* isolate);
-
- static ExternalReference check_object_type(Isolate* isolate);
-
-#ifdef V8_INTL_SUPPORT
- static ExternalReference intl_convert_one_byte_to_lower(Isolate* isolate);
- static ExternalReference intl_to_latin1_lower_table(Isolate* isolate);
-#endif // V8_INTL_SUPPORT
-
- template <typename SubjectChar, typename PatternChar>
- static ExternalReference search_string_raw(Isolate* isolate);
-
- static ExternalReference orderedhashmap_gethash_raw(Isolate* isolate);
-
- static ExternalReference get_or_create_hash_raw(Isolate* isolate);
- static ExternalReference jsreceiver_create_identity_hash(Isolate* isolate);
-
- static ExternalReference copy_fast_number_jsarray_elements_to_typed_array(
- Isolate* isolate);
- static ExternalReference copy_typed_array_elements_to_typed_array(
- Isolate* isolate);
- static ExternalReference copy_typed_array_elements_slice(Isolate* isolate);
-
- static ExternalReference page_flags(Page* page);
-
- static ExternalReference ForDeoptEntry(Address entry);
-
- static ExternalReference cpu_features();
-
- static ExternalReference debug_is_active_address(Isolate* isolate);
- static ExternalReference debug_hook_on_function_call_address(
- Isolate* isolate);
-
- static ExternalReference is_profiling_address(Isolate* isolate);
- static ExternalReference invoke_function_callback(Isolate* isolate);
- static ExternalReference invoke_accessor_getter_callback(Isolate* isolate);
-
- static ExternalReference promise_hook_or_debug_is_active_address(
- Isolate* isolate);
-
- V8_EXPORT_PRIVATE static ExternalReference runtime_function_table_address(
- Isolate* isolate);
-
- static ExternalReference invalidate_prototype_chains_function(
- Isolate* isolate);
-
- Address address() const { return reinterpret_cast<Address>(address_); }
-
- // Used to read out the last step action of the debugger.
- static ExternalReference debug_last_step_action_address(Isolate* isolate);
-
- // Used to check for suspended generator, used for stepping across await call.
- static ExternalReference debug_suspended_generator_address(Isolate* isolate);
-
- // Used to store the frame pointer to drop to when restarting a frame.
- static ExternalReference debug_restart_fp_address(Isolate* isolate);
-
-#ifndef V8_INTERPRETED_REGEXP
- // C functions called from RegExp generated code.
-
- // Function NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()
- static ExternalReference re_case_insensitive_compare_uc16(Isolate* isolate);
-
- // Function RegExpMacroAssembler*::CheckStackGuardState()
- static ExternalReference re_check_stack_guard_state(Isolate* isolate);
-
- // Function NativeRegExpMacroAssembler::GrowStack()
- static ExternalReference re_grow_stack(Isolate* isolate);
-
- // byte NativeRegExpMacroAssembler::word_character_bitmap
- static ExternalReference re_word_character_map();
-
-#endif
-
- // This lets you register a function that rewrites all external references.
- // Used by the ARM simulator to catch calls to external references.
- static void set_redirector(Isolate* isolate,
- ExternalReferenceRedirector* redirector);
-
- static ExternalReference stress_deopt_count(Isolate* isolate);
-
- static ExternalReference force_slow_path(Isolate* isolate);
-
- static ExternalReference fixed_typed_array_base_data_offset();
-
- private:
- explicit ExternalReference(void* address)
- : address_(address) {}
-
- static void* Redirect(Isolate* isolate,
- Address address_arg,
- Type type = ExternalReference::BUILTIN_CALL) {
- ExternalReferenceRedirector* redirector =
- reinterpret_cast<ExternalReferenceRedirector*>(
- isolate->external_reference_redirector());
- void* address = reinterpret_cast<void*>(address_arg);
- void* answer =
- (redirector == nullptr) ? address : (*redirector)(address, type);
- return answer;
- }
-
- void* address_;
-};
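The private Redirect helper above is the hook the instruction-set simulators rely on: once a redirector has been registered via set_redirector, every raw C++ address handed out as an external reference can be swapped for a trampoline the simulator knows how to call. A standalone sketch of that indirection; the global stands in for the per-isolate redirector slot, and a real redirector would return a stub rather than the original address:

#include <cstdio>

// Stand-ins for the per-isolate redirector machinery, not the V8 types.
typedef void* (*Redirector)(void* original, int type);
static Redirector g_redirector = nullptr;

static void* Redirect(void* address, int type) {
  return (g_redirector == nullptr) ? address : g_redirector(address, type);
}

// Toy redirector: a simulator would hand back a trampoline stub here.
static void* LoggingRedirector(void* original, int type) {
  std::printf("redirecting %p (type %d)\n", original, type);
  return original;
}

static double Square(double x) { return x * x; }

int main() {
  void* plain = Redirect(reinterpret_cast<void*>(&Square), 0);
  g_redirector = &LoggingRedirector;
  void* redirected = Redirect(reinterpret_cast<void*>(&Square), 0);
  std::printf("%d\n", plain == redirected);  // 1 here; under a simulator they differ
  return 0;
}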
-
-V8_EXPORT_PRIVATE bool operator==(ExternalReference, ExternalReference);
-bool operator!=(ExternalReference, ExternalReference);
-
-size_t hash_value(ExternalReference);
-
-V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, ExternalReference);
-
// -----------------------------------------------------------------------------
// Utility functions
@@ -1091,14 +746,18 @@ double power_double_double(double x, double y);
class ConstantPoolEntry {
public:
ConstantPoolEntry() {}
- ConstantPoolEntry(int position, intptr_t value, bool sharing_ok)
+ ConstantPoolEntry(int position, intptr_t value, bool sharing_ok,
+ RelocInfo::Mode rmode = RelocInfo::NONE)
: position_(position),
merged_index_(sharing_ok ? SHARING_ALLOWED : SHARING_PROHIBITED),
- value_(value) {}
- ConstantPoolEntry(int position, Double value)
+ value_(value),
+ rmode_(rmode) {}
+ ConstantPoolEntry(int position, Double value,
+ RelocInfo::Mode rmode = RelocInfo::NONE)
: position_(position),
merged_index_(SHARING_ALLOWED),
- value64_(value.AsUint64()) {}
+ value64_(value.AsUint64()),
+ rmode_(rmode) {}
int position() const { return position_; }
bool sharing_ok() const { return merged_index_ != SHARING_PROHIBITED; }
@@ -1122,6 +781,7 @@ class ConstantPoolEntry {
}
intptr_t value() const { return value_; }
uint64_t value64() const { return value64_; }
+ RelocInfo::Mode rmode() const { return rmode_; }
enum Type { INTPTR, DOUBLE, NUMBER_OF_TYPES };
@@ -1138,6 +798,9 @@ class ConstantPoolEntry {
intptr_t value_;
uint64_t value64_;
};
+ // TODO(leszeks): The way we use this, it could probably be packed into
+ // merged_index_ if size is a concern.
+ RelocInfo::Mode rmode_;
enum { SHARING_PROHIBITED = -2, SHARING_ALLOWED = -1 };
};
diff --git a/deps/v8/src/ast/ast-traversal-visitor.h b/deps/v8/src/ast/ast-traversal-visitor.h
index 3679ec762a..35432fa647 100644
--- a/deps/v8/src/ast/ast-traversal-visitor.h
+++ b/deps/v8/src/ast/ast-traversal-visitor.h
@@ -518,6 +518,15 @@ void AstTraversalVisitor<Subclass>::VisitSpread(Spread* expr) {
}
template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitStoreInArrayLiteral(
+ StoreInArrayLiteral* expr) {
+ PROCESS_EXPRESSION(expr);
+ RECURSE_EXPRESSION(Visit(expr->array()));
+ RECURSE_EXPRESSION(Visit(expr->index()));
+ RECURSE_EXPRESSION(Visit(expr->value()));
+}
+
+template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitEmptyParentheses(
EmptyParentheses* expr) {
PROCESS_EXPRESSION(expr);
@@ -536,6 +545,15 @@ void AstTraversalVisitor<Subclass>::VisitGetTemplateObject(
}
template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitTemplateLiteral(
+ TemplateLiteral* expr) {
+ PROCESS_EXPRESSION(expr);
+ for (Expression* sub : *expr->substitutions()) {
+ RECURSE_EXPRESSION(Visit(sub));
+ }
+}
+
+template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitImportCallExpression(
ImportCallExpression* expr) {
PROCESS_EXPRESSION(expr);
diff --git a/deps/v8/src/ast/ast-value-factory.h b/deps/v8/src/ast/ast-value-factory.h
index 6a3aea5fa0..a453455dd0 100644
--- a/deps/v8/src/ast/ast-value-factory.h
+++ b/deps/v8/src/ast/ast-value-factory.h
@@ -32,8 +32,8 @@
#include "src/base/hashmap.h"
#include "src/conversions.h"
-#include "src/factory.h"
#include "src/globals.h"
+#include "src/heap/factory.h"
#include "src/isolate.h"
#include "src/utils.h"
diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc
index 2856abb40c..392af8a501 100644
--- a/deps/v8/src/ast/ast.cc
+++ b/deps/v8/src/ast/ast.cc
@@ -211,6 +211,23 @@ Assignment::Assignment(NodeType node_type, Token::Value op, Expression* target,
bit_field_ |= TokenField::encode(op);
}
+void FunctionLiteral::set_inferred_name(Handle<String> inferred_name) {
+ DCHECK(!inferred_name.is_null());
+ inferred_name_ = inferred_name;
+ DCHECK(raw_inferred_name_ == nullptr || raw_inferred_name_->IsEmpty());
+ raw_inferred_name_ = nullptr;
+ scope()->set_has_inferred_function_name(true);
+}
+
+void FunctionLiteral::set_raw_inferred_name(
+ const AstConsString* raw_inferred_name) {
+ DCHECK_NOT_NULL(raw_inferred_name);
+ raw_inferred_name_ = raw_inferred_name;
+ DCHECK(inferred_name_.is_null());
+ inferred_name_ = Handle<String>();
+ scope()->set_has_inferred_function_name(true);
+}
+
bool FunctionLiteral::ShouldEagerCompile() const {
return scope()->ShouldEagerCompile();
}
diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h
index 661c5b7293..b95d54abb9 100644
--- a/deps/v8/src/ast/ast.h
+++ b/deps/v8/src/ast/ast.h
@@ -11,8 +11,8 @@
#include "src/ast/modules.h"
#include "src/ast/variables.h"
#include "src/bailout-reason.h"
-#include "src/factory.h"
#include "src/globals.h"
+#include "src/heap/factory.h"
#include "src/isolate.h"
#include "src/label.h"
#include "src/objects/literal-objects.h"
@@ -97,8 +97,10 @@ namespace internal {
V(ResolvedProperty) \
V(RewritableExpression) \
V(Spread) \
+ V(StoreInArrayLiteral) \
V(SuperCallReference) \
V(SuperPropertyReference) \
+ V(TemplateLiteral) \
V(ThisFunction) \
V(Throw) \
V(UnaryOperation) \
@@ -1946,6 +1948,29 @@ class Spread final : public Expression {
Expression* expression_;
};
+// The StoreInArrayLiteral node corresponds to the StaInArrayLiteral bytecode.
+// It is used in the rewriting of destructuring assignments that contain an
+// array rest pattern.
+class StoreInArrayLiteral final : public Expression {
+ public:
+ Expression* array() const { return array_; }
+ Expression* index() const { return index_; }
+ Expression* value() const { return value_; }
+
+ private:
+ friend class AstNodeFactory;
+
+ StoreInArrayLiteral(Expression* array, Expression* index, Expression* value,
+ int position)
+ : Expression(position, kStoreInArrayLiteral),
+ array_(array),
+ index_(index),
+ value_(value) {}
+
+ Expression* array_;
+ Expression* index_;
+ Expression* value_;
+};
class Conditional final : public Expression {
public:
@@ -2221,23 +2246,11 @@ class FunctionLiteral final : public Expression {
}
UNREACHABLE();
}
-
- // Only one of {set_inferred_name, set_raw_inferred_name} should be called.
- void set_inferred_name(Handle<String> inferred_name) {
- DCHECK(!inferred_name.is_null());
- inferred_name_ = inferred_name;
- DCHECK(raw_inferred_name_ == nullptr || raw_inferred_name_->IsEmpty());
- raw_inferred_name_ = nullptr;
- }
-
const AstConsString* raw_inferred_name() { return raw_inferred_name_; }
- void set_raw_inferred_name(const AstConsString* raw_inferred_name) {
- DCHECK_NOT_NULL(raw_inferred_name);
- raw_inferred_name_ = raw_inferred_name;
- DCHECK(inferred_name_.is_null());
- inferred_name_ = Handle<String>();
- }
+ // Only one of {set_inferred_name, set_raw_inferred_name} should be called.
+ void set_inferred_name(Handle<String> inferred_name);
+ void set_raw_inferred_name(const AstConsString* raw_inferred_name);
bool pretenure() const { return Pretenure::decode(bit_field_); }
void set_pretenure() { bit_field_ = Pretenure::update(bit_field_, true); }
@@ -2279,7 +2292,9 @@ class FunctionLiteral final : public Expression {
void set_suspend_count(int suspend_count) { suspend_count_ = suspend_count; }
int return_position() {
- return std::max(start_position(), end_position() - (has_braces_ ? 1 : 0));
+ return std::max(
+ start_position(),
+ end_position() - (HasBracesField::decode(bit_field_) ? 1 : 0));
}
int function_literal_id() const { return function_literal_id_; }
@@ -2315,19 +2330,19 @@ class FunctionLiteral final : public Expression {
function_length_(function_length),
function_token_position_(kNoSourcePosition),
suspend_count_(0),
- has_braces_(has_braces),
+ function_literal_id_(function_literal_id),
raw_name_(name ? ast_value_factory->NewConsString(name) : nullptr),
scope_(scope),
body_(body),
raw_inferred_name_(ast_value_factory->empty_cons_string()),
- function_literal_id_(function_literal_id),
produced_preparsed_scope_data_(produced_preparsed_scope_data) {
bit_field_ |= FunctionTypeBits::encode(function_type) |
Pretenure::encode(false) |
HasDuplicateParameters::encode(has_duplicate_parameters ==
kHasDuplicateParameters) |
DontOptimizeReasonField::encode(BailoutReason::kNoReason) |
- RequiresInstanceFieldsInitializer::encode(false);
+ RequiresInstanceFieldsInitializer::encode(false) |
+ HasBracesField::encode(has_braces);
if (eager_compile_hint == kShouldEagerCompile) SetShouldEagerCompile();
DCHECK_EQ(body == nullptr, expected_property_count < 0);
}
@@ -2340,20 +2355,21 @@ class FunctionLiteral final : public Expression {
: public BitField<BailoutReason, HasDuplicateParameters::kNext, 8> {};
class RequiresInstanceFieldsInitializer
: public BitField<bool, DontOptimizeReasonField::kNext, 1> {};
+ class HasBracesField
+ : public BitField<bool, RequiresInstanceFieldsInitializer::kNext, 1> {};
int expected_property_count_;
int parameter_count_;
int function_length_;
int function_token_position_;
int suspend_count_;
- bool has_braces_;
+ int function_literal_id_;
const AstConsString* raw_name_;
DeclarationScope* scope_;
ZoneList<Statement*>* body_;
const AstConsString* raw_inferred_name_;
Handle<String> inferred_name_;
- int function_literal_id_;
ProducedPreParsedScopeData* produced_preparsed_scope_data_;
};
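The hunks above fold the former has_braces_ bool into bit_field_ (as HasBracesField, one bit after RequiresInstanceFieldsInitializer) and reorder function_literal_id_, so return_position() now decodes a bit instead of reading a dedicated member. A minimal standalone sketch of the encode/decode/update pattern, assuming the usual BitField shape; this is not V8's actual template:

#include <cstdint>
#include <cstdio>

// Simplified BitField: a value of type T occupying kSize bits at kShift.
template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr uint32_t kMask = ((1u << kSize) - 1) << kShift;
  static constexpr int kNext = kShift + kSize;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static T decode(uint32_t field) {
    return static_cast<T>((field & kMask) >> kShift);
  }
  static uint32_t update(uint32_t field, T value) {
    return (field & ~kMask) | encode(value);
  }
};

using Pretenure = BitField<bool, 0, 1>;
using HasBraces = BitField<bool, Pretenure::kNext, 1>;

int main() {
  uint32_t bits = Pretenure::encode(false) | HasBraces::encode(true);
  std::printf("%d %d\n", Pretenure::decode(bits), HasBraces::decode(bits));  // 0 1
  bits = Pretenure::update(bits, true);
  std::printf("%d\n", Pretenure::decode(bits));  // 1
  return 0;
}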
@@ -2650,6 +2666,26 @@ class GetTemplateObject final : public Expression {
const ZoneList<const AstRawString*>* raw_strings_;
};
+class TemplateLiteral final : public Expression {
+ public:
+ using StringList = ZoneList<const AstRawString*>;
+ using ExpressionList = ZoneList<Expression*>;
+
+ const StringList* string_parts() const { return string_parts_; }
+ const ExpressionList* substitutions() const { return substitutions_; }
+
+ private:
+ friend class AstNodeFactory;
+ TemplateLiteral(const StringList* parts, const ExpressionList* substitutions,
+ int pos)
+ : Expression(pos, kTemplateLiteral),
+ string_parts_(parts),
+ substitutions_(substitutions) {}
+
+ const StringList* string_parts_;
+ const ExpressionList* substitutions_;
+};
+
// ----------------------------------------------------------------------------
// Basic visitor
// Sub-class should parametrize AstVisitor with itself, e.g.:
@@ -3067,6 +3103,12 @@ class AstNodeFactory final BASE_EMBEDDED {
return new (zone_) Spread(expression, pos, expr_pos);
}
+ StoreInArrayLiteral* NewStoreInArrayLiteral(Expression* array,
+ Expression* index,
+ Expression* value, int pos) {
+ return new (zone_) StoreInArrayLiteral(array, index, value, pos);
+ }
+
Conditional* NewConditional(Expression* condition,
Expression* then_expression,
Expression* else_expression,
@@ -3226,6 +3268,12 @@ class AstNodeFactory final BASE_EMBEDDED {
return new (zone_) GetTemplateObject(cooked_strings, raw_strings, pos);
}
+ TemplateLiteral* NewTemplateLiteral(
+ const ZoneList<const AstRawString*>* string_parts,
+ const ZoneList<Expression*>* substitutions, int pos) {
+ return new (zone_) TemplateLiteral(string_parts, substitutions, pos);
+ }
+
ImportCallExpression* NewImportCallExpression(Expression* args, int pos) {
return new (zone_) ImportCallExpression(args, pos);
}
diff --git a/deps/v8/src/ast/compile-time-value.cc b/deps/v8/src/ast/compile-time-value.cc
index 4345e774ee..f21759ab7d 100644
--- a/deps/v8/src/ast/compile-time-value.cc
+++ b/deps/v8/src/ast/compile-time-value.cc
@@ -5,8 +5,8 @@
#include "src/ast/compile-time-value.h"
#include "src/ast/ast.h"
-#include "src/factory.h"
#include "src/handles-inl.h"
+#include "src/heap/factory.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc
index d898a70479..2ca75e3c31 100644
--- a/deps/v8/src/ast/prettyprinter.cc
+++ b/deps/v8/src/ast/prettyprinter.cc
@@ -442,6 +442,11 @@ void CallPrinter::VisitSpread(Spread* node) {
Print(")");
}
+void CallPrinter::VisitStoreInArrayLiteral(StoreInArrayLiteral* node) {
+ Find(node->array());
+ Find(node->index());
+ Find(node->value());
+}
void CallPrinter::VisitEmptyParentheses(EmptyParentheses* node) {
UNREACHABLE();
@@ -466,6 +471,12 @@ void CallPrinter::VisitGetIterator(GetIterator* node) {
void CallPrinter::VisitGetTemplateObject(GetTemplateObject* node) {}
+void CallPrinter::VisitTemplateLiteral(TemplateLiteral* node) {
+ for (Expression* substitution : *node->substitutions()) {
+ Find(substitution, true);
+ }
+}
+
void CallPrinter::VisitImportCallExpression(ImportCallExpression* node) {
Print("ImportCall(");
Find(node->argument(), true);
@@ -1318,6 +1329,12 @@ void AstPrinter::VisitSpread(Spread* node) {
Visit(node->expression());
}
+void AstPrinter::VisitStoreInArrayLiteral(StoreInArrayLiteral* node) {
+ IndentedScope indent(this, "STORE IN ARRAY LITERAL", node->position());
+ PrintIndentedVisit("ARRAY", node->array());
+ PrintIndentedVisit("INDEX", node->index());
+ PrintIndentedVisit("VALUE", node->value());
+}
void AstPrinter::VisitEmptyParentheses(EmptyParentheses* node) {
IndentedScope indent(this, "()", node->position());
@@ -1332,6 +1349,19 @@ void AstPrinter::VisitGetTemplateObject(GetTemplateObject* node) {
IndentedScope indent(this, "GET-TEMPLATE-OBJECT", node->position());
}
+void AstPrinter::VisitTemplateLiteral(TemplateLiteral* node) {
+ IndentedScope indent(this, "TEMPLATE-LITERAL", node->position());
+ const AstRawString* string = node->string_parts()->first();
+ if (!string->IsEmpty()) PrintLiteralIndented("SPAN", string, true);
+ for (int i = 0; i < node->substitutions()->length();) {
+ PrintIndentedVisit("EXPR", node->substitutions()->at(i++));
+ if (i < node->string_parts()->length()) {
+ string = node->string_parts()->at(i);
+ if (!string->IsEmpty()) PrintLiteralIndented("SPAN", string, true);
+ }
+ }
+}
+
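The printer above walks string_parts and substitutions in lockstep: a template literal with N substitutions carries N+1 string parts, so spans and expressions alternate, starting and ending with a (possibly empty) span. A standalone sketch of that interleaving for the source "a${x}b${y}c", not V8 code:

#include <cstddef>
#include <cstdio>
#include <string>
#include <vector>

int main() {
  // For "a${x}b${y}c": string_parts = {"a", "b", "c"}, substitutions = {x, y}.
  std::vector<std::string> parts = {"a", "b", "c"};
  std::vector<std::string> subs = {"<x>", "<y>"};  // stand-ins for expressions
  std::string out = parts[0];
  for (std::size_t i = 0; i < subs.size(); ++i) {
    out += subs[i];
    if (i + 1 < parts.size()) out += parts[i + 1];
  }
  std::printf("%s\n", out.c_str());  // a<x>b<y>c
  return 0;
}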
void AstPrinter::VisitImportCallExpression(ImportCallExpression* node) {
IndentedScope indent(this, "IMPORT-CALL", node->position());
Visit(node->argument());
diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc
index a87e756a0e..2c1355ead1 100644
--- a/deps/v8/src/ast/scopes.cc
+++ b/deps/v8/src/ast/scopes.cc
@@ -1492,6 +1492,7 @@ void DeclarationScope::ResetAfterPreparsing(AstValueFactory* ast_value_factory,
unresolved_ = nullptr;
sloppy_block_function_map_ = nullptr;
rare_data_ = nullptr;
+ has_rest_ = false;
if (aborted) {
// Prepare scope for use in the outer zone.
diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h
index 2ffaaf6752..c95e3a380a 100644
--- a/deps/v8/src/ast/scopes.h
+++ b/deps/v8/src/ast/scopes.h
@@ -590,8 +590,9 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
Variable* LookupRecursive(ParseInfo* info, VariableProxy* proxy,
Scope* outer_scope_end);
void ResolveTo(ParseInfo* info, VariableProxy* proxy, Variable* var);
- MUST_USE_RESULT bool ResolveVariable(ParseInfo* info, VariableProxy* proxy);
- MUST_USE_RESULT bool ResolveVariablesRecursively(ParseInfo* info);
+ V8_WARN_UNUSED_RESULT bool ResolveVariable(ParseInfo* info,
+ VariableProxy* proxy);
+ V8_WARN_UNUSED_RESULT bool ResolveVariablesRecursively(ParseInfo* info);
// Finds free variables of this scope. This mutates the unresolved variables
// list along the way, so full resolution cannot be done afterwards.
@@ -854,7 +855,7 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
// Returns false if private fields can not be resolved and
// ParseInfo's pending_error_handler will be populated with an
// error. Otherwise, returns true.
- MUST_USE_RESULT
+ V8_WARN_UNUSED_RESULT
static bool Analyze(ParseInfo* info);
// To be called during parsing. Do just enough scope analysis that we can
@@ -901,6 +902,14 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
is_skipped_function_ = is_skipped_function;
}
+ bool has_inferred_function_name() const {
+ return has_inferred_function_name_;
+ }
+ void set_has_inferred_function_name(bool value) {
+ DCHECK(is_function_scope());
+ has_inferred_function_name_ = value;
+ }
+
// Save data describing the context allocation of the variables in this scope
// and its subscopes (except scopes at the laziness boundary). The data is
// saved in produced_preparsed_scope_data_.
@@ -952,6 +961,7 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
bool is_being_lazily_parsed_ : 1;
#endif
bool is_skipped_function_ : 1;
+ bool has_inferred_function_name_ : 1;
// Parameter list in source order.
ZoneList<Variable*> params_;
diff --git a/deps/v8/src/bailout-reason.h b/deps/v8/src/bailout-reason.h
index 51e3708b6a..d29693e3f8 100644
--- a/deps/v8/src/bailout-reason.h
+++ b/deps/v8/src/bailout-reason.h
@@ -32,6 +32,7 @@ namespace internal {
V(kInvalidHandleScopeLevel, "Invalid HandleScope level") \
V(kInvalidJumpTableIndex, "Invalid jump table index") \
V(kInvalidRegisterFileInGenerator, "invalid register file in generator") \
+ V(kInvalidSharedFunctionInfoData, "Invalid SharedFunctionInfo data") \
V(kMissingBytecodeArray, "Missing bytecode array from function") \
V(kObjectNotTagged, "The object is not tagged") \
V(kObjectTagged, "The object is tagged") \
@@ -39,11 +40,14 @@ namespace internal {
V(kOperandIsASmi, "Operand is a smi") \
V(kOperandIsASmiAndNotABoundFunction, \
"Operand is a smi and not a bound function") \
+ V(kOperandIsASmiAndNotAConstructor, \
+ "Operand is a smi and not a constructor") \
V(kOperandIsASmiAndNotAFixedArray, "Operand is a smi and not a fixed array") \
V(kOperandIsASmiAndNotAFunction, "Operand is a smi and not a function") \
V(kOperandIsASmiAndNotAGeneratorObject, \
"Operand is a smi and not a generator object") \
V(kOperandIsNotABoundFunction, "Operand is not a bound function") \
+ V(kOperandIsNotAConstructor, "Operand is not a constructor") \
V(kOperandIsNotAFixedArray, "Operand is not a fixed array") \
V(kOperandIsNotAFunction, "Operand is not a function") \
V(kOperandIsNotAGeneratorObject, "Operand is not a generator object") \
diff --git a/deps/v8/src/base.isolate b/deps/v8/src/base.isolate
deleted file mode 100644
index 0fcf818069..0000000000
--- a/deps/v8/src/base.isolate
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright 2015 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-{
- 'includes': [
- '../third_party/icu/icu.isolate',
-
- # MSVS runtime libraries.
- '../gni/msvs_dependencies.isolate',
- ],
- 'conditions': [
- ['v8_use_snapshot=="true" and v8_use_external_startup_data==1', {
- 'variables': {
- 'files': [
- '<(PRODUCT_DIR)/natives_blob.bin',
- '<(PRODUCT_DIR)/snapshot_blob.bin',
- ],
- },
- }],
- ['tsan==1', {
- 'variables': {
- 'files': [
- '../tools/sanitizers/tsan_suppressions.txt',
- ],
- },
- }],
- ['asan==1 or cfi_vptr==1 or msan==1 or tsan==1 or ubsan_vptr==1', {
- 'variables': {
- 'files': [
- '../third_party/llvm-build/Release+Asserts/bin/llvm-symbolizer<(EXECUTABLE_SUFFIX)',
- ],
- },
- }],
- # Workaround for https://code.google.com/p/swarming/issues/detail?id=211
- ['asan==0 or cfi_vptr==0 or msan==0 or tsan==0 or ubsan_vptr==0 ', {
- 'variables': {},
- }],
- ],
-}
diff --git a/deps/v8/src/base/compiler-specific.h b/deps/v8/src/base/compiler-specific.h
index 1858caa047..75f89298f1 100644
--- a/deps/v8/src/base/compiler-specific.h
+++ b/deps/v8/src/base/compiler-specific.h
@@ -16,16 +16,6 @@
#define ALLOW_UNUSED_TYPE
#endif
-
-// Annotate a function indicating the caller must examine the return value.
-// Use like:
-// int foo() WARN_UNUSED_RESULT;
-#if V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT
-#define WARN_UNUSED_RESULT __attribute__((warn_unused_result))
-#else
-#define WARN_UNUSED_RESULT /* NOT SUPPORTED */
-#endif
-
// Tell the compiler a function is using a printf-style format string.
// |format_param| is the one-based index of the format string parameter;
// |dots_param| is the one-based index of the "..." parameter.
diff --git a/deps/v8/src/base/debug/stack_trace_posix.cc b/deps/v8/src/base/debug/stack_trace_posix.cc
index ec3add1682..51b821bdd1 100644
--- a/deps/v8/src/base/debug/stack_trace_posix.cc
+++ b/deps/v8/src/base/debug/stack_trace_posix.cc
@@ -72,6 +72,7 @@ const char kMangledSymbolPrefix[] = "_Z";
const char kSymbolCharacters[] =
"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_";
+#if HAVE_EXECINFO_H
// Demangles C++ symbols in the given text. Example:
//
// "out/Debug/base_unittests(_ZN10StackTraceC1Ev+0x20) [0x817778c]"
@@ -81,7 +82,6 @@ void DemangleSymbols(std::string* text) {
// Note: code in this function is NOT async-signal safe (std::string uses
// malloc internally).
-#if HAVE_EXECINFO_H
std::string::size_type search_from = 0;
while (search_from < text->size()) {
@@ -117,9 +117,8 @@ void DemangleSymbols(std::string* text) {
search_from = mangled_start + 2;
}
}
-
-#endif // HAVE_EXECINFO_H
}
+#endif // HAVE_EXECINFO_H
class BacktraceOutputHandler {
public:
@@ -129,6 +128,7 @@ class BacktraceOutputHandler {
virtual ~BacktraceOutputHandler() {}
};
+#if HAVE_EXECINFO_H
void OutputPointer(void* pointer, BacktraceOutputHandler* handler) {
// This should be more than enough to store a 64-bit number in hex:
// 16 hex digits + 1 for null-terminator.
@@ -139,7 +139,6 @@ void OutputPointer(void* pointer, BacktraceOutputHandler* handler) {
handler->HandleOutput(buf);
}
-#if HAVE_EXECINFO_H
void ProcessBacktrace(void* const* trace, size_t size,
BacktraceOutputHandler* handler) {
// NOTE: This code MUST be async-signal safe (it's used by in-process
diff --git a/deps/v8/src/base/debug/stack_trace_win.cc b/deps/v8/src/base/debug/stack_trace_win.cc
index 7a7e4f5168..6b22131233 100644
--- a/deps/v8/src/base/debug/stack_trace_win.cc
+++ b/deps/v8/src/base/debug/stack_trace_win.cc
@@ -163,24 +163,11 @@ void DisableSignalStackDump() {
g_dump_stack_in_signal_handler = false;
}
-// Disable optimizations for the StackTrace::StackTrace function. It is
-// important to disable at least frame pointer optimization ("y"), since
-// that breaks CaptureStackBackTrace() and prevents StackTrace from working
-// in Release builds (it may still be janky if other frames are using FPO,
-// but at least it will make it further).
-#if defined(V8_CC_MSVC)
-#pragma optimize("", off)
-#endif
-
StackTrace::StackTrace() {
// When walking our own stack, use CaptureStackBackTrace().
count_ = CaptureStackBackTrace(0, arraysize(trace_), trace_, nullptr);
}
-#if defined(V8_CC_MSVC)
-#pragma optimize("", on)
-#endif
-
StackTrace::StackTrace(EXCEPTION_POINTERS* exception_pointers) {
InitTrace(exception_pointers->ContextRecord);
}
diff --git a/deps/v8/src/base/flags.h b/deps/v8/src/base/flags.h
index 6bdb69319d..3bec12cb88 100644
--- a/deps/v8/src/base/flags.h
+++ b/deps/v8/src/base/flags.h
@@ -75,49 +75,49 @@ class Flags final {
mask_type mask_;
};
-
-#define DEFINE_OPERATORS_FOR_FLAGS(Type) \
- inline Type operator&( \
- Type::flag_type lhs, \
- Type::flag_type rhs)ALLOW_UNUSED_TYPE WARN_UNUSED_RESULT; \
- inline Type operator&(Type::flag_type lhs, Type::flag_type rhs) { \
- return Type(lhs) & rhs; \
- } \
- inline Type operator&(Type::flag_type lhs, \
- const Type& rhs)ALLOW_UNUSED_TYPE WARN_UNUSED_RESULT; \
- inline Type operator&(Type::flag_type lhs, const Type& rhs) { \
- return rhs & lhs; \
- } \
- inline void operator&(Type::flag_type lhs, \
- Type::mask_type rhs)ALLOW_UNUSED_TYPE; \
- inline void operator&(Type::flag_type lhs, Type::mask_type rhs) {} \
- inline Type operator|(Type::flag_type lhs, Type::flag_type rhs) \
- ALLOW_UNUSED_TYPE WARN_UNUSED_RESULT; \
- inline Type operator|(Type::flag_type lhs, Type::flag_type rhs) { \
- return Type(lhs) | rhs; \
- } \
- inline Type operator|(Type::flag_type lhs, const Type& rhs) \
- ALLOW_UNUSED_TYPE WARN_UNUSED_RESULT; \
- inline Type operator|(Type::flag_type lhs, const Type& rhs) { \
- return rhs | lhs; \
- } \
- inline void operator|(Type::flag_type lhs, Type::mask_type rhs) \
- ALLOW_UNUSED_TYPE; \
- inline void operator|(Type::flag_type lhs, Type::mask_type rhs) {} \
- inline Type operator^(Type::flag_type lhs, Type::flag_type rhs) \
- ALLOW_UNUSED_TYPE WARN_UNUSED_RESULT; \
- inline Type operator^(Type::flag_type lhs, Type::flag_type rhs) { \
- return Type(lhs) ^ rhs; \
- } inline Type \
- operator^(Type::flag_type lhs, const Type& rhs) \
- ALLOW_UNUSED_TYPE WARN_UNUSED_RESULT; \
- inline Type operator^(Type::flag_type lhs, const Type& rhs) { \
- return rhs ^ lhs; \
- } inline void \
- operator^(Type::flag_type lhs, Type::mask_type rhs) ALLOW_UNUSED_TYPE; \
- inline void operator^(Type::flag_type lhs, Type::mask_type rhs) { \
- } inline Type \
- operator~(Type::flag_type val)ALLOW_UNUSED_TYPE; \
+#define DEFINE_OPERATORS_FOR_FLAGS(Type) \
+ inline Type operator&( \
+ Type::flag_type lhs, \
+ Type::flag_type rhs)ALLOW_UNUSED_TYPE V8_WARN_UNUSED_RESULT; \
+ inline Type operator&(Type::flag_type lhs, Type::flag_type rhs) { \
+ return Type(lhs) & rhs; \
+ } \
+ inline Type operator&( \
+ Type::flag_type lhs, \
+ const Type& rhs)ALLOW_UNUSED_TYPE V8_WARN_UNUSED_RESULT; \
+ inline Type operator&(Type::flag_type lhs, const Type& rhs) { \
+ return rhs & lhs; \
+ } \
+ inline void operator&(Type::flag_type lhs, \
+ Type::mask_type rhs)ALLOW_UNUSED_TYPE; \
+ inline void operator&(Type::flag_type lhs, Type::mask_type rhs) {} \
+ inline Type operator|(Type::flag_type lhs, Type::flag_type rhs) \
+ ALLOW_UNUSED_TYPE V8_WARN_UNUSED_RESULT; \
+ inline Type operator|(Type::flag_type lhs, Type::flag_type rhs) { \
+ return Type(lhs) | rhs; \
+ } \
+ inline Type operator|(Type::flag_type lhs, const Type& rhs) \
+ ALLOW_UNUSED_TYPE V8_WARN_UNUSED_RESULT; \
+ inline Type operator|(Type::flag_type lhs, const Type& rhs) { \
+ return rhs | lhs; \
+ } \
+ inline void operator|(Type::flag_type lhs, Type::mask_type rhs) \
+ ALLOW_UNUSED_TYPE; \
+ inline void operator|(Type::flag_type lhs, Type::mask_type rhs) {} \
+ inline Type operator^(Type::flag_type lhs, Type::flag_type rhs) \
+ ALLOW_UNUSED_TYPE V8_WARN_UNUSED_RESULT; \
+ inline Type operator^(Type::flag_type lhs, Type::flag_type rhs) { \
+ return Type(lhs) ^ rhs; \
+ } \
+ inline Type operator^(Type::flag_type lhs, const Type& rhs) \
+ ALLOW_UNUSED_TYPE V8_WARN_UNUSED_RESULT; \
+ inline Type operator^(Type::flag_type lhs, const Type& rhs) { \
+ return rhs ^ lhs; \
+ } \
+ inline void operator^(Type::flag_type lhs, Type::mask_type rhs) \
+ ALLOW_UNUSED_TYPE; \
+ inline void operator^(Type::flag_type lhs, Type::mask_type rhs) {} \
+ inline Type operator~(Type::flag_type val)ALLOW_UNUSED_TYPE; \
inline Type operator~(Type::flag_type val) { return ~Type(val); }
} // namespace base
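
For readers unfamiliar with base::Flags, here is a minimal standalone sketch of what one of the operators generated by DEFINE_OPERATORS_FOR_FLAGS accomplishes; the names below (Permission, Permissions) are illustrative and are not the V8 types. Combining two enumerators of a flag enum yields the type-safe wrapper class instead of decaying to a plain integer:

#include <cstdint>

enum class Permission : uint32_t { kRead = 1, kWrite = 2 };

class Permissions {
 public:
  constexpr explicit Permissions(Permission p)
      : bits_(static_cast<uint32_t>(p)) {}
  constexpr explicit Permissions(uint32_t bits) : bits_(bits) {}
  constexpr Permissions operator|(Permissions other) const {
    return Permissions(bits_ | other.bits_);
  }

 private:
  uint32_t bits_;
};

// Analogue of one operator the macro above would generate for such a type:
constexpr Permissions operator|(Permission lhs, Permission rhs) {
  return Permissions(lhs) | rhs;
}

constexpr Permissions kReadWrite = Permission::kRead | Permission::kWrite;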
diff --git a/deps/v8/src/base/ieee754.cc b/deps/v8/src/base/ieee754.cc
index 54f7e2e6aa..95b84cf328 100644
--- a/deps/v8/src/base/ieee754.cc
+++ b/deps/v8/src/base/ieee754.cc
@@ -159,11 +159,11 @@ typedef union {
#define STRICT_ASSIGN(type, lval, rval) ((lval) = (rval))
-int32_t __ieee754_rem_pio2(double x, double *y) WARN_UNUSED_RESULT;
-double __kernel_cos(double x, double y) WARN_UNUSED_RESULT;
-int __kernel_rem_pio2(double *x, double *y, int e0, int nx, int prec,
- const int32_t *ipio2) WARN_UNUSED_RESULT;
-double __kernel_sin(double x, double y, int iy) WARN_UNUSED_RESULT;
+int32_t __ieee754_rem_pio2(double x, double* y) V8_WARN_UNUSED_RESULT;
+double __kernel_cos(double x, double y) V8_WARN_UNUSED_RESULT;
+int __kernel_rem_pio2(double* x, double* y, int e0, int nx, int prec,
+ const int32_t* ipio2) V8_WARN_UNUSED_RESULT;
+double __kernel_sin(double x, double y, int iy) V8_WARN_UNUSED_RESULT;
/* __ieee754_rem_pio2(x,y)
*
diff --git a/deps/v8/src/base/logging.h b/deps/v8/src/base/logging.h
index a21bc5e423..baf6b12ccb 100644
--- a/deps/v8/src/base/logging.h
+++ b/deps/v8/src/base/logging.h
@@ -106,11 +106,25 @@ V8_BASE_EXPORT void SetDcheckFunction(void (*dcheck_Function)(const char*, int,
// Define PrintCheckOperand<T> for each T which defines operator<< for ostream.
template <typename T>
-typename std::enable_if<has_output_operator<T>::value>::type PrintCheckOperand(
- std::ostream& os, T val) {
+typename std::enable_if<
+ !std::is_function<typename std::remove_pointer<T>::type>::value &&
+ has_output_operator<T>::value>::type
+PrintCheckOperand(std::ostream& os, T val) {
os << std::forward<T>(val);
}
+// Provide an overload for functions and function pointers. Function pointers
+// don't implicitly convert to void* but do implicitly convert to bool, so
+// without this function pointers are always printed as 1 or 0. (MSVC isn't
+// standards-conforming here and converts function pointers to regular
+// pointers, so this is a no-op for MSVC.)
+template <typename T>
+typename std::enable_if<
+ std::is_function<typename std::remove_pointer<T>::type>::value>::type
+PrintCheckOperand(std::ostream& os, T val) {
+ os << reinterpret_cast<const void*>(val);
+}
+
// Define PrintCheckOperand<T> for enums which have no operator<<.
template <typename T>
typename std::enable_if<std::is_enum<T>::value &&
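
A small self-contained illustration (not V8 code) of the behavior the new overload above addresses: streamed directly, a function pointer converts to bool, while a cast to const void* prints the address.

#include <iostream>

void SampleFunction() {}

int main() {
  // No operator<< takes a function pointer, so it converts to bool: prints 1.
  std::cout << SampleFunction << "\n";
  // Casting to an object pointer prints the address, as the overload does.
  std::cout << reinterpret_cast<const void*>(&SampleFunction) << "\n";
  return 0;
}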
diff --git a/deps/v8/src/base/macros.h b/deps/v8/src/base/macros.h
index 9de42131a4..db2f194591 100644
--- a/deps/v8/src/base/macros.h
+++ b/deps/v8/src/base/macros.h
@@ -150,11 +150,6 @@ V8_INLINE Dest bit_cast(Source const& source) {
#define INLINE(declarator) V8_INLINE declarator
#define NO_INLINE(declarator) V8_NOINLINE declarator
-
-// Newly written code should use WARN_UNUSED_RESULT.
-#define MUST_USE_RESULT WARN_UNUSED_RESULT
-
-
// Define V8_USE_ADDRESS_SANITIZER macros.
#if defined(__has_feature)
#if __has_feature(address_sanitizer)
@@ -196,12 +191,68 @@ V8_INLINE Dest bit_cast(Source const& source) {
// TODO(all) Replace all uses of this macro with static_assert, remove macro.
#define STATIC_ASSERT(test) static_assert(test, #test)
-// TODO(rongjie) Remove this workaround once we require gcc >= 5.0
-#if __GNUG__ && __GNUC__ < 5
-#define IS_TRIVIALLY_COPYABLE(T) \
- (__has_trivial_copy(T) && __has_trivial_destructor(T))
+namespace v8 {
+namespace base {
+
+// Note that some implementations of std::is_trivially_copyable mandate that at
+// least one of the copy constructor, move constructor, copy assignment or move
+// assignment is non-deleted, while others do not. Be aware that
+// base::is_trivially_copyable will also differ for these cases.
+template <typename T>
+struct is_trivially_copyable {
+#if V8_CC_MSVC
+ // Unfortunately, MSVC 2015 is broken in that std::is_trivially_copyable can
+ // be false even though it should be true according to the standard.
+ // (status at 2018-02-26, observed on the msvc waterfall bot).
+ // Interestingly, the lower-level primitives used below are working as
+ // intended, so we reimplement this according to the standard.
+ // See also https://developercommunity.visualstudio.com/content/problem/
+ // 170883/msvc-type-traits-stdis-trivial-is-bugged.html.
+ static constexpr bool value =
+ // Copy constructor is trivial or deleted.
+ (std::is_trivially_copy_constructible<T>::value ||
+ !std::is_copy_constructible<T>::value) &&
+ // Copy assignment operator is trivial or deleted.
+ (std::is_trivially_copy_assignable<T>::value ||
+ !std::is_copy_assignable<T>::value) &&
+ // Move constructor is trivial or deleted.
+ (std::is_trivially_move_constructible<T>::value ||
+ !std::is_move_constructible<T>::value) &&
+ // Move assignment operator is trivial or deleted.
+ (std::is_trivially_move_assignable<T>::value ||
+ !std::is_move_assignable<T>::value) &&
+ // (Some implementations mandate that one of the above is non-deleted, but
+ // the standard does not, so let's skip this check.)
+ // Trivial non-deleted destructor.
+ std::is_trivially_destructible<T>::value;
+
+#elif defined(__GNUC__) && __GNUC__ < 5
+ // WARNING:
+ // On older libstdc++ versions, there is no way to correctly implement
+ // is_trivially_copyable. The workaround below is an approximation (neither
+ // over- nor underapproximation). E.g. it wrongly returns true if the move
+ // constructor is non-trivial, and it wrongly returns false if the copy
+ // constructor is deleted, but copy assignment is trivial.
+ // TODO(rongjie) Remove this workaround once we require gcc >= 5.0
+ static constexpr bool value =
+ __has_trivial_copy(T) && __has_trivial_destructor(T);
+
#else
-#define IS_TRIVIALLY_COPYABLE(T) std::is_trivially_copyable<T>::value
+ static constexpr bool value = std::is_trivially_copyable<T>::value;
+#endif
+};
+#if defined(__GNUC__) && __GNUC__ < 5
+// On older libstdc++ versions, base::is_trivially_copyable<T>::value is only an
+// approximation (see above), so make ASSERT_{NOT_,}TRIVIALLY_COPYABLE a noop.
+#define ASSERT_TRIVIALLY_COPYABLE(T) static_assert(true, "check disabled")
+#define ASSERT_NOT_TRIVIALLY_COPYABLE(T) static_assert(true, "check disabled")
+#else
+#define ASSERT_TRIVIALLY_COPYABLE(T) \
+ static_assert(::v8::base::is_trivially_copyable<T>::value, \
+ #T " should be trivially copyable")
+#define ASSERT_NOT_TRIVIALLY_COPYABLE(T) \
+ static_assert(!::v8::base::is_trivially_copyable<T>::value, \
+ #T " should not be trivially copyable")
#endif
// The USE(x, ...) template is used to silence C++ compiler warnings
@@ -211,12 +262,15 @@ struct Use {
template <typename T>
Use(T&&) {} // NOLINT(runtime/explicit)
};
-#define USE(...) \
- do { \
- ::Use unused_tmp_array_for_use_macro[]{__VA_ARGS__}; \
- (void)unused_tmp_array_for_use_macro; \
+#define USE(...) \
+ do { \
+ ::v8::base::Use unused_tmp_array_for_use_macro[]{__VA_ARGS__}; \
+ (void)unused_tmp_array_for_use_macro; \
} while (false)
+} // namespace base
+} // namespace v8
+
// Define our own macros for writing 64-bit constants. This is less fragile
// than defining __STDC_CONSTANT_MACROS before including <stdint.h>, and it
// works on compilers that don't have it (like MSVC).
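
The MSVC branch above composes standard traits by hand. A self-contained sketch of the same composition, under an illustrative name, makes the rule it encodes explicit: every copy and move operation must be trivial or deleted, and the destructor must be trivial.

#include <type_traits>

template <typename T>
constexpr bool IsTriviallyCopyableApprox() {
  return (std::is_trivially_copy_constructible<T>::value ||
          !std::is_copy_constructible<T>::value) &&
         (std::is_trivially_copy_assignable<T>::value ||
          !std::is_copy_assignable<T>::value) &&
         (std::is_trivially_move_constructible<T>::value ||
          !std::is_move_constructible<T>::value) &&
         (std::is_trivially_move_assignable<T>::value ||
          !std::is_move_assignable<T>::value) &&
         std::is_trivially_destructible<T>::value;
}

struct Pod { int x; };
struct Virtual { virtual ~Virtual() {} };
static_assert(IsTriviallyCopyableApprox<Pod>(), "plain structs qualify");
static_assert(!IsTriviallyCopyableApprox<Virtual>(),
              "a virtual destructor disqualifies");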
diff --git a/deps/v8/src/base/platform/condition-variable.h b/deps/v8/src/base/platform/condition-variable.h
index 30c19612aa..af00c6e5d5 100644
--- a/deps/v8/src/base/platform/condition-variable.h
+++ b/deps/v8/src/base/platform/condition-variable.h
@@ -57,7 +57,7 @@ class V8_BASE_EXPORT ConditionVariable final {
// spuriously. When unblocked, regardless of the reason, the lock on the mutex
// is reacquired and |WaitFor()| exits. Returns true if the condition variable
// was notified prior to the timeout.
- bool WaitFor(Mutex* mutex, const TimeDelta& rel_time) WARN_UNUSED_RESULT;
+ bool WaitFor(Mutex* mutex, const TimeDelta& rel_time) V8_WARN_UNUSED_RESULT;
// The implementation-defined native handle type.
#if V8_OS_POSIX
diff --git a/deps/v8/src/base/platform/mutex.h b/deps/v8/src/base/platform/mutex.h
index 25f85b907e..59b653d6cd 100644
--- a/deps/v8/src/base/platform/mutex.h
+++ b/deps/v8/src/base/platform/mutex.h
@@ -51,7 +51,7 @@ class V8_BASE_EXPORT Mutex final {
// Tries to lock the given mutex. Returns whether the mutex was
// successfully locked.
- bool TryLock() WARN_UNUSED_RESULT;
+ bool TryLock() V8_WARN_UNUSED_RESULT;
// The implementation-defined native handle type.
#if V8_OS_POSIX
@@ -150,7 +150,7 @@ class V8_BASE_EXPORT RecursiveMutex final {
// Tries to lock the given mutex. Returns whether the mutex was
// successfully locked.
- bool TryLock() WARN_UNUSED_RESULT;
+ bool TryLock() V8_WARN_UNUSED_RESULT;
// The implementation-defined native handle type.
#if V8_OS_POSIX
diff --git a/deps/v8/src/base/platform/platform-aix.cc b/deps/v8/src/base/platform/platform-aix.cc
index 39559552bb..b4bba251aa 100644
--- a/deps/v8/src/base/platform/platform-aix.cc
+++ b/deps/v8/src/base/platform/platform-aix.cc
@@ -39,21 +39,21 @@ namespace base {
class AIXTimezoneCache : public PosixTimezoneCache {
const char* LocalTimezone(double time) override;
- double LocalTimeOffset() override;
+ double LocalTimeOffset(double time_ms, bool is_utc) override;
~AIXTimezoneCache() override {}
};
-const char* AIXTimezoneCache::LocalTimezone(double time) {
- if (std::isnan(time)) return "";
- time_t tv = static_cast<time_t>(floor(time / msPerSecond));
+const char* AIXTimezoneCache::LocalTimezone(double time_ms) {
+ if (std::isnan(time_ms)) return "";
+ time_t tv = static_cast<time_t>(floor(time_ms / msPerSecond));
struct tm tm;
struct tm* t = localtime_r(&tv, &tm);
if (nullptr == t) return "";
return tzname[0]; // The location of the timezone string on AIX.
}
-double AIXTimezoneCache::LocalTimeOffset() {
+double AIXTimezoneCache::LocalTimeOffset(double time_ms, bool is_utc) {
// On AIX, struct tm does not contain a tm_gmtoff field.
time_t utc = time(nullptr);
DCHECK_NE(utc, -1);
diff --git a/deps/v8/src/base/platform/platform-cygwin.cc b/deps/v8/src/base/platform/platform-cygwin.cc
index 0d4ec9a10d..ddcdc1a2d3 100644
--- a/deps/v8/src/base/platform/platform-cygwin.cc
+++ b/deps/v8/src/base/platform/platform-cygwin.cc
@@ -66,7 +66,7 @@ uint8_t* RandomizedVirtualAlloc(size_t size, DWORD flags, DWORD protect,
class CygwinTimezoneCache : public PosixTimezoneCache {
const char* LocalTimezone(double time) override;
- double LocalTimeOffset() override;
+ double LocalTimeOffset(double time_ms, bool is_utc) override;
~CygwinTimezoneCache() override {}
};
@@ -80,7 +80,7 @@ const char* CygwinTimezoneCache::LocalTimezone(double time) {
return tzname[0]; // The location of the timezone string on Cygwin.
}
-double CygwinTimezoneCache::LocalTimeOffset() {
+double CygwinTimezoneCache::LocalTimeOffset(double time_ms, bool is_utc) {
// On Cygwin, struct tm does not contain a tm_gmtoff field.
time_t utc = time(nullptr);
DCHECK_NE(utc, -1);
diff --git a/deps/v8/src/base/platform/platform-posix-time.cc b/deps/v8/src/base/platform/platform-posix-time.cc
index 54618810c2..28e6431baf 100644
--- a/deps/v8/src/base/platform/platform-posix-time.cc
+++ b/deps/v8/src/base/platform/platform-posix-time.cc
@@ -18,7 +18,9 @@ const char* PosixDefaultTimezoneCache::LocalTimezone(double time) {
return t->tm_zone;
}
-double PosixDefaultTimezoneCache::LocalTimeOffset() {
+double PosixDefaultTimezoneCache::LocalTimeOffset(double time_ms, bool is_utc) {
+ // Preserve the old behavior for non-ICU implementation by ignoring both
+ // time_ms and is_utc.
time_t tv = time(nullptr);
struct tm tm;
struct tm* t = localtime_r(&tv, &tm);
diff --git a/deps/v8/src/base/platform/platform-posix-time.h b/deps/v8/src/base/platform/platform-posix-time.h
index 3fc1bfd900..4d3373715b 100644
--- a/deps/v8/src/base/platform/platform-posix-time.h
+++ b/deps/v8/src/base/platform/platform-posix-time.h
@@ -13,7 +13,7 @@ namespace base {
class PosixDefaultTimezoneCache : public PosixTimezoneCache {
public:
const char* LocalTimezone(double time_ms) override;
- double LocalTimeOffset() override;
+ double LocalTimeOffset(double time_ms, bool is_utc) override;
~PosixDefaultTimezoneCache() override {}
};
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index f85f7fe942..fee67589b6 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -491,6 +491,13 @@ int OS::GetCurrentThreadId() {
#endif
}
+void OS::ExitProcess(int exit_code) {
+ // Use _exit instead of exit to avoid races between isolate
+ // threads and static destructors.
+ fflush(stdout);
+ fflush(stderr);
+ _exit(exit_code);
+}
// ----------------------------------------------------------------------------
// POSIX date/time support.
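
A runnable illustration (not V8 code) of why _exit is used here: std::exit would run static destructors, which can race with isolate threads still using those objects, whereas _exit skips them after the explicit flushes.

#include <cstdio>
#include <unistd.h>

struct Logger {
  ~Logger() { std::puts("destroying logger"); }
};
static Logger global_logger;  // destroyed by std::exit, skipped by _exit

int main() {
  std::fflush(stdout);
  std::fflush(stderr);
  _exit(0);  // "destroying logger" is never printed
}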
diff --git a/deps/v8/src/base/platform/platform-solaris.cc b/deps/v8/src/base/platform/platform-solaris.cc
index b81895a3fb..477149db1b 100644
--- a/deps/v8/src/base/platform/platform-solaris.cc
+++ b/deps/v8/src/base/platform/platform-solaris.cc
@@ -37,8 +37,7 @@ namespace base {
class SolarisTimezoneCache : public PosixTimezoneCache {
const char* LocalTimezone(double time) override;
- double LocalTimeOffset() override;
-
+ double LocalTimeOffset(double time, bool is_utc) override;
~SolarisTimezoneCache() override {}
};
@@ -51,7 +50,7 @@ const char* SolarisTimezoneCache::LocalTimezone(double time) {
return tzname[0]; // The location of the timezone string on Solaris.
}
-double SolarisTimezoneCache::LocalTimeOffset() {
+double SolarisTimezoneCache::LocalTimeOffset(double time, bool is_utc) {
tzset();
return -static_cast<double>(timezone * msPerSecond);
}
diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index 3f1a586840..d4aa44f8a7 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -27,6 +27,10 @@
#include "src/base/timezone-cache.h"
#include "src/base/utils/random-number-generator.h"
+#if defined(_MSC_VER)
+#include <crtdbg.h> // NOLINT
+#endif // defined(_MSC_VER)
+
// Extra functions for MinGW. Most of these are the _s functions which are in
// the Microsoft Visual Studio C++ CRT.
#ifdef __MINGW32__
@@ -111,7 +115,7 @@ class WindowsTimezoneCache : public TimezoneCache {
const char* LocalTimezone(double time) override;
- double LocalTimeOffset() override;
+ double LocalTimeOffset(double time, bool is_utc) override;
double DaylightSavingsOffset(double time) override;
@@ -462,7 +466,9 @@ const char* WindowsTimezoneCache::LocalTimezone(double time) {
// Returns the local time offset in milliseconds east of UTC without
// taking daylight savings time into account.
-double WindowsTimezoneCache::LocalTimeOffset() {
+double WindowsTimezoneCache::LocalTimeOffset(double time_ms, bool is_utc) {
+ // Ignore is_utc and time_ms for now. That way, the behavior does not
+ // change with icu_timezone_data disabled.
// Use current time, rounded to the millisecond.
Win32Time t(OS::TimeCurrentMillis());
// Time::LocalOffset includes any daylight savings offset, so subtract it.
@@ -493,6 +499,13 @@ int OS::GetCurrentThreadId() {
return static_cast<int>(::GetCurrentThreadId());
}
+void OS::ExitProcess(int exit_code) {
+ // Use TerminateProcess to avoid races between isolate threads and
+ // static destructors.
+ fflush(stdout);
+ fflush(stderr);
+ TerminateProcess(GetCurrentProcess(), exit_code);
+}
// ----------------------------------------------------------------------------
// Win32 console output.
@@ -1239,6 +1252,24 @@ int OS::ActivationFrameAlignment() {
#endif
}
+#if (defined(_WIN32) || defined(_WIN64))
+void EnsureConsoleOutputWin32() {
+ UINT new_flags =
+ SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX;
+ UINT existing_flags = SetErrorMode(new_flags);
+ SetErrorMode(existing_flags | new_flags);
+#if defined(_MSC_VER)
+ _CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_DEBUG | _CRTDBG_MODE_FILE);
+ _CrtSetReportFile(_CRT_WARN, _CRTDBG_FILE_STDERR);
+ _CrtSetReportMode(_CRT_ASSERT, _CRTDBG_MODE_DEBUG | _CRTDBG_MODE_FILE);
+ _CrtSetReportFile(_CRT_ASSERT, _CRTDBG_FILE_STDERR);
+ _CrtSetReportMode(_CRT_ERROR, _CRTDBG_MODE_DEBUG | _CRTDBG_MODE_FILE);
+ _CrtSetReportFile(_CRT_ERROR, _CRTDBG_FILE_STDERR);
+ _set_error_mode(_OUT_TO_STDERR);
+#endif // defined(_MSC_VER)
+}
+#endif // (defined(_WIN32) || defined(_WIN64))
+
// ----------------------------------------------------------------------------
// Win32 thread support.
diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h
index 8a4545c607..4fbc87c4aa 100644
--- a/deps/v8/src/base/platform/platform.h
+++ b/deps/v8/src/base/platform/platform.h
@@ -245,6 +245,8 @@ class V8_BASE_EXPORT OS {
static int GetCurrentThreadId();
+ static void ExitProcess(int exit_code);
+
private:
// These classes use the private memory management API below.
friend class MemoryMappedFile;
@@ -279,6 +281,18 @@ class V8_BASE_EXPORT OS {
DISALLOW_IMPLICIT_CONSTRUCTORS(OS);
};
+#if (defined(_WIN32) || defined(_WIN64))
+V8_BASE_EXPORT void EnsureConsoleOutputWin32();
+#endif // (defined(_WIN32) || defined(_WIN64))
+
+inline void EnsureConsoleOutput() {
+#if (defined(_WIN32) || defined(_WIN64))
+ // Windows requires extra calls to send assert output to the console
+ // rather than a dialog box.
+ EnsureConsoleOutputWin32();
+#endif // (defined(_WIN32) || defined(_WIN64))
+}
+
// ----------------------------------------------------------------------------
// Thread
//
diff --git a/deps/v8/src/base/platform/semaphore.h b/deps/v8/src/base/platform/semaphore.h
index 31aeca3d9b..62c9c93988 100644
--- a/deps/v8/src/base/platform/semaphore.h
+++ b/deps/v8/src/base/platform/semaphore.h
@@ -47,7 +47,7 @@ class V8_BASE_EXPORT Semaphore final {
// Like Wait() but returns after rel_time time has passed. If the timeout
// happens the return value is false and the counter is unchanged. Otherwise
// the semaphore counter is decremented and true is returned.
- bool WaitFor(const TimeDelta& rel_time) WARN_UNUSED_RESULT;
+ bool WaitFor(const TimeDelta& rel_time) V8_WARN_UNUSED_RESULT;
#if V8_OS_MACOSX
typedef semaphore_t NativeHandle;
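
Throughout these headers WARN_UNUSED_RESULT becomes V8_WARN_UNUSED_RESULT. A hedged sketch of the effect, using the standard C++17 [[nodiscard]] attribute as a stand-in and an invented WaitForSketch function: discarding the result of a timed wait now draws a compiler diagnostic, so a timeout cannot be silently ignored.

#include <chrono>

[[nodiscard]] bool WaitForSketch(std::chrono::milliseconds rel_time) {
  return rel_time.count() > 0;  // stand-in for a real timed wait
}

void Caller() {
  // Calling WaitForSketch(...) and dropping the result would warn.
  if (!WaitForSketch(std::chrono::milliseconds(100))) {
    // handle the timeout explicitly
  }
}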
diff --git a/deps/v8/src/base/template-utils.h b/deps/v8/src/base/template-utils.h
index 18b50fe70c..07356346ec 100644
--- a/deps/v8/src/base/template-utils.h
+++ b/deps/v8/src/base/template-utils.h
@@ -79,20 +79,13 @@ struct pass_value_or_ref {
decay_t, const decay_t&>::type;
};
+// Uses expression SFINAE to detect whether using operator<< would work.
+template <typename T, typename = void>
+struct has_output_operator : std::false_type {};
template <typename T>
-struct has_output_operator {
- // This template is only instantiable if U provides operator<< with ostream.
- // Its return type is uint8_t.
- template <typename U>
- static auto __check_operator(U u)
- -> decltype(*(std::ostream*)nullptr << *u, uint8_t{0});
- // This is a fallback implementation, returning uint16_t. If the template
- // above is instantiable, is has precedence over this varargs function.
- static uint16_t __check_operator(...);
-
- using ptr_t = typename std::add_pointer<T>::type;
- static constexpr bool value = sizeof(__check_operator(ptr_t{nullptr})) == 1;
-};
+struct has_output_operator<T, decltype(void(std::declval<std::ostream&>()
+ << std::declval<T>()))>
+ : std::true_type {};
namespace detail {
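
A standalone restatement of the expression-SFINAE idiom adopted above, under an illustrative name: the partial specialization participates only when `os << value` is well-formed, so no helper overloads or sizeof tricks are needed.

#include <ostream>
#include <type_traits>
#include <utility>

template <typename T, typename = void>
struct streamable : std::false_type {};
template <typename T>
struct streamable<T, decltype(void(std::declval<std::ostream&>()
                                   << std::declval<T>()))> : std::true_type {};

struct NotStreamable {};
static_assert(streamable<int>::value, "int has operator<<");
static_assert(!streamable<NotStreamable>::value, "NotStreamable does not");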
diff --git a/deps/v8/src/base/timezone-cache.h b/deps/v8/src/base/timezone-cache.h
index ff9fde4d15..96ad7bb41f 100644
--- a/deps/v8/src/base/timezone-cache.h
+++ b/deps/v8/src/base/timezone-cache.h
@@ -20,10 +20,8 @@ class TimezoneCache {
// ES #sec-local-time-zone-adjustment
// Local Time Zone Adjustment
//
- // TODO(littledan): Make more accurate with another parameter along the
- // lines of this spec change:
// https://github.com/tc39/ecma262/pull/778
- virtual double LocalTimeOffset() = 0;
+ virtual double LocalTimeOffset(double time_ms, bool is_utc) = 0;
// Called when the local timezone changes
virtual void Clear() = 0;
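
A hedged, POSIX-only sketch of an implementation of the widened interface (the class name is invented; ignoring the new arguments mirrors the non-ICU caches in this patch, and the BSD/glibc tm_gmtoff field is one assumed way to obtain the offset). An ICU-backed cache can instead use time_ms and is_utc to compute the offset for the requested instant, per the linked tc39/ecma262#778 change.

#include <ctime>

class ExampleTimezoneCache {
 public:
  double LocalTimeOffset(double time_ms, bool is_utc) {
    (void)time_ms;  // deliberately ignored, like the POSIX default cache
    (void)is_utc;
    time_t now = time(nullptr);
    struct tm local;
    localtime_r(&now, &local);
    return static_cast<double>(local.tm_gmtoff) * 1000.0;  // ms east of UTC
  }
};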
diff --git a/deps/v8/src/base/utils/random-number-generator.h b/deps/v8/src/base/utils/random-number-generator.h
index 321ce861fb..b4b67970c7 100644
--- a/deps/v8/src/base/utils/random-number-generator.h
+++ b/deps/v8/src/base/utils/random-number-generator.h
@@ -50,9 +50,7 @@ class V8_BASE_EXPORT RandomNumberGenerator final {
// that one int value is pseudorandomly generated and returned.
// All 2^32 possible integer values are produced with (approximately) equal
// probability.
- V8_INLINE int NextInt() WARN_UNUSED_RESULT {
- return Next(32);
- }
+ V8_INLINE int NextInt() V8_WARN_UNUSED_RESULT { return Next(32); }
// Returns a pseudorandom, uniformly distributed int value between 0
// (inclusive) and the specified max value (exclusive), drawn from this random
@@ -60,30 +58,28 @@ class V8_BASE_EXPORT RandomNumberGenerator final {
// one int value in the specified range is pseudorandomly generated and
// returned. All max possible int values are produced with (approximately)
// equal probability.
- int NextInt(int max) WARN_UNUSED_RESULT;
+ int NextInt(int max) V8_WARN_UNUSED_RESULT;
// Returns the next pseudorandom, uniformly distributed boolean value from
// this random number generator's sequence. The general contract of
// |NextBoolean()| is that one boolean value is pseudorandomly generated and
// returned. The values true and false are produced with (approximately) equal
// probability.
- V8_INLINE bool NextBool() WARN_UNUSED_RESULT {
- return Next(1) != 0;
- }
+ V8_INLINE bool NextBool() V8_WARN_UNUSED_RESULT { return Next(1) != 0; }
// Returns the next pseudorandom, uniformly distributed double value between
// 0.0 and 1.0 from this random number generator's sequence.
// The general contract of |NextDouble()| is that one double value, chosen
// (approximately) uniformly from the range 0.0 (inclusive) to 1.0
// (exclusive), is pseudorandomly generated and returned.
- double NextDouble() WARN_UNUSED_RESULT;
+ double NextDouble() V8_WARN_UNUSED_RESULT;
// Returns the next pseudorandom, uniformly distributed int64 value from this
// random number generator's sequence. The general contract of |NextInt64()|
// is that one 64-bit int value is pseudorandomly generated and returned.
// All 2^64 possible integer values are produced with (approximately) equal
// probability.
- int64_t NextInt64() WARN_UNUSED_RESULT;
+ int64_t NextInt64() V8_WARN_UNUSED_RESULT;
// Fills the elements of a specified array of bytes with random numbers.
void NextBytes(void* buffer, size_t buflen);
@@ -91,7 +87,8 @@ class V8_BASE_EXPORT RandomNumberGenerator final {
// Returns the next pseudorandom set of n unique uint64 values smaller than
// max.
// n must be less or equal to max.
- std::vector<uint64_t> NextSample(uint64_t max, size_t n) WARN_UNUSED_RESULT;
+ std::vector<uint64_t> NextSample(uint64_t max,
+ size_t n) V8_WARN_UNUSED_RESULT;
// Returns the next pseudorandom set of n unique uint64 values smaller than
// max.
@@ -103,7 +100,7 @@ class V8_BASE_EXPORT RandomNumberGenerator final {
std::vector<uint64_t> NextSampleSlow(
uint64_t max, size_t n,
const std::unordered_set<uint64_t>& excluded =
- std::unordered_set<uint64_t>{}) WARN_UNUSED_RESULT;
+ std::unordered_set<uint64_t>{}) V8_WARN_UNUSED_RESULT;
// Override the current seed.
void SetSeed(int64_t seed);
@@ -136,7 +133,7 @@ class V8_BASE_EXPORT RandomNumberGenerator final {
static const int64_t kAddend = 0xb;
static const int64_t kMask = V8_2PART_UINT64_C(0xffff, ffffffff);
- int Next(int bits) WARN_UNUSED_RESULT;
+ int Next(int bits) V8_WARN_UNUSED_RESULT;
static uint64_t MurmurHash3(uint64_t);
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index ff4beae7fd..43c7527de9 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -356,28 +356,21 @@ void Bootstrapper::DetachGlobal(Handle<Context> env) {
namespace {
-// Non-construct case.
V8_NOINLINE Handle<SharedFunctionInfo> SimpleCreateSharedFunctionInfo(
Isolate* isolate, Builtins::Name builtin_id, Handle<String> name, int len) {
- Handle<Code> code = isolate->builtins()->builtin_handle(builtin_id);
- const bool kNotConstructor = false;
- Handle<SharedFunctionInfo> shared = isolate->factory()->NewSharedFunctionInfo(
- name, code, kNotConstructor, kNormalFunction, builtin_id);
+ Handle<SharedFunctionInfo> shared =
+ isolate->factory()->NewSharedFunctionInfoForBuiltin(name, builtin_id,
+ kNormalFunction);
shared->set_internal_formal_parameter_count(len);
shared->set_length(len);
return shared;
}
-// Construct case.
-V8_NOINLINE Handle<SharedFunctionInfo>
-SimpleCreateConstructorSharedFunctionInfo(Isolate* isolate,
- Builtins::Name builtin_id,
- Handle<String> name, int len) {
- Handle<Code> code = isolate->builtins()->builtin_handle(builtin_id);
- const bool kIsConstructor = true;
- Handle<SharedFunctionInfo> shared = isolate->factory()->NewSharedFunctionInfo(
- name, code, kIsConstructor, kNormalFunction, builtin_id);
- shared->SetConstructStub(*BUILTIN_CODE(isolate, JSBuiltinsConstructStub));
+V8_NOINLINE Handle<SharedFunctionInfo> SimpleCreateBuiltinSharedFunctionInfo(
+ Isolate* isolate, Builtins::Name builtin_id, Handle<String> name, int len) {
+ Handle<SharedFunctionInfo> shared =
+ isolate->factory()->NewSharedFunctionInfoForBuiltin(name, builtin_id,
+ kNormalFunction);
shared->set_internal_formal_parameter_count(len);
shared->set_length(len);
return shared;
@@ -402,14 +395,13 @@ V8_NOINLINE Handle<JSFunction> CreateFunction(
Isolate* isolate, Handle<String> name, InstanceType type, int instance_size,
int inobject_properties, MaybeHandle<Object> maybe_prototype,
Builtins::Name builtin_id) {
- Handle<Code> code(isolate->builtins()->builtin(builtin_id));
Handle<Object> prototype;
Handle<JSFunction> result;
if (maybe_prototype.ToHandle(&prototype)) {
NewFunctionArgs args = NewFunctionArgs::ForBuiltinWithPrototype(
- name, code, prototype, type, instance_size, inobject_properties,
- builtin_id, IMMUTABLE);
+ name, prototype, type, instance_size, inobject_properties, builtin_id,
+ IMMUTABLE);
result = isolate->factory()->NewFunction(args);
// Make the JSFunction's prototype object fast.
@@ -417,7 +409,7 @@ V8_NOINLINE Handle<JSFunction> CreateFunction(
kStartAtReceiver, isolate);
} else {
NewFunctionArgs args = NewFunctionArgs::ForBuiltinWithoutPrototype(
- name, code, builtin_id, LanguageMode::kStrict);
+ name, builtin_id, LanguageMode::kStrict);
result = isolate->factory()->NewFunction(args);
}
@@ -608,20 +600,18 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
// Allocate the empty function as the prototype for function according to
// ES#sec-properties-of-the-function-prototype-object
- Handle<Code> code(BUILTIN_CODE(isolate, EmptyFunction));
- NewFunctionArgs args =
- NewFunctionArgs::ForBuiltin(factory->empty_string(), code,
- empty_function_map, Builtins::kEmptyFunction);
+ NewFunctionArgs args = NewFunctionArgs::ForBuiltin(
+ factory->empty_string(), empty_function_map, Builtins::kEmptyFunction);
Handle<JSFunction> empty_function = factory->NewFunction(args);
// --- E m p t y ---
Handle<String> source = factory->NewStringFromStaticChars("() {}");
Handle<Script> script = factory->NewScript(source);
script->set_type(Script::TYPE_NATIVE);
- Handle<FixedArray> infos = factory->NewFixedArray(2);
+ Handle<WeakFixedArray> infos = factory->NewWeakFixedArray(2);
script->set_shared_function_infos(*infos);
- empty_function->shared()->set_start_position(0);
- empty_function->shared()->set_end_position(source->length());
+ empty_function->shared()->set_raw_start_position(0);
+ empty_function->shared()->set_raw_end_position(source->length());
empty_function->shared()->set_function_literal_id(1);
empty_function->shared()->DontAdaptArguments();
SharedFunctionInfo::SetScript(handle(empty_function->shared()), script);
@@ -663,9 +653,8 @@ Handle<JSFunction> Genesis::GetThrowTypeErrorIntrinsic() {
return restricted_properties_thrower_;
}
Handle<String> name(factory()->empty_string());
- Handle<Code> code = BUILTIN_CODE(isolate(), StrictPoisonPillThrower);
NewFunctionArgs args = NewFunctionArgs::ForBuiltinWithoutPrototype(
- name, code, Builtins::kStrictPoisonPillThrower, i::LanguageMode::kStrict);
+ name, Builtins::kStrictPoisonPillThrower, i::LanguageMode::kStrict);
Handle<JSFunction> function = factory()->NewFunction(args);
function->shared()->DontAdaptArguments();
@@ -761,8 +750,6 @@ void Genesis::CreateObjectFunction(Handle<JSFunction> empty_function) {
inobject_properties, factory->null_value(), Builtins::kObjectConstructor);
object_fun->shared()->set_length(1);
object_fun->shared()->DontAdaptArguments();
- object_fun->shared()->SetConstructStub(
- *BUILTIN_CODE(isolate_, ObjectConstructor_ConstructStub));
native_context()->set_object_function(*object_fun);
{
@@ -1228,11 +1215,10 @@ Handle<JSGlobalObject> Genesis::CreateNewGlobals(
if (js_global_object_template.is_null()) {
Handle<String> name(factory()->empty_string());
- Handle<Code> code = BUILTIN_CODE(isolate(), Illegal);
Handle<JSObject> prototype =
factory()->NewFunctionPrototype(isolate()->object_function());
NewFunctionArgs args = NewFunctionArgs::ForBuiltinWithPrototype(
- name, code, prototype, JS_GLOBAL_OBJECT_TYPE, JSGlobalObject::kSize, 0,
+ name, prototype, JS_GLOBAL_OBJECT_TYPE, JSGlobalObject::kSize, 0,
Builtins::kIllegal, MUTABLE);
js_global_object_function = factory()->NewFunction(args);
#ifdef DEBUG
@@ -1261,9 +1247,8 @@ Handle<JSGlobalObject> Genesis::CreateNewGlobals(
Handle<JSFunction> global_proxy_function;
if (global_proxy_template.IsEmpty()) {
Handle<String> name(factory()->empty_string());
- Handle<Code> code = BUILTIN_CODE(isolate(), Illegal);
NewFunctionArgs args = NewFunctionArgs::ForBuiltinWithPrototype(
- name, code, factory()->the_hole_value(), JS_GLOBAL_PROXY_TYPE,
+ name, factory()->the_hole_value(), JS_GLOBAL_PROXY_TYPE,
JSGlobalProxy::SizeWithEmbedderFields(0), 0, Builtins::kIllegal,
MUTABLE);
global_proxy_function = factory()->NewFunction(args);
@@ -1343,8 +1328,6 @@ static void InstallError(Isolate* isolate, Handle<JSObject> global,
global, name, JS_ERROR_TYPE, JSObject::kHeaderSize, 0,
factory->the_hole_value(), Builtins::kErrorConstructor, DONT_ENUM);
error_fun->shared()->DontAdaptArguments();
- error_fun->shared()->SetConstructStub(
- *BUILTIN_CODE(isolate, ErrorConstructor));
error_fun->shared()->set_length(1);
if (context_index == Context::ERROR_FUNCTION_INDEX) {
@@ -1399,11 +1382,9 @@ static void InstallError(Isolate* isolate, Handle<JSObject> global,
namespace {
void InstallMakeError(Isolate* isolate, int builtin_id, int context_index) {
- Handle<Code> code(isolate->builtins()->builtin(builtin_id));
NewFunctionArgs args = NewFunctionArgs::ForBuiltinWithPrototype(
- isolate->factory()->empty_string(), code,
- isolate->factory()->the_hole_value(), JS_OBJECT_TYPE,
- JSObject::kHeaderSize, 0, builtin_id, MUTABLE);
+ isolate->factory()->empty_string(), isolate->factory()->the_hole_value(),
+ JS_OBJECT_TYPE, JSObject::kHeaderSize, 0, builtin_id, MUTABLE);
Handle<JSFunction> function = isolate->factory()->NewFunction(args);
function->shared()->DontAdaptArguments();
@@ -1553,8 +1534,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// Function instances are sloppy by default.
function_fun->set_prototype_or_initial_map(*isolate->sloppy_function_map());
function_fun->shared()->DontAdaptArguments();
- function_fun->shared()->SetConstructStub(
- *BUILTIN_CODE(isolate, FunctionConstructor));
function_fun->shared()->set_length(1);
InstallWithIntrinsicDefaultProto(isolate, function_fun,
Context::FUNCTION_FUNCTION_INDEX);
@@ -1626,6 +1605,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
PropertyAttributes attribs = static_cast<PropertyAttributes>(
DONT_ENUM | DONT_DELETE);
+ STATIC_ASSERT(JSArray::kLengthDescriptorIndex == 0);
{ // Add length.
Descriptor d = Descriptor::AccessorConstant(
factory->length_string(), factory->array_length_accessor(), attribs);
@@ -1638,9 +1618,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// Cache the array maps, needed by ArrayConstructorStub
CacheInitialJSArrayMaps(native_context(), initial_map);
- ArrayConstructorStub array_constructor_stub(isolate);
- Handle<Code> code = array_constructor_stub.GetCode();
- array_function->shared()->SetConstructStub(*code);
// Set up %ArrayPrototype%.
// The %ArrayPrototype% has TERMINAL_FAST_ELEMENTS_KIND in order to ensure
@@ -1673,12 +1650,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(proto, "shift", Builtins::kArrayPrototypeShift, 0,
false);
SimpleInstallFunction(proto, "unshift", Builtins::kArrayUnshift, 1, false);
- if (FLAG_enable_experimental_builtins) {
- SimpleInstallFunction(proto, "slice", Builtins::kArrayPrototypeSlice, 2,
- false);
- } else {
- SimpleInstallFunction(proto, "slice", Builtins::kArraySlice, 2, false);
- }
+ SimpleInstallFunction(proto, "slice", Builtins::kArrayPrototypeSlice, 2,
+ false);
SimpleInstallFunction(proto, "splice", Builtins::kArraySplice, 2, false);
SimpleInstallFunction(proto, "includes", Builtins::kArrayIncludes, 1,
false);
@@ -1717,73 +1690,16 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kArrayIteratorPrototypeNext, 0, true,
kArrayIteratorNext);
- Handle<JSFunction> array_iterator_function = CreateFunction(
- isolate, factory->ArrayIterator_string(),
- JS_FAST_ARRAY_VALUE_ITERATOR_TYPE, JSArrayIterator::kSize, 0,
- array_iterator_prototype, Builtins::kIllegal);
+ Handle<JSFunction> array_iterator_function =
+ CreateFunction(isolate, factory->ArrayIterator_string(),
+ JS_ARRAY_ITERATOR_TYPE, JSArrayIterator::kSize, 0,
+ array_iterator_prototype, Builtins::kIllegal);
array_iterator_function->shared()->set_native(false);
+ native_context()->set_initial_array_iterator_map(
+ array_iterator_function->initial_map());
native_context()->set_initial_array_iterator_prototype(
*array_iterator_prototype);
- native_context()->set_initial_array_iterator_prototype_map(
- array_iterator_prototype->map());
-
- Handle<Map> initial_map(array_iterator_function->initial_map(), isolate);
-
-#define ARRAY_ITERATOR_LIST(V) \
- V(TYPED_ARRAY, KEY, typed_array, key) \
- V(FAST_ARRAY, KEY, fast_array, key) \
- V(GENERIC_ARRAY, KEY, array, key) \
- V(UINT8_ARRAY, KEY_VALUE, uint8_array, key_value) \
- V(INT8_ARRAY, KEY_VALUE, int8_array, key_value) \
- V(UINT16_ARRAY, KEY_VALUE, uint16_array, key_value) \
- V(INT16_ARRAY, KEY_VALUE, int16_array, key_value) \
- V(UINT32_ARRAY, KEY_VALUE, uint32_array, key_value) \
- V(INT32_ARRAY, KEY_VALUE, int32_array, key_value) \
- V(FLOAT32_ARRAY, KEY_VALUE, float32_array, key_value) \
- V(FLOAT64_ARRAY, KEY_VALUE, float64_array, key_value) \
- V(UINT8_CLAMPED_ARRAY, KEY_VALUE, uint8_clamped_array, key_value) \
- V(BIGUINT64_ARRAY, KEY_VALUE, biguint64_array, key_value) \
- V(BIGINT64_ARRAY, KEY_VALUE, bigint64_array, key_value) \
- V(FAST_SMI_ARRAY, KEY_VALUE, fast_smi_array, key_value) \
- V(FAST_HOLEY_SMI_ARRAY, KEY_VALUE, fast_holey_smi_array, key_value) \
- V(FAST_ARRAY, KEY_VALUE, fast_array, key_value) \
- V(FAST_HOLEY_ARRAY, KEY_VALUE, fast_holey_array, key_value) \
- V(FAST_DOUBLE_ARRAY, KEY_VALUE, fast_double_array, key_value) \
- V(FAST_HOLEY_DOUBLE_ARRAY, KEY_VALUE, fast_holey_double_array, key_value) \
- V(GENERIC_ARRAY, KEY_VALUE, array, key_value) \
- V(UINT8_ARRAY, VALUE, uint8_array, value) \
- V(INT8_ARRAY, VALUE, int8_array, value) \
- V(UINT16_ARRAY, VALUE, uint16_array, value) \
- V(INT16_ARRAY, VALUE, int16_array, value) \
- V(UINT32_ARRAY, VALUE, uint32_array, value) \
- V(INT32_ARRAY, VALUE, int32_array, value) \
- V(FLOAT32_ARRAY, VALUE, float32_array, value) \
- V(FLOAT64_ARRAY, VALUE, float64_array, value) \
- V(UINT8_CLAMPED_ARRAY, VALUE, uint8_clamped_array, value) \
- V(BIGUINT64_ARRAY, VALUE, biguint64_array, value) \
- V(BIGINT64_ARRAY, VALUE, bigint64_array, value) \
- V(FAST_SMI_ARRAY, VALUE, fast_smi_array, value) \
- V(FAST_HOLEY_SMI_ARRAY, VALUE, fast_holey_smi_array, value) \
- V(FAST_ARRAY, VALUE, fast_array, value) \
- V(FAST_HOLEY_ARRAY, VALUE, fast_holey_array, value) \
- V(FAST_DOUBLE_ARRAY, VALUE, fast_double_array, value) \
- V(FAST_HOLEY_DOUBLE_ARRAY, VALUE, fast_holey_double_array, value) \
- V(GENERIC_ARRAY, VALUE, array, value)
-
-#define CREATE_ARRAY_ITERATOR_MAP(PREFIX, SUFFIX, prefix, suffix) \
- do { \
- const InstanceType type = JS_##PREFIX##_##SUFFIX##_ITERATOR_TYPE; \
- Handle<Map> map = \
- Map::Copy(initial_map, "JS_" #PREFIX "_" #SUFFIX "_ITERATOR_TYPE"); \
- map->set_instance_type(type); \
- native_context()->set_##prefix##_##suffix##_iterator_map(*map); \
- } while (0);
-
- ARRAY_ITERATOR_LIST(CREATE_ARRAY_ITERATOR_MAP)
-
-#undef CREATE_ARRAY_ITERATOR_MAP
-#undef ARRAY_ITERATOR_LIST
}
{ // --- N u m b e r ---
@@ -1792,8 +1708,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate->initial_object_prototype(), Builtins::kNumberConstructor);
number_fun->shared()->set_builtin_function_id(kNumberConstructor);
number_fun->shared()->DontAdaptArguments();
- number_fun->shared()->SetConstructStub(
- *BUILTIN_CODE(isolate, NumberConstructor_ConstructStub));
number_fun->shared()->set_length(1);
InstallWithIntrinsicDefaultProto(isolate, number_fun,
Context::NUMBER_FUNCTION_INDEX);
@@ -1908,8 +1822,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
global, "Boolean", JS_VALUE_TYPE, JSValue::kSize, 0,
isolate->initial_object_prototype(), Builtins::kBooleanConstructor);
boolean_fun->shared()->DontAdaptArguments();
- boolean_fun->shared()->SetConstructStub(
- *BUILTIN_CODE(isolate, BooleanConstructor_ConstructStub));
boolean_fun->shared()->set_length(1);
InstallWithIntrinsicDefaultProto(isolate, boolean_fun,
Context::BOOLEAN_FUNCTION_INDEX);
@@ -1936,8 +1848,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
global, "String", JS_VALUE_TYPE, JSValue::kSize, 0,
isolate->initial_object_prototype(), Builtins::kStringConstructor);
string_fun->shared()->set_builtin_function_id(kStringConstructor);
- string_fun->shared()->SetConstructStub(
- *BUILTIN_CODE(isolate, StringConstructor_ConstructStub));
string_fun->shared()->DontAdaptArguments();
string_fun->shared()->set_length(1);
InstallWithIntrinsicDefaultProto(isolate, string_fun,
@@ -2118,8 +2028,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
global, "Symbol", JS_VALUE_TYPE, JSValue::kSize, 0,
factory->the_hole_value(), Builtins::kSymbolConstructor);
symbol_fun->shared()->set_builtin_function_id(kSymbolConstructor);
- symbol_fun->shared()->SetConstructStub(
- *BUILTIN_CODE(isolate, SymbolConstructor_ConstructStub));
symbol_fun->shared()->set_length(0);
symbol_fun->shared()->DontAdaptArguments();
native_context()->set_symbol_function(*symbol_fun);
@@ -2186,8 +2094,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
factory->the_hole_value(), Builtins::kDateConstructor);
InstallWithIntrinsicDefaultProto(isolate, date_fun,
Context::DATE_FUNCTION_INDEX);
- date_fun->shared()->SetConstructStub(
- *BUILTIN_CODE(isolate, DateConstructor_ConstructStub));
date_fun->shared()->set_length(7);
date_fun->shared()->DontAdaptArguments();
@@ -2311,7 +2217,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
{
- Handle<SharedFunctionInfo> info = SimpleCreateConstructorSharedFunctionInfo(
+ Handle<SharedFunctionInfo> info = SimpleCreateBuiltinSharedFunctionInfo(
isolate, Builtins::kPromiseGetCapabilitiesExecutor,
factory->empty_string(), 2);
native_context()->set_promise_get_capabilities_executor_shared_fun(*info);
@@ -2325,7 +2231,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Context::PROMISE_FUNCTION_INDEX);
Handle<SharedFunctionInfo> shared(promise_fun->shared(), isolate);
- shared->SetConstructStub(*BUILTIN_CODE(isolate, JSBuiltinsConstructStub));
shared->set_internal_formal_parameter_count(1);
shared->set_length(1);
@@ -2422,7 +2327,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Context::REGEXP_FUNCTION_INDEX);
Handle<SharedFunctionInfo> shared(regexp_fun->shared(), isolate);
- shared->SetConstructStub(*BUILTIN_CODE(isolate, JSBuiltinsConstructStub));
shared->set_internal_formal_parameter_count(2);
shared->set_length(2);
@@ -2649,8 +2553,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
// Initialize the embedder data slot.
- Handle<FixedArray> embedder_data = factory->NewFixedArray(3);
- native_context()->set_embedder_data(*embedder_data);
+ native_context()->set_embedder_data(*factory->empty_fixed_array());
{ // -- J S O N
Handle<String> name = factory->InternalizeUtf8String("JSON");
@@ -2927,11 +2830,12 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
{ // -- T y p e d A r r a y
- Handle<JSFunction> typed_array_fun =
- CreateFunction(isolate, factory->InternalizeUtf8String("TypedArray"),
- JS_TYPED_ARRAY_TYPE, JSTypedArray::kSize, 0,
- factory->the_hole_value(), Builtins::kIllegal);
+ Handle<JSFunction> typed_array_fun = CreateFunction(
+ isolate, factory->InternalizeUtf8String("TypedArray"),
+ JS_TYPED_ARRAY_TYPE, JSTypedArray::kSize, 0, factory->the_hole_value(),
+ Builtins::kTypedArrayBaseConstructor);
typed_array_fun->shared()->set_native(false);
+ typed_array_fun->shared()->set_length(0);
InstallSpeciesGetter(typed_array_fun);
native_context()->set_typed_array_function(*typed_array_fun);
@@ -3034,8 +2938,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kDataViewConstructor);
InstallWithIntrinsicDefaultProto(isolate, data_view_fun,
Context::DATA_VIEW_FUN_INDEX);
- data_view_fun->shared()->SetConstructStub(
- *BUILTIN_CODE(isolate, DataViewConstructor_ConstructStub));
data_view_fun->shared()->set_length(3);
data_view_fun->shared()->DontAdaptArguments();
@@ -3114,7 +3016,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Context::JS_MAP_FUN_INDEX);
Handle<SharedFunctionInfo> shared(js_map_fun->shared(), isolate);
- shared->SetConstructStub(*BUILTIN_CODE(isolate, JSBuiltinsConstructStub));
shared->DontAdaptArguments();
shared->set_length(0);
@@ -3172,7 +3073,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Context::JS_SET_FUN_INDEX);
Handle<SharedFunctionInfo> shared(js_set_fun->shared(), isolate);
- shared->SetConstructStub(*BUILTIN_CODE(isolate, JSBuiltinsConstructStub));
shared->DontAdaptArguments();
shared->set_length(0);
@@ -3268,7 +3168,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Context::JS_WEAK_MAP_FUN_INDEX);
Handle<SharedFunctionInfo> shared(cons->shared(), isolate);
- shared->SetConstructStub(*BUILTIN_CODE(isolate, JSBuiltinsConstructStub));
shared->DontAdaptArguments();
shared->set_length(0);
@@ -3299,7 +3198,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Context::JS_WEAK_SET_FUN_INDEX);
Handle<SharedFunctionInfo> shared(cons->shared(), isolate);
- shared->SetConstructStub(*BUILTIN_CODE(isolate, JSBuiltinsConstructStub));
shared->DontAdaptArguments();
shared->set_length(0);
@@ -3340,17 +3238,14 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
proxy_function_map->SetInObjectUnusedPropertyFields(unused_property_fields);
Handle<String> name = factory->Proxy_string();
- Handle<Code> code(BUILTIN_CODE(isolate, ProxyConstructor));
NewFunctionArgs args = NewFunctionArgs::ForBuiltin(
- name, code, proxy_function_map, Builtins::kProxyConstructor);
+ name, proxy_function_map, Builtins::kProxyConstructor);
Handle<JSFunction> proxy_function = factory->NewFunction(args);
JSFunction::SetInitialMap(proxy_function, isolate->proxy_map(),
factory->null_value());
- proxy_function->shared()->SetConstructStub(
- *BUILTIN_CODE(isolate, ProxyConstructor_ConstructStub));
proxy_function->shared()->set_internal_formal_parameter_count(2);
proxy_function->shared()->set_length(2);
@@ -3447,9 +3342,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // --- sloppy arguments map
Handle<String> arguments_string = factory->Arguments_string();
NewFunctionArgs args = NewFunctionArgs::ForBuiltinWithPrototype(
- arguments_string, BUILTIN_CODE(isolate, Illegal),
- isolate->initial_object_prototype(), JS_ARGUMENTS_TYPE,
- JSSloppyArgumentsObject::kSize, 2, Builtins::kIllegal, MUTABLE);
+ arguments_string, isolate->initial_object_prototype(),
+ JS_ARGUMENTS_TYPE, JSSloppyArgumentsObject::kSize, 2,
+ Builtins::kIllegal, MUTABLE);
Handle<JSFunction> function = factory->NewFunction(args);
Handle<Map> map(function->initial_map());
@@ -3577,8 +3472,6 @@ Handle<JSFunction> Genesis::InstallTypedArray(const char* name,
result->shared()->DontAdaptArguments();
result->shared()->set_length(3);
- result->shared()->SetConstructStub(
- *BUILTIN_CODE(isolate_, TypedArrayConstructor_ConstructStub));
CHECK(JSObject::SetPrototype(result, typed_array_function, false, kDontThrow)
.FromJust());
@@ -3853,8 +3746,6 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
generator_function_function->set_prototype_or_initial_map(
native_context->generator_function_map());
generator_function_function->shared()->DontAdaptArguments();
- generator_function_function->shared()->SetConstructStub(
- *BUILTIN_CODE(isolate, GeneratorFunctionConstructor));
generator_function_function->shared()->set_length(1);
InstallWithIntrinsicDefaultProto(
isolate, generator_function_function,
@@ -3883,8 +3774,6 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
async_generator_function_function->set_prototype_or_initial_map(
native_context->async_generator_function_map());
async_generator_function_function->shared()->DontAdaptArguments();
- async_generator_function_function->shared()->SetConstructStub(
- *BUILTIN_CODE(isolate, AsyncGeneratorFunctionConstructor));
async_generator_function_function->shared()->set_length(1);
InstallWithIntrinsicDefaultProto(
isolate, async_generator_function_function,
@@ -4096,8 +3985,6 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
async_function_constructor->set_prototype_or_initial_map(
native_context->async_function_map());
async_function_constructor->shared()->DontAdaptArguments();
- async_function_constructor->shared()->SetConstructStub(
- *BUILTIN_CODE(isolate, AsyncFunctionConstructor));
async_function_constructor->shared()->set_length(1);
native_context->set_async_function_constructor(*async_function_constructor);
JSObject::ForceSetPrototype(async_function_constructor,
@@ -4214,6 +4101,9 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_import_meta)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_restrict_constructor_return)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_optional_catch_binding)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_subsume_json)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_numeric_separator)
+
+#undef EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE
void InstallPublicSymbol(Factory* factory, Handle<Context> native_context,
const char* name, Handle<Symbol> value) {
@@ -4269,7 +4159,7 @@ void Genesis::InitializeGlobal_harmony_string_trimming() {
.ToHandleChecked());
JSObject::AddProperty(string_prototype, trim_start_name, trim_left_fun,
DONT_ENUM);
- trim_left_fun->shared()->set_name(*trim_start_name);
+ trim_left_fun->shared()->SetName(*trim_start_name);
}
{
@@ -4281,7 +4171,7 @@ void Genesis::InitializeGlobal_harmony_string_trimming() {
.ToHandleChecked());
JSObject::AddProperty(string_prototype, trim_end_name, trim_right_fun,
DONT_ENUM);
- trim_right_fun->shared()->set_name(*trim_end_name);
+ trim_right_fun->shared()->SetName(*trim_end_name);
}
}
@@ -4306,6 +4196,75 @@ void Genesis::InitializeGlobal_harmony_array_prototype_values() {
NONE);
}
+void Genesis::InitializeGlobal_harmony_array_flatten() {
+ if (!FLAG_harmony_array_flatten) return;
+ Handle<JSFunction> array_constructor(native_context()->array_function());
+ Handle<JSObject> array_prototype(
+ JSObject::cast(array_constructor->instance_prototype()));
+ SimpleInstallFunction(array_prototype, "flatten",
+ Builtins::kArrayPrototypeFlatten, 0, false, DONT_ENUM);
+ SimpleInstallFunction(array_prototype, "flatMap",
+ Builtins::kArrayPrototypeFlatMap, 1, false, DONT_ENUM);
+}
+
+void Genesis::InitializeGlobal_harmony_string_matchall() {
+ if (!FLAG_harmony_string_matchall) return;
+
+ { // String.prototype.matchAll
+ Handle<JSFunction> string_fun(native_context()->string_function());
+ Handle<JSObject> string_prototype(
+ JSObject::cast(string_fun->instance_prototype()));
+
+ SimpleInstallFunction(string_prototype, "matchAll",
+ Builtins::kStringPrototypeMatchAll, 1, true);
+ }
+
+ { // RegExp.prototype[@@matchAll]
+ Handle<JSFunction> regexp_fun(native_context()->regexp_function());
+ Handle<JSObject> regexp_prototype(
+ JSObject::cast(regexp_fun->instance_prototype()));
+ SimpleInstallFunction(regexp_prototype, factory()->match_all_symbol(),
+ "[Symbol.matchAll]",
+ Builtins::kRegExpPrototypeMatchAll, 1, true);
+ Handle<Map> regexp_prototype_map(regexp_prototype->map());
+ Map::SetShouldBeFastPrototypeMap(regexp_prototype_map, true, isolate());
+ native_context()->set_regexp_prototype_map(*regexp_prototype_map);
+ }
+
+ { // --- R e g E x p S t r i n g I t e r a t o r ---
+ Handle<JSObject> iterator_prototype(
+ native_context()->initial_iterator_prototype());
+
+ Handle<JSObject> regexp_string_iterator_prototype =
+ factory()->NewJSObject(isolate()->object_function(), TENURED);
+ JSObject::ForceSetPrototype(regexp_string_iterator_prototype,
+ iterator_prototype);
+
+ JSObject::AddProperty(
+ regexp_string_iterator_prototype, factory()->to_string_tag_symbol(),
+ factory()->NewStringFromAsciiChecked("RegExp String Iterator"),
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+ SimpleInstallFunction(regexp_string_iterator_prototype, "next",
+ Builtins::kRegExpStringIteratorPrototypeNext, 0,
+ true);
+
+ Handle<JSFunction> regexp_string_iterator_function = CreateFunction(
+ isolate(), factory()->NewStringFromAsciiChecked("RegExpStringIterator"),
+ JS_REGEXP_STRING_ITERATOR_TYPE, JSRegExpStringIterator::kSize, 0,
+ regexp_string_iterator_prototype, Builtins::kIllegal);
+ regexp_string_iterator_function->shared()->set_native(false);
+ native_context()->set_initial_regexp_string_iterator_prototype_map_index(
+ regexp_string_iterator_function->initial_map());
+ }
+
+ { // @@matchAll Symbol
+ Handle<JSFunction> symbol_fun(native_context()->symbol_function());
+ InstallConstant(isolate(), symbol_fun, "matchAll",
+ factory()->match_all_symbol());
+ }
+}
+
void Genesis::InitializeGlobal_harmony_promise_finally() {
if (!FLAG_harmony_promise_finally) return;
@@ -4368,8 +4327,6 @@ void Genesis::InitializeGlobal_harmony_bigint() {
factory->the_hole_value(), Builtins::kBigIntConstructor);
bigint_fun->shared()->set_builtin_function_id(kBigIntConstructor);
bigint_fun->shared()->DontAdaptArguments();
- bigint_fun->shared()->SetConstructStub(
- *BUILTIN_CODE(isolate(), BigIntConstructor_ConstructStub));
bigint_fun->shared()->set_length(1);
InstallWithIntrinsicDefaultProto(isolate(), bigint_fun,
Context::BIGINT_FUNCTION_INDEX);
@@ -4463,9 +4420,6 @@ Handle<JSFunction> Genesis::CreateArrayBuffer(
CreateFunction(isolate(), name, JS_ARRAY_BUFFER_TYPE,
JSArrayBuffer::kSizeWithEmbedderFields, 0, prototype,
Builtins::kArrayBufferConstructor);
- Handle<Code> code =
- BUILTIN_CODE(isolate(), ArrayBufferConstructor_ConstructStub);
- array_buffer_fun->shared()->SetConstructStub(*code);
array_buffer_fun->shared()->DontAdaptArguments();
array_buffer_fun->shared()->set_length(1);
@@ -4520,9 +4474,6 @@ Handle<JSFunction> Genesis::InstallInternalArray(Handle<JSObject> target,
InstallFunction(target, name, JS_ARRAY_TYPE, JSArray::kSize, 0, prototype,
Builtins::kInternalArrayConstructor);
- InternalArrayConstructorStub internal_array_constructor_stub(isolate());
- Handle<Code> code = internal_array_constructor_stub.GetCode();
- array_function->shared()->SetConstructStub(*code);
array_function->shared()->DontAdaptArguments();
Handle<Map> original_map(array_function->initial_map());
@@ -5485,6 +5436,11 @@ Genesis::Genesis(
ConfigureUtilsObject(context_type);
+ // We created new functions, which may require debug instrumentation.
+ if (isolate->debug()->is_active()) {
+ isolate->debug()->InstallDebugBreakTrampoline();
+ }
+
native_context()->ResetErrorsThrown();
result_ = native_context();
}
diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h
index a554496dfd..8902ce2529 100644
--- a/deps/v8/src/bootstrapper.h
+++ b/deps/v8/src/bootstrapper.h
@@ -5,7 +5,7 @@
#ifndef V8_BOOTSTRAPPER_H_
#define V8_BOOTSTRAPPER_H_
-#include "src/factory.h"
+#include "src/heap/factory.h"
#include "src/objects/shared-function-info.h"
#include "src/snapshot/natives.h"
#include "src/visitors.h"
diff --git a/deps/v8/src/boxed-float.h b/deps/v8/src/boxed-float.h
index 18ee98a9c0..cdcc8fdad7 100644
--- a/deps/v8/src/boxed-float.h
+++ b/deps/v8/src/boxed-float.h
@@ -51,8 +51,7 @@ class Float32 {
: bit_pattern_(bit_pattern) {}
};
-static_assert(IS_TRIVIALLY_COPYABLE(Float32),
- "Float32 should be trivially copyable");
+ASSERT_TRIVIALLY_COPYABLE(Float32);
// Safety wrapper for a 64-bit floating-point value to make sure we don't lose
// the exact bit pattern during deoptimization when passing this value.
@@ -91,8 +90,7 @@ class Float64 {
: bit_pattern_(bit_pattern) {}
};
-static_assert(IS_TRIVIALLY_COPYABLE(Float64),
- "Float64 should be trivially copyable");
+ASSERT_TRIVIALLY_COPYABLE(Float64);
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index 329fee575f..de372a6453 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -88,19 +88,6 @@ void Builtins::Generate_AdaptorWithBuiltinExitFrame(MacroAssembler* masm) {
AdaptorWithExitFrameType(masm, BUILTIN_EXIT);
}
-// Load the built-in InternalArray function from the current context.
-static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
- Register result) {
- // Load the InternalArray function from the current native context.
- __ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, result);
-}
-
-// Load the built-in Array function from the current context.
-static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
- // Load the Array function from the current native context.
- __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, result);
-}
-
void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
@@ -109,9 +96,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// -----------------------------------
Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
- // Get the InternalArray function.
- GenerateLoadInternalArrayFunction(masm, r1);
-
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray functions should be maps.
__ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
@@ -124,6 +108,7 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
// tail call a stub
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
InternalArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -131,27 +116,30 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
+ // -- r1 : array function
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
- // Get the Array function.
- GenerateLoadArrayFunction(masm, r1);
-
if (FLAG_debug_code) {
// Initial map for the builtin Array functions should be maps.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ SmiTst(r2);
+ __ ldr(r7, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ SmiTst(r7);
__ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction);
- __ CompareObjectType(r2, r3, r4, MAP_TYPE);
+ __ CompareObjectType(r7, r8, r9, MAP_TYPE);
__ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
}
- __ mov(r3, r1);
+ // r2 is the AllocationSite - here undefined.
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ // If r3 (new target) is undefined, then this is the 'Call' case, so move
+ // r1 (the constructor) to r3.
+ __ cmp(r3, r2);
+ __ mov(r3, r1, LeaveCC, eq);
+
// Run the native code for the Array function called as a normal function.
// tail call a stub
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -290,7 +278,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -----------------------------------
__ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kCompilerHintsOffset));
+ __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
__ tst(r4, Operand(SharedFunctionInfo::IsDerivedConstructorBit::kMask));
__ b(ne, &not_create_implicit_receiver);
@@ -410,7 +398,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ bind(&other_result);
__ ldr(r4, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
__ ldr(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kCompilerHintsOffset));
+ __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
__ tst(r4, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
if (restrict_constructor_return) {
@@ -453,13 +441,23 @@ void Builtins::Generate_JSConstructStubGenericUnrestrictedReturn(
MacroAssembler* masm) {
Generate_JSConstructStubGeneric(masm, false);
}
-void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSBuiltinsConstructStubHelper(masm);
-}
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSBuiltinsConstructStubHelper(masm);
}
+static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
+ Register sfi_data,
+ Register scratch1) {
+ Label done;
+
+ __ CompareObjectType(sfi_data, scratch1, scratch1, INTERPRETER_DATA_TYPE);
+ __ b(ne, &done);
+ __ ldr(sfi_data,
+ FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
+
+ __ bind(&done);
+}
+
// static
void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -539,6 +537,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoBytecode(masm, r3, r0);
__ CompareObjectType(r3, r3, r3, BYTECODE_ARRAY_TYPE);
__ Assert(eq, AbortReason::kMissingBytecodeArray);
}
@@ -752,7 +751,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
DCHECK(
!AreAliased(feedback_vector, r0, r1, r3, scratch1, scratch2, scratch3));
- Label optimized_code_slot_is_cell, fallthrough;
+ Label optimized_code_slot_is_weak_ref, fallthrough;
Register closure = r1;
Register optimized_code_entry = scratch1;
@@ -762,9 +761,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
- // optimisation marker. Otherwise, interpret is as a weak cell to a code
+ // optimisation marker. Otherwise, interpret it as a weak reference to a code
// object.
- __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
+ __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);
{
// Optimized code slot is a Smi optimization marker.
@@ -799,12 +798,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
{
- // Optimized code slot is a WeakCell.
- __ bind(&optimized_code_slot_is_cell);
+ // Optimized code slot is a weak reference.
+ __ bind(&optimized_code_slot_is_weak_ref);
- __ ldr(optimized_code_entry,
- FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
- __ JumpIfSmi(optimized_code_entry, &fallthrough);
+ __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &fallthrough);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
@@ -936,6 +933,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ ldr(r0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ ldr(kInterpreterBytecodeArrayRegister,
FieldMemOperand(r0, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, r4);
__ ldr(r4, FieldMemOperand(r0, SharedFunctionInfo::kDebugInfoOffset));
__ SmiTst(r4);
__ b(ne, &maybe_load_debug_bytecode_array);
@@ -1055,11 +1053,31 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// kInterpreterBytecodeArrayRegister is already loaded with
// SharedFunctionInfo::kFunctionDataOffset.
__ bind(&maybe_load_debug_bytecode_array);
+ __ ldr(r9, FieldMemOperand(r4, DebugInfo::kDebugBytecodeArrayOffset), ne);
+ __ JumpIfRoot(r9, Heap::kUndefinedValueRootIndex, &bytecode_array_loaded);
+
+ __ mov(kInterpreterBytecodeArrayRegister, r9);
__ ldr(r9, FieldMemOperand(r4, DebugInfo::kFlagsOffset));
__ SmiUntag(r9);
- __ tst(r9, Operand(DebugInfo::kHasBreakInfo));
- __ ldr(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(r4, DebugInfo::kDebugBytecodeArrayOffset), ne);
+ __ And(r9, r9, Operand(DebugInfo::kDebugExecutionMode));
+
+ ExternalReference debug_execution_mode =
+ ExternalReference::debug_execution_mode_address(masm->isolate());
+ __ mov(r4, Operand(debug_execution_mode));
+ __ ldrsb(r4, MemOperand(r4));
+ STATIC_ASSERT(static_cast<int>(DebugInfo::kDebugExecutionMode) ==
+ static_cast<int>(DebugInfo::kSideEffects));
+ __ cmp(r4, r9);
+ __ b(eq, &bytecode_array_loaded);
+
+ __ push(closure);
+ __ push(feedback_vector);
+ __ push(kInterpreterBytecodeArrayRegister);
+ __ push(closure);
+ __ CallRuntime(Runtime::kDebugApplyInstrumentation);
+ __ pop(kInterpreterBytecodeArrayRegister);
+ __ pop(feedback_vector);
+ __ pop(closure);
__ b(&bytecode_array_loaded);
}
@@ -1085,6 +1103,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
MacroAssembler* masm, ConvertReceiverMode receiver_mode,
InterpreterPushArgsMode mode) {
+ DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
// ----------- S t a t e -------------
// -- r0 : the number of arguments (not including the receiver)
// -- r2 : the address of the first argument to be pushed. Subsequent
@@ -1113,11 +1132,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
}
// Call the target.
- if (mode == InterpreterPushArgsMode::kJSFunction) {
- __ Jump(
- masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny),
- RelocInfo::CODE_TARGET);
- } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
RelocInfo::CODE_TARGET);
} else {
@@ -1161,15 +1176,13 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
__ AssertUndefinedOrAllocationSite(r2, r5);
}
- if (mode == InterpreterPushArgsMode::kJSFunction) {
+ if (mode == InterpreterPushArgsMode::kArrayFunction) {
__ AssertFunction(r1);
- // Tail call to the function-specific construct stub (still in the caller
+ // Tail call to the array construct stub (still in the caller
// context at this point).
- __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kConstructStubOffset));
- // Jump to the construct function.
- __ add(pc, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+ ArrayConstructorStub array_constructor_stub(masm->isolate());
+ __ Jump(array_constructor_stub.GetCode(), RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Call the constructor with r0, r1, and r3 unmodified.
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
@@ -1191,10 +1204,29 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.
+ Label builtin_trampoline, trampoline_loaded;
Smi* interpreter_entry_return_pc_offset(
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
+
+ // If the SFI function_data is an InterpreterData, get the trampoline stored
+ // in it, otherwise get the trampoline from the builtins list.
+ __ ldr(r2, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ ldr(r2, FieldMemOperand(r2, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kFunctionDataOffset));
+ __ CompareObjectType(r2, kInterpreterDispatchTableRegister,
+ kInterpreterDispatchTableRegister,
+ INTERPRETER_DATA_TYPE);
+ __ b(ne, &builtin_trampoline);
+
+ __ ldr(r2,
+ FieldMemOperand(r2, InterpreterData::kInterpreterTrampolineOffset));
+ __ b(&trampoline_loaded);
+
+ __ bind(&builtin_trampoline);
__ Move(r2, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
+
+ __ bind(&trampoline_loaded);
__ add(lr, r2, Operand(interpreter_entry_return_pc_offset->value() +
Code::kHeaderSize - kHeapObjectTag));
@@ -1267,42 +1299,9 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
-void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : argument count (preserved for callee)
- // -- r3 : new target (preserved for callee)
- // -- r1 : target function (preserved for callee)
- // -----------------------------------
- Register closure = r1;
-
- // Get the feedback vector.
- Register feedback_vector = r2;
- __ ldr(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
-
- // The feedback vector must be defined.
- if (FLAG_debug_code) {
- __ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
- __ Assert(ne, AbortReason::kExpectedFeedbackVector);
- }
-
- // Is there an optimization marker or optimized code in the feedback vector?
- MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r4, r6, r5);
-
- // Otherwise, tail call the SFI code.
- static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
- __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(r2);
-}
-
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
- // Set the code slot inside the JSFunction to the trampoline to the
- // interpreter entry.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
+ // Set the code slot inside the JSFunction to CompileLazy.
+ __ Move(r2, BUILTIN_CODE(masm->isolate(), CompileLazy));
__ str(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
__ RecordWriteField(r1, JSFunction::kCodeOffset, r2, r4, kLRHasNotBeenSaved,
kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
@@ -1310,6 +1309,76 @@ void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
Generate_CompileLazy(masm);
}
+static void GetSharedFunctionInfoCode(MacroAssembler* masm, Register sfi_data,
+ Register scratch1) {
+ // Figure out the SFI's code object.
+ Label done;
+ Label check_is_bytecode_array;
+ Label check_is_code;
+ Label check_is_fixed_array;
+ Label check_is_pre_parsed_scope_data;
+ Label check_is_function_template_info;
+ Label check_is_interpreter_data;
+
+ Register data_type = scratch1;
+
+ // IsSmi: Is builtin
+ __ JumpIfNotSmi(sfi_data, &check_is_bytecode_array);
+ __ Move(scratch1,
+ Operand(ExternalReference::builtins_address(masm->isolate())));
+ __ ldr(sfi_data, MemOperand::PointerAddressFromSmiKey(scratch1, sfi_data));
+ __ b(&done);
+
+ // Get map for subsequent checks.
+ __ bind(&check_is_bytecode_array);
+ __ ldr(data_type, FieldMemOperand(sfi_data, HeapObject::kMapOffset));
+ __ ldrh(data_type, FieldMemOperand(data_type, Map::kInstanceTypeOffset));
+
+ // IsBytecodeArray: Interpret bytecode
+ __ cmp(data_type, Operand(BYTECODE_ARRAY_TYPE));
+ __ b(ne, &check_is_code);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
+ __ b(&done);
+
+ // IsCode: Run code
+ __ bind(&check_is_code);
+ __ cmp(data_type, Operand(CODE_TYPE));
+ __ b(eq, &done);
+
+  // IsFixedArray: Instantiate using AsmWasmData
+ __ bind(&check_is_fixed_array);
+ __ cmp(data_type, Operand(FIXED_ARRAY_TYPE));
+ __ b(ne, &check_is_pre_parsed_scope_data);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InstantiateAsmJs));
+ __ b(&done);
+
+ // IsPreParsedScopeData: Compile lazy
+ __ bind(&check_is_pre_parsed_scope_data);
+ __ cmp(data_type, Operand(TUPLE2_TYPE));
+ __ b(ne, &check_is_function_template_info);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), CompileLazy));
+ __ b(&done);
+
+ // IsFunctionTemplateInfo: API call
+ __ bind(&check_is_function_template_info);
+ __ cmp(data_type, Operand(FUNCTION_TEMPLATE_INFO_TYPE));
+ __ b(ne, &check_is_interpreter_data);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), HandleApiCall));
+ __ b(&done);
+
+ // IsInterpreterData: Interpret bytecode
+ __ bind(&check_is_interpreter_data);
+ if (FLAG_debug_code) {
+ __ cmp(data_type, Operand(INTERPRETER_DATA_TYPE));
+ __ Assert(eq, AbortReason::kInvalidSharedFunctionInfoData);
+ }
+ __ ldr(
+ sfi_data,
+ FieldMemOperand(sfi_data, InterpreterData::kInterpreterTrampolineOffset));
+
+ __ bind(&done);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argument count (preserved for callee)
@@ -1332,13 +1401,15 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r4, r6, r5);
- // We found no optimized code.
+ // We found no optimized code. Infer the code object needed for the SFI.
Register entry = r4;
__ ldr(entry,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(entry,
+ FieldMemOperand(entry, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoCode(masm, entry, r5);
- // If SFI points to anything other than CompileLazy, install that.
- __ ldr(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
+ // If code entry points to anything other than CompileLazy, install that.
__ Move(r5, masm->CodeObject());
__ cmp(entry, r5);
__ b(eq, &gotta_call_runtime);
@@ -1408,25 +1479,9 @@ void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
{
// If we've reached this spot, the target builtin has been deserialized and
- // we simply need to copy it over. First to the shared function info.
+ // we simply need to copy it over to the target function.
Register target_builtin = scratch1;
- Register shared = scratch0;
-
- __ ldr(shared,
- FieldMemOperand(target, JSFunction::kSharedFunctionInfoOffset));
-
- CHECK(r5 != target && r5 != scratch0 && r5 != scratch1);
- CHECK(r9 != target && r9 != scratch0 && r9 != scratch1);
-
- __ str(target_builtin,
- FieldMemOperand(shared, SharedFunctionInfo::kCodeOffset));
- __ mov(r9, target_builtin); // Write barrier clobbers r9 below.
- __ RecordWriteField(shared, SharedFunctionInfo::kCodeOffset, r9, r5,
- kLRHasNotBeenSaved, kDontSaveFPRegs,
- OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // And second to the target function.
__ str(target_builtin, FieldMemOperand(target, JSFunction::kCodeOffset));
__ mov(r9, target_builtin); // Write barrier clobbers r9 below.
@@ -1998,7 +2053,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Check that the function is not a "classConstructor".
Label class_constructor;
__ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r3, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ ldr(r3, FieldMemOperand(r2, SharedFunctionInfo::kFlagsOffset));
__ tst(r3, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
__ b(ne, &class_constructor);
@@ -2008,7 +2063,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
- __ ldr(r3, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ ldr(r3, FieldMemOperand(r2, SharedFunctionInfo::kFlagsOffset));
__ tst(r3, Operand(SharedFunctionInfo::IsNativeBit::kMask |
SharedFunctionInfo::IsStrictBit::kMask));
__ b(ne, &done_convert);
@@ -2243,17 +2298,27 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// -- r1 : the constructor to call (checked to be a JSFunction)
// -- r3 : the new target (checked to be a constructor)
// -----------------------------------
+ __ AssertConstructor(r1);
__ AssertFunction(r1);
// Calling convention for function specific ConstructStubs require
// r2 to contain either an AllocationSite or undefined.
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- // Tail call to the function-specific construct stub (still in the caller
- // context at this point).
+ Label call_generic_stub;
+
+ // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
__ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kConstructStubOffset));
- __ add(pc, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
+ __ tst(r4, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
+ __ b(eq, &call_generic_stub);
+
+ __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&call_generic_stub);
+ __ Jump(masm->isolate()->builtins()->JSConstructStubGeneric(),
+ RelocInfo::CODE_TARGET);
}
// static
@@ -2263,6 +2328,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// -- r1 : the function to call (checked to be a JSBoundFunction)
// -- r3 : the new target (checked to be a constructor)
// -----------------------------------
+ __ AssertConstructor(r1);
__ AssertBoundFunction(r1);
// Push the [[BoundArguments]] onto the stack.
@@ -2291,16 +2357,17 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
Label non_constructor, non_proxy;
__ JumpIfSmi(r1, &non_constructor);
- // Dispatch based on instance type.
- __ CompareObjectType(r1, r4, r5, JS_FUNCTION_TYPE);
- __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
- RelocInfo::CODE_TARGET, eq);
-
// Check if target has a [[Construct]] internal method.
+ __ ldr(r4, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldrb(r2, FieldMemOperand(r4, Map::kBitFieldOffset));
__ tst(r2, Operand(Map::IsConstructorBit::kMask));
__ b(eq, &non_constructor);
+ // Dispatch based on instance type.
+ __ CompareInstanceType(r4, r5, JS_FUNCTION_TYPE);
+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
+ RelocInfo::CODE_TARGET, eq);
+
// Only dispatch to bound functions after checking whether they are
// constructors.
__ cmp(r5, Operand(JS_BOUND_FUNCTION_TYPE));
@@ -2503,28 +2570,34 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ auto wasm_instance_reg = r3; // TODO(titzer): put in a common place.
+
// Save all parameter registers (see wasm-linkage.cc). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
- constexpr RegList gp_regs = Register::ListOf<r0, r1, r2, r3>();
+ constexpr RegList gp_regs = Register::ListOf<r0, r1, r2>();
constexpr DwVfpRegister lowest_fp_reg = d0;
constexpr DwVfpRegister highest_fp_reg = d7;
__ stm(db_w, sp, gp_regs);
__ vstm(db_w, sp, lowest_fp_reg, highest_fp_reg);
- // Initialize cp register with kZero, CEntryStub will use it to set the
- // current context on the isolate.
+ // Pass the WASM instance as an explicit argument to WasmCompileLazy.
+ __ push(wasm_instance_reg);
+ // Initialize the JavaScript context with 0. CEntryStub will use it to
+ // set the current context on the isolate.
__ Move(cp, Smi::kZero);
__ CallRuntime(Runtime::kWasmCompileLazy);
- // Store returned instruction start in r8.
- __ add(r8, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // The entrypoint address is the first return value.
+ __ mov(r8, kReturnRegister0);
+ // The WASM instance is the second return value.
+ __ mov(wasm_instance_reg, kReturnRegister1);
// Restore registers.
__ vldm(ia_w, sp, lowest_fp_reg, highest_fp_reg);
__ ldm(ia_w, sp, gp_regs);
}
- // Now jump to the instructions of the returned code object.
+ // Finally, jump to the entrypoint.
__ Jump(r8);
}
#undef __
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index f06969ec6b..aae189f19b 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -19,19 +19,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-// Load the built-in Array function from the current context.
-static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
- // Load the InternalArray function from the native context.
- __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, result);
-}
-
-// Load the built-in InternalArray function from the current context.
-static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
- Register result) {
- // Load the InternalArray function from the native context.
- __ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, result);
-}
-
void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
ExitFrameType exit_frame_type) {
__ Mov(x5, ExternalReference(address, masm->isolate()));
@@ -104,9 +91,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_InternalArrayConstructor");
Label generic_array_code;
- // Get the InternalArray function.
- GenerateLoadInternalArrayFunction(masm, x1);
-
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray functions should be maps.
__ Ldr(x10, FieldMemOperand(x1, JSFunction::kPrototypeOrInitialMapOffset));
@@ -118,6 +102,7 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
+ __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
InternalArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -125,15 +110,13 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : number of arguments
+ // -- x1 : array function
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
ASM_LOCATION("Builtins::Generate_ArrayConstructor");
Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
- // Get the Array function.
- GenerateLoadArrayFunction(masm, x1);
-
if (FLAG_debug_code) {
// Initial map for the builtin Array functions should be maps.
__ Ldr(x10, FieldMemOperand(x1, JSFunction::kPrototypeOrInitialMapOffset));
@@ -143,9 +126,14 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
__ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
}
- // Run the native code for the Array function called as a normal function.
+ // x2 is the AllocationSite - here undefined.
__ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
- __ Mov(x3, x1);
+ // If x3 (new target) is undefined, then this is the 'Call' case, so move
+ // x1 (the constructor) to x3.
+ __ Cmp(x3, x2);
+ __ CmovX(x3, x1, eq);
+
+ // Run the native code for the Array function called as a normal function.
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -325,7 +313,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -----------------------------------
__ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kCompilerHintsOffset));
+ __ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kFlagsOffset));
__ TestAndBranchIfAnySet(w4,
SharedFunctionInfo::IsDerivedConstructorBit::kMask,
&not_create_implicit_receiver);
@@ -451,7 +439,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ Bind(&other_result);
__ Ldr(x4, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
__ Ldr(x4, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kCompilerHintsOffset));
+ __ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kFlagsOffset));
if (restrict_constructor_return) {
// Throw if constructor function is a class constructor
@@ -495,9 +483,6 @@ void Builtins::Generate_JSConstructStubGenericUnrestrictedReturn(
MacroAssembler* masm) {
Generate_JSConstructStubGeneric(masm, false);
}
-void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSBuiltinsConstructStubHelper(masm);
-}
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSBuiltinsConstructStubHelper(masm);
}
@@ -596,8 +581,13 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Underlying function needs to have bytecode available.
if (FLAG_debug_code) {
+ Label check_has_bytecode_array;
__ Ldr(x3, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(x3, FieldMemOperand(x3, SharedFunctionInfo::kFunctionDataOffset));
+ __ CompareObjectType(x3, x0, x0, INTERPRETER_DATA_TYPE);
+ __ B(ne, &check_has_bytecode_array);
+ __ Ldr(x3, FieldMemOperand(x3, InterpreterData::kBytecodeArrayOffset));
+ __ Bind(&check_has_bytecode_array);
__ CompareObjectType(x3, x3, x3, BYTECODE_ARRAY_TYPE);
__ Assert(eq, AbortReason::kMissingBytecodeArray);
}
@@ -845,7 +835,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
DCHECK(
!AreAliased(feedback_vector, x0, x1, x3, scratch1, scratch2, scratch3));
- Label optimized_code_slot_is_cell, fallthrough;
+ Label optimized_code_slot_is_weak_ref, fallthrough;
Register closure = x1;
Register optimized_code_entry = scratch1;
@@ -855,9 +845,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
- // optimisation marker. Otherwise, interpret is as a weak cell to a code
+  // optimisation marker. Otherwise, interpret it as a weak reference to a code
// object.
- __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
+ __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);
{
// Optimized code slot is a Smi optimization marker.
@@ -892,12 +882,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
{
- // Optimized code slot is a WeakCell.
- __ bind(&optimized_code_slot_is_cell);
+ // Optimized code slot is a weak reference.
+ __ bind(&optimized_code_slot_is_weak_ref);
- __ Ldr(optimized_code_entry,
- FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
- __ JumpIfSmi(optimized_code_entry, &fallthrough);
+ __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &fallthrough);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
@@ -1025,10 +1013,18 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
- Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
+ Label maybe_load_debug_bytecode_array, bytecode_array_loaded,
+ has_bytecode_array;
__ Ldr(x0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(kInterpreterBytecodeArrayRegister,
FieldMemOperand(x0, SharedFunctionInfo::kFunctionDataOffset));
+ __ CompareObjectType(kInterpreterBytecodeArrayRegister, x11, x11,
+ INTERPRETER_DATA_TYPE);
+ __ B(ne, &has_bytecode_array);
+ __ Ldr(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ InterpreterData::kBytecodeArrayOffset));
+ __ Bind(&has_bytecode_array);
__ Ldr(x11, FieldMemOperand(x0, SharedFunctionInfo::kDebugInfoOffset));
__ JumpIfNotSmi(x11, &maybe_load_debug_bytecode_array);
__ Bind(&bytecode_array_loaded);
@@ -1149,12 +1145,26 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// kInterpreterBytecodeArrayRegister is already loaded with
// SharedFunctionInfo::kFunctionDataOffset.
__ Bind(&maybe_load_debug_bytecode_array);
- __ Ldrsw(x10, UntagSmiFieldMemOperand(x11, DebugInfo::kFlagsOffset));
- __ TestAndBranchIfAllClear(x10, DebugInfo::kHasBreakInfo,
- &bytecode_array_loaded);
- __ Ldr(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(x11, DebugInfo::kDebugBytecodeArrayOffset));
- __ B(&bytecode_array_loaded);
+ __ Ldr(x10, FieldMemOperand(x11, DebugInfo::kDebugBytecodeArrayOffset));
+ __ JumpIfRoot(x10, Heap::kUndefinedValueRootIndex, &bytecode_array_loaded);
+
+ __ Mov(kInterpreterBytecodeArrayRegister, x10);
+ __ Ldr(x10, UntagSmiFieldMemOperand(x11, DebugInfo::kFlagsOffset));
+ __ And(x10, x10, Immediate(DebugInfo::kDebugExecutionMode));
+
+ STATIC_ASSERT(static_cast<int>(DebugInfo::kDebugExecutionMode) ==
+ static_cast<int>(DebugInfo::kSideEffects));
+ ExternalReference debug_execution_mode =
+ ExternalReference::debug_execution_mode_address(masm->isolate());
+ __ Mov(x11, Operand(debug_execution_mode));
+ __ Ldrsb(x11, MemOperand(x11));
+ __ CompareAndBranch(x10, x11, eq, &bytecode_array_loaded);
+
+ __ Push(closure, feedback_vector);
+ __ PushArgument(closure);
+ __ CallRuntime(Runtime::kDebugApplyInstrumentation);
+ __ Pop(feedback_vector, closure);
+ __ jmp(&bytecode_array_loaded);
}
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
@@ -1236,6 +1246,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
MacroAssembler* masm, ConvertReceiverMode receiver_mode,
InterpreterPushArgsMode mode) {
+ DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
// ----------- S t a t e -------------
// -- x0 : the number of arguments (not including the receiver)
// -- x2 : the address of the first argument to be pushed. Subsequent
@@ -1255,11 +1266,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
receiver_mode, mode);
// Call the target.
- if (mode == InterpreterPushArgsMode::kJSFunction) {
- __ Jump(
- masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny),
- RelocInfo::CODE_TARGET);
- } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
RelocInfo::CODE_TARGET);
} else {
@@ -1290,15 +1297,13 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
Generate_InterpreterPushArgs(masm, num_args, first_arg_index, spread_arg_out,
ConvertReceiverMode::kNullOrUndefined, mode);
- if (mode == InterpreterPushArgsMode::kJSFunction) {
+ if (mode == InterpreterPushArgsMode::kArrayFunction) {
__ AssertFunction(x1);
- // Tail call to the function-specific construct stub (still in the caller
+ // Tail call to the array construct stub (still in the caller
// context at this point).
- __ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(x4, FieldMemOperand(x4, SharedFunctionInfo::kConstructStubOffset));
- __ Add(x4, x4, Code::kHeaderSize - kHeapObjectTag);
- __ Br(x4);
+ ArrayConstructorStub array_constructor_stub(masm->isolate());
+ __ Jump(array_constructor_stub.GetCode(), RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Call the constructor with x0, x1, and x3 unmodified.
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
@@ -1313,10 +1318,29 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.
+ Label builtin_trampoline, trampoline_loaded;
Smi* interpreter_entry_return_pc_offset(
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
+
+ // If the SFI function_data is an InterpreterData, get the trampoline stored
+ // in it, otherwise get the trampoline from the builtins list.
+ __ Ldr(x1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ Ldr(x1, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(x1, FieldMemOperand(x1, SharedFunctionInfo::kFunctionDataOffset));
+ __ CompareObjectType(x1, kInterpreterDispatchTableRegister,
+ kInterpreterDispatchTableRegister,
+ INTERPRETER_DATA_TYPE);
+ __ B(ne, &builtin_trampoline);
+
+ __ Ldr(x1,
+ FieldMemOperand(x1, InterpreterData::kInterpreterTrampolineOffset));
+ __ B(&trampoline_loaded);
+
+ __ Bind(&builtin_trampoline);
__ LoadObject(x1, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
+
+ __ Bind(&trampoline_loaded);
__ Add(lr, x1, Operand(interpreter_entry_return_pc_offset->value() +
Code::kHeaderSize - kHeapObjectTag));
@@ -1387,42 +1411,9 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
-void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x0 : argument count (preserved for callee)
- // -- x3 : new target (preserved for callee)
- // -- x1 : target function (preserved for callee)
- // -----------------------------------
- Register closure = x1;
-
- // Get the feedback vector.
- Register feedback_vector = x2;
- __ Ldr(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ Ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
-
- // The feedback vector must be defined.
- if (FLAG_debug_code) {
- __ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
- __ Assert(ne, AbortReason::kExpectedFeedbackVector);
- }
-
- // Is there an optimization marker or optimized code in the feedback vector?
- MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, x7, x4, x5);
-
- // Otherwise, tail call the SFI code.
- static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
- __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(x2, FieldMemOperand(x2, SharedFunctionInfo::kCodeOffset));
- __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
- __ Br(x2);
-}
-
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
- // Set the code slot inside the JSFunction to the trampoline to the
- // interpreter entry.
- __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(x2, FieldMemOperand(x2, SharedFunctionInfo::kCodeOffset));
+ // Set the code slot inside the JSFunction to CompileLazy.
+ __ Mov(x2, BUILTIN_CODE(masm->isolate(), CompileLazy));
__ Str(x2, FieldMemOperand(x1, JSFunction::kCodeOffset));
__ RecordWriteField(x1, JSFunction::kCodeOffset, x2, x5, kLRHasNotBeenSaved,
kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
@@ -1430,6 +1421,76 @@ void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
Generate_CompileLazy(masm);
}
+static void GetSharedFunctionInfoCode(MacroAssembler* masm, Register sfi_data,
+ Register scratch1) {
+ // Figure out the SFI's code object.
+ Label done;
+ Label check_is_bytecode_array;
+ Label check_is_code;
+ Label check_is_fixed_array;
+ Label check_is_pre_parsed_scope_data;
+ Label check_is_function_template_info;
+ Label check_is_interpreter_data;
+
+ Register data_type = scratch1;
+
+ // IsSmi: Is builtin
+ __ JumpIfNotSmi(sfi_data, &check_is_bytecode_array);
+ __ Mov(scratch1, ExternalReference::builtins_address(masm->isolate()));
+ __ Mov(sfi_data, Operand::UntagSmiAndScale(sfi_data, kPointerSizeLog2));
+ __ Ldr(sfi_data, MemOperand(scratch1, sfi_data));
+ __ B(&done);
+
+ // Get map for subsequent checks.
+ __ Bind(&check_is_bytecode_array);
+ __ Ldr(data_type, FieldMemOperand(sfi_data, HeapObject::kMapOffset));
+ __ Ldrh(data_type, FieldMemOperand(data_type, Map::kInstanceTypeOffset));
+
+ // IsBytecodeArray: Interpret bytecode
+ __ Cmp(data_type, Operand(BYTECODE_ARRAY_TYPE));
+ __ B(ne, &check_is_code);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
+ __ B(&done);
+
+ // IsCode: Run code
+ __ Bind(&check_is_code);
+ __ Cmp(data_type, Operand(CODE_TYPE));
+ __ B(eq, &done);
+
+  // IsFixedArray: Instantiate using AsmWasmData
+ __ Bind(&check_is_fixed_array);
+ __ Cmp(data_type, Operand(FIXED_ARRAY_TYPE));
+ __ B(ne, &check_is_pre_parsed_scope_data);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InstantiateAsmJs));
+ __ B(&done);
+
+ // IsPreParsedScopeData: Compile lazy
+ __ Bind(&check_is_pre_parsed_scope_data);
+ __ Cmp(data_type, Operand(TUPLE2_TYPE));
+ __ B(ne, &check_is_function_template_info);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), CompileLazy));
+ __ B(&done);
+
+ // IsFunctionTemplateInfo: API call
+ __ Bind(&check_is_function_template_info);
+ __ Cmp(data_type, Operand(FUNCTION_TEMPLATE_INFO_TYPE));
+ __ B(ne, &check_is_interpreter_data);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), HandleApiCall));
+ __ B(&done);
+
+ // IsInterpreterData: Interpret bytecode
+ __ Bind(&check_is_interpreter_data);
+ if (FLAG_debug_code) {
+ __ Cmp(data_type, Operand(INTERPRETER_DATA_TYPE));
+ __ Assert(eq, AbortReason::kInvalidSharedFunctionInfoData);
+ }
+ __ Ldr(
+ sfi_data,
+ FieldMemOperand(sfi_data, InterpreterData::kInterpreterTrampolineOffset));
+
+ __ Bind(&done);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argument count (preserved for callee)
@@ -1452,13 +1513,15 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, x7, x4, x5);
- // We found no optimized code.
+ // We found no optimized code. Infer the code object needed for the SFI.
Register entry = x7;
__ Ldr(entry,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(entry,
+ FieldMemOperand(entry, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoCode(masm, entry, x5);
- // If SFI points to anything other than CompileLazy, install that.
- __ Ldr(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
+ // If code entry points to anything other than CompileLazy, install that.
__ Move(x5, masm->CodeObject());
__ Cmp(entry, x5);
__ B(eq, &gotta_call_runtime);
@@ -1527,25 +1590,9 @@ void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
{
// If we've reached this spot, the target builtin has been deserialized and
- // we simply need to copy it over. First to the shared function info.
+ // we simply need to copy it over to the target function.
Register target_builtin = scratch1;
- Register shared = scratch0;
-
- __ Ldr(shared,
- FieldMemOperand(target, JSFunction::kSharedFunctionInfoOffset));
-
- CHECK(!x5.is(target) && !x5.is(scratch0) && !x5.is(scratch1));
- CHECK(!x9.is(target) && !x9.is(scratch0) && !x9.is(scratch1));
-
- __ Str(target_builtin,
- FieldMemOperand(shared, SharedFunctionInfo::kCodeOffset));
- __ Mov(x9, target_builtin); // Write barrier clobbers x9 below.
- __ RecordWriteField(shared, SharedFunctionInfo::kCodeOffset, x9, x5,
- kLRHasNotBeenSaved, kDontSaveFPRegs,
- OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // And second to the target function.
__ Str(target_builtin, FieldMemOperand(target, JSFunction::kCodeOffset));
__ Mov(x9, target_builtin); // Write barrier clobbers x9 below.
@@ -2360,7 +2407,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Check that function is not a "classConstructor".
Label class_constructor;
__ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(w3, FieldMemOperand(x2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ Ldr(w3, FieldMemOperand(x2, SharedFunctionInfo::kFlagsOffset));
__ TestAndBranchIfAnySet(w3, SharedFunctionInfo::IsClassConstructorBit::kMask,
&class_constructor);
@@ -2647,18 +2694,27 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// -- x1 : the constructor to call (checked to be a JSFunction)
// -- x3 : the new target (checked to be a constructor)
// -----------------------------------
+ __ AssertConstructor(x1);
__ AssertFunction(x1);
// Calling convention for function specific ConstructStubs require
// x2 to contain either an AllocationSite or undefined.
__ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
- // Tail call to the function-specific construct stub (still in the caller
- // context at this point).
+ Label call_generic_stub;
+
+ // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
__ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(x4, FieldMemOperand(x4, SharedFunctionInfo::kConstructStubOffset));
- __ Add(x4, x4, Code::kHeaderSize - kHeapObjectTag);
- __ Br(x4);
+ __ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kFlagsOffset));
+ __ TestAndBranchIfAllClear(
+ w4, SharedFunctionInfo::ConstructAsBuiltinBit::kMask, &call_generic_stub);
+
+ __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&call_generic_stub);
+ __ Jump(masm->isolate()->builtins()->JSConstructStubGeneric(),
+ RelocInfo::CODE_TARGET);
}
// static
@@ -2668,6 +2724,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// -- x1 : the function to call (checked to be a JSBoundFunction)
// -- x3 : the new target (checked to be a constructor)
// -----------------------------------
+ __ AssertConstructor(x1);
__ AssertBoundFunction(x1);
// Push the [[BoundArguments]] onto the stack.
@@ -2701,16 +2758,17 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
Label non_constructor, non_proxy;
__ JumpIfSmi(x1, &non_constructor);
- // Dispatch based on instance type.
- __ CompareObjectType(x1, x4, x5, JS_FUNCTION_TYPE);
- __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
- RelocInfo::CODE_TARGET, eq);
-
// Check if target has a [[Construct]] internal method.
+ __ Ldr(x4, FieldMemOperand(x1, HeapObject::kMapOffset));
__ Ldrb(x2, FieldMemOperand(x4, Map::kBitFieldOffset));
__ TestAndBranchIfAllClear(x2, Map::IsConstructorBit::kMask,
&non_constructor);
+ // Dispatch based on instance type.
+ __ CompareInstanceType(x4, x5, JS_FUNCTION_TYPE);
+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
+ RelocInfo::CODE_TARGET, eq);
+
// Only dispatch to bound functions after checking whether they are
// constructors.
__ Cmp(x5, JS_BOUND_FUNCTION_TYPE);
@@ -2977,28 +3035,35 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ auto wasm_instance_reg = x7; // TODO(titzer): put in a common place.
+
// Save all parameter registers (see wasm-linkage.cc). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
- constexpr RegList gp_regs =
- Register::ListOf<x0, x1, x2, x3, x4, x5, x6, x7>();
+ constexpr RegList gp_regs = Register::ListOf<x0, x1, x2, x3, x4, x5>();
constexpr RegList fp_regs =
Register::ListOf<d0, d1, d2, d3, d4, d5, d6, d7>();
__ PushXRegList(gp_regs);
__ PushDRegList(fp_regs);
+ __ Push(x5, x6); // note: pushed twice because alignment required
- // Initialize cp register with kZero, CEntryStub will use it to set the
- // current context on the isolate.
+ // Pass the WASM instance as an explicit argument to WasmCompileLazy.
+ __ PushArgument(wasm_instance_reg);
+ // Initialize the JavaScript context with 0. CEntryStub will use it to
+ // set the current context on the isolate.
__ Move(cp, Smi::kZero);
__ CallRuntime(Runtime::kWasmCompileLazy);
- // Store returned instruction start in x8.
- __ Add(x8, x0, Code::kHeaderSize - kHeapObjectTag);
+ // The entrypoint address is the first return value.
+ __ mov(x8, kReturnRegister0);
+ // The WASM instance is the second return value.
+ __ mov(wasm_instance_reg, kReturnRegister1);
// Restore registers.
+ __ Pop(x6, x5); // note: pushed twice because alignment required
__ PopDRegList(fp_regs);
__ PopXRegList(gp_regs);
}
- // Now jump to the instructions of the returned code object.
+ // Finally, jump to the entrypoint.
__ Jump(x8);
}
diff --git a/deps/v8/src/builtins/builtins-api.cc b/deps/v8/src/builtins/builtins-api.cc
index bb66b082f3..ce19bf8662 100644
--- a/deps/v8/src/builtins/builtins-api.cc
+++ b/deps/v8/src/builtins/builtins-api.cc
@@ -4,7 +4,7 @@
#include "src/builtins/builtins.h"
-#include "src/api-arguments.h"
+#include "src/api-arguments-inl.h"
#include "src/api-natives.h"
#include "src/builtins/builtins-utils.h"
#include "src/counters.h"
@@ -21,21 +21,17 @@ namespace {
// Returns the holder JSObject if the function can legally be called with this
// receiver. Returns nullptr if the call is illegal.
// TODO(dcarney): CallOptimization duplicates this logic, merge.
-JSReceiver* GetCompatibleReceiver(Isolate* isolate, FunctionTemplateInfo* info,
- JSReceiver* receiver) {
+JSObject* GetCompatibleReceiver(Isolate* isolate, FunctionTemplateInfo* info,
+ JSObject* receiver) {
Object* recv_type = info->signature();
// No signature, return holder.
if (!recv_type->IsFunctionTemplateInfo()) return receiver;
- // A Proxy cannot have been created from the signature template.
- if (!receiver->IsJSObject()) return nullptr;
-
- JSObject* js_obj_receiver = JSObject::cast(receiver);
FunctionTemplateInfo* signature = FunctionTemplateInfo::cast(recv_type);
// Check the receiver. Fast path for receivers with no hidden prototypes.
- if (signature->IsTemplateFor(js_obj_receiver)) return receiver;
- if (!js_obj_receiver->map()->has_hidden_prototype()) return nullptr;
- for (PrototypeIterator iter(isolate, js_obj_receiver, kStartAtPrototype,
+ if (signature->IsTemplateFor(receiver)) return receiver;
+ if (!receiver->map()->has_hidden_prototype()) return nullptr;
+ for (PrototypeIterator iter(isolate, receiver, kStartAtPrototype,
PrototypeIterator::END_AT_NON_HIDDEN);
!iter.IsAtEnd(); iter.Advance()) {
JSObject* current = iter.GetCurrent<JSObject>();
@@ -45,12 +41,12 @@ JSReceiver* GetCompatibleReceiver(Isolate* isolate, FunctionTemplateInfo* info,
}
template <bool is_construct>
-MUST_USE_RESULT MaybeHandle<Object> HandleApiCallHelper(
+V8_WARN_UNUSED_RESULT MaybeHandle<Object> HandleApiCallHelper(
Isolate* isolate, Handle<HeapObject> function,
Handle<HeapObject> new_target, Handle<FunctionTemplateInfo> fun_data,
Handle<Object> receiver, BuiltinArguments args) {
- Handle<JSReceiver> js_receiver;
- JSReceiver* raw_holder;
+ Handle<JSObject> js_receiver;
+ JSObject* raw_holder;
if (is_construct) {
DCHECK(args.receiver()->IsTheHole(isolate));
if (fun_data->instance_template()->IsUndefined(isolate)) {
@@ -72,18 +68,21 @@ MUST_USE_RESULT MaybeHandle<Object> HandleApiCallHelper(
raw_holder = *js_receiver;
} else {
DCHECK(receiver->IsJSReceiver());
- js_receiver = Handle<JSReceiver>::cast(receiver);
+
+ if (!receiver->IsJSObject()) {
+ // This function cannot be called with the given receiver. Abort!
+ THROW_NEW_ERROR(
+ isolate, NewTypeError(MessageTemplate::kIllegalInvocation), Object);
+ }
+
+ js_receiver = Handle<JSObject>::cast(receiver);
if (!fun_data->accept_any_receiver() &&
- js_receiver->IsAccessCheckNeeded()) {
- // Proxies never need access checks.
- DCHECK(js_receiver->IsJSObject());
- Handle<JSObject> js_obj_receiver = Handle<JSObject>::cast(js_receiver);
- if (!isolate->MayAccess(handle(isolate->context()), js_obj_receiver)) {
- isolate->ReportFailedAccessCheck(js_obj_receiver);
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
- return isolate->factory()->undefined_value();
- }
+ js_receiver->IsAccessCheckNeeded() &&
+ !isolate->MayAccess(handle(isolate->context()), js_receiver)) {
+ isolate->ReportFailedAccessCheck(js_receiver);
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return isolate->factory()->undefined_value();
}
raw_holder = GetCompatibleReceiver(isolate, *fun_data, *js_receiver);
@@ -180,6 +179,25 @@ MaybeHandle<Object> Builtins::InvokeApiFunction(Isolate* isolate,
}
}
+ if (function->IsFunctionTemplateInfo()) {
+ Handle<FunctionTemplateInfo> info =
+ Handle<FunctionTemplateInfo>::cast(function);
+ // If we need to break at function entry, go the long way. Instantiate the
+ // function, use the DebugBreakTrampoline, and call it through JS.
+ if (info->BreakAtEntry()) {
+ DCHECK(!is_construct);
+ DCHECK(new_target->IsUndefined(isolate));
+ Handle<JSFunction> function;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, function,
+ ApiNatives::InstantiateFunction(
+ info, MaybeHandle<v8::internal::Name>()),
+ Object);
+ Handle<Code> trampoline = BUILTIN_CODE(isolate, DebugBreakTrampoline);
+ function->set_code(*trampoline);
+ return Execution::Call(isolate, function, receiver, argc, args);
+ }
+ }
+
Handle<FunctionTemplateInfo> fun_data =
function->IsFunctionTemplateInfo()
? Handle<FunctionTemplateInfo>::cast(function)
@@ -224,7 +242,7 @@ MaybeHandle<Object> Builtins::InvokeApiFunction(Isolate* isolate,
// Helper function to handle calls to non-function objects created through the
// API. The object can be called as either a constructor (using new) or just as
// a function (without new).
-MUST_USE_RESULT static Object* HandleApiCallAsFunctionOrConstructor(
+V8_WARN_UNUSED_RESULT static Object* HandleApiCallAsFunctionOrConstructor(
Isolate* isolate, bool is_construct_call, BuiltinArguments args) {
Handle<Object> receiver = args.receiver();
@@ -259,7 +277,6 @@ MUST_USE_RESULT static Object* HandleApiCallAsFunctionOrConstructor(
{
HandleScope scope(isolate);
LOG(isolate, ApiObjectAccess("call non-function", obj));
-
FunctionCallbackArguments custom(isolate, call_data->data(), constructor,
obj, new_target, &args[0] - 1,
args.length() - 1);
diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc
index 52a6222882..cca395b2ce 100644
--- a/deps/v8/src/builtins/builtins-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-array-gen.cc
@@ -8,8 +8,8 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
-#include "src/factory-inl.h"
#include "src/frame-constants.h"
+#include "src/heap/factory-inl.h"
#include "src/builtins/builtins-array-gen.h"
@@ -151,7 +151,7 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
BIND(&fast);
{
GotoIf(SmiNotEqual(LoadJSArrayLength(a()), to_.value()), &runtime);
- kind = EnsureArrayPushable(a(), &runtime);
+ kind = EnsureArrayPushable(LoadMap(a()), &runtime);
GotoIf(IsElementsKindGreaterThan(kind, HOLEY_SMI_ELEMENTS),
&object_push_pre);
@@ -214,8 +214,9 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
CSA_ASSERT(this,
SmiLessThanOrEqual(
len_, LoadObjectField(a, JSTypedArray::kLengthOffset)));
- fast_typed_array_target_ = Word32Equal(LoadInstanceType(LoadElements(o_)),
- LoadInstanceType(LoadElements(a)));
+ fast_typed_array_target_ =
+ Word32Equal(LoadInstanceType(LoadElements(original_array)),
+ LoadInstanceType(LoadElements(a)));
a_.Bind(a);
}
@@ -413,15 +414,15 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
// 3. Let len be ToLength(Get(O, "length")).
// 4. ReturnIfAbrupt(len).
- VARIABLE(merged_length, MachineRepresentation::kTagged);
+ TVARIABLE(Number, merged_length);
Label has_length(this, &merged_length), not_js_array(this);
GotoIf(DoesntHaveInstanceType(o(), JS_ARRAY_TYPE), &not_js_array);
- merged_length.Bind(LoadJSArrayLength(o()));
+ merged_length = LoadJSArrayLength(CAST(o()));
Goto(&has_length);
BIND(&not_js_array);
Node* len_property =
GetProperty(context(), o(), isolate()->factory()->length_string());
- merged_length.Bind(ToLength_Inline(context(), len_property));
+ merged_length = ToLength_Inline(context(), len_property);
Goto(&has_length);
BIND(&has_length);
len_ = merged_length.value();
@@ -466,7 +467,8 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
void ArrayBuiltinsAssembler::InitIteratingArrayBuiltinLoopContinuation(
TNode<Context> context, TNode<Object> receiver, Node* callbackfn,
- Node* this_arg, Node* a, Node* o, Node* initial_k, Node* len, Node* to) {
+ Node* this_arg, Node* a, TNode<JSReceiver> o, Node* initial_k,
+ TNode<Number> len, Node* to) {
context_ = context;
this_arg_ = this_arg;
callbackfn_ = callbackfn;
@@ -489,14 +491,17 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
throw_detached(this, Label::kDeferred);
GotoIf(TaggedIsSmi(receiver_), &throw_not_typed_array);
- GotoIfNot(HasInstanceType(receiver_, JS_TYPED_ARRAY_TYPE),
+ GotoIfNot(HasInstanceType(CAST(receiver_), JS_TYPED_ARRAY_TYPE),
&throw_not_typed_array);
- o_ = receiver_;
- Node* array_buffer = LoadObjectField(o_, JSTypedArray::kBufferOffset);
+ TNode<JSTypedArray> typed_array = CAST(receiver_);
+ o_ = typed_array;
+
+ Node* array_buffer =
+ LoadObjectField(typed_array, JSTypedArray::kBufferOffset);
GotoIf(IsDetachedBuffer(array_buffer), &throw_detached);
- len_ = LoadObjectField(o_, JSTypedArray::kLengthOffset);
+ len_ = LoadObjectField<Smi>(typed_array, JSTypedArray::kLengthOffset);
Label throw_not_callable(this, Label::kDeferred);
Label distinguish_types(this);
@@ -540,7 +545,7 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
} else {
k_.Bind(NumberDec(len()));
}
- Node* instance_type = LoadInstanceType(LoadElements(o_));
+ Node* instance_type = LoadInstanceType(LoadElements(typed_array));
Switch(instance_type, &unexpected_instance_type, instance_types.data(),
label_ptrs.data(), labels.size());
@@ -552,7 +557,8 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
// TODO(tebbi): Silently cancelling the loop on buffer detachment is a
// spec violation. Should go to &throw_detached and throw a TypeError
// instead.
- VisitAllTypedArrayElements(array_buffer, processor, &done, direction);
+ VisitAllTypedArrayElements(array_buffer, processor, &done, direction,
+ typed_array);
Goto(&done);
// No exception, return success
BIND(&done);
@@ -638,12 +644,12 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
void ArrayBuiltinsAssembler::VisitAllTypedArrayElements(
Node* array_buffer, const CallResultProcessor& processor, Label* detached,
- ForEachDirection direction) {
+ ForEachDirection direction, TNode<JSTypedArray> typed_array) {
VariableList list({&a_, &k_, &to_}, zone());
FastLoopBody body = [&](Node* index) {
GotoIf(IsDetachedBuffer(array_buffer), detached);
- Node* elements = LoadElements(o_);
+ Node* elements = LoadElements(typed_array);
Node* base_ptr =
LoadObjectField(elements, FixedTypedArrayBase::kBasePointerOffset);
Node* external_ptr =
@@ -671,13 +677,13 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
void ArrayBuiltinsAssembler::VisitAllFastElementsOneKind(
ElementsKind kind, const CallResultProcessor& processor,
Label* array_changed, ParameterMode mode, ForEachDirection direction,
- MissingPropertyMode missing_property_mode) {
+ MissingPropertyMode missing_property_mode, TNode<Smi> length) {
Comment("begin VisitAllFastElementsOneKind");
VARIABLE(original_map, MachineRepresentation::kTagged);
original_map.Bind(LoadMap(o()));
VariableList list({&original_map, &a_, &k_, &to_}, zone());
Node* start = IntPtrOrSmiConstant(0, mode);
- Node* end = TaggedToParameter(len(), mode);
+ Node* end = TaggedToParameter(length, mode);
IndexAdvanceMode advance_mode = direction == ForEachDirection::kReverse
? IndexAdvanceMode::kPre
: IndexAdvanceMode::kPost;
@@ -695,13 +701,14 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
Node* o_map = LoadMap(o());
GotoIf(WordNotEqual(o_map, original_map.value()), array_changed);
+ TNode<JSArray> o_array = CAST(o());
// Check if o's length has changed during the callback and if the
// index is now out of range of the new length.
- GotoIf(SmiGreaterThanOrEqual(k_.value(), LoadJSArrayLength(o())),
+ GotoIf(SmiGreaterThanOrEqual(k_.value(), LoadJSArrayLength(o_array)),
array_changed);
      // Re-load the elements array. It may have been resized.
- Node* elements = LoadElements(o());
+ Node* elements = LoadElements(o_array);
// Fast case: load the element directly from the elements FixedArray
// and call the callback if the element is not the hole.
@@ -757,6 +764,7 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
&switch_on_elements_kind, slow);
BIND(&switch_on_elements_kind);
+ TNode<Smi> smi_len = CAST(len());
// Select by ElementsKind
Node* o_map = LoadMap(o());
Node* bit_field2 = LoadMapBitField2(o_map);
@@ -768,7 +776,7 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
BIND(&fast_elements);
{
VisitAllFastElementsOneKind(PACKED_ELEMENTS, processor, slow, mode,
- direction, missing_property_mode);
+ direction, missing_property_mode, smi_len);
action(this);
@@ -783,7 +791,7 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
BIND(&fast_double_elements);
{
VisitAllFastElementsOneKind(PACKED_DOUBLE_ELEMENTS, processor, slow, mode,
- direction, missing_property_mode);
+ direction, missing_property_mode, smi_len);
action(this);
@@ -807,7 +815,7 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
GotoIfNot(IsPrototypeInitialArrayPrototype(context(), original_map),
&runtime);
- Node* species_protector = SpeciesProtectorConstant();
+ Node* species_protector = ArraySpeciesProtectorConstant();
Node* value =
LoadObjectField(species_protector, PropertyCell::kValueOffset);
TNode<Smi> const protector_invalid =
@@ -841,8 +849,7 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
}
// Perform ArraySpeciesCreate (ES6 #sec-arrayspeciescreate).
- void ArrayBuiltinsAssembler::GenerateArraySpeciesCreate(
- SloppyTNode<Smi> len) {
+ void ArrayBuiltinsAssembler::GenerateArraySpeciesCreate(TNode<Number> len) {
Label runtime(this, Label::kDeferred), done(this);
Node* const original_map = LoadMap(o());
@@ -853,7 +860,7 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
GotoIfNot(IsPrototypeInitialArrayPrototype(context(), original_map),
&runtime);
- Node* species_protector = SpeciesProtectorConstant();
+ Node* species_protector = ArraySpeciesProtectorConstant();
Node* value =
LoadObjectField(species_protector, PropertyCell::kValueOffset);
Node* const protector_invalid = SmiConstant(Isolate::kProtectorInvalid);
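Note: both GenerateArraySpeciesCreate variants now consult the renamed ArraySpeciesProtectorConstant cell and bail to the runtime once it is invalidated. The protector tracks whether the Array @@species machinery is still pristine; the sketch below (plain JavaScript, runnable in Node, all standard names) only illustrates what @@species controls at the language level, not the protector cell itself.

// With the default @@species, map() on a subclass produces that subclass.
class MyArray extends Array {}
const a = MyArray.of(1, 2, 3);
console.log(a.map(x => x * 2) instanceof MyArray);     // true

// Redefining @@species changes the constructor ArraySpeciesCreate picks,
// which is exactly the kind of customization these fast paths must detect.
class PlainArray extends Array {
  static get [Symbol.species]() { return Array; }
}
const b = PlainArray.of(1, 2, 3);
console.log(b.map(x => x * 2) instanceof PlainArray);  // false
console.log(Array.isArray(b.map(x => x * 2)));         // true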
@@ -1019,7 +1026,7 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
{
array_receiver = CAST(receiver);
arg_index = IntPtrConstant(0);
- kind = EnsureArrayPushable(array_receiver, &runtime);
+ kind = EnsureArrayPushable(LoadMap(array_receiver), &runtime);
GotoIf(IsElementsKindGreaterThan(kind, HOLEY_SMI_ELEMENTS),
&object_push_pre);
@@ -1146,7 +1153,7 @@ class ArrayPrototypeSliceCodeStubAssembler : public CodeStubAssembler {
GotoIf(IsNoElementsProtectorCellInvalid(), slow);
- GotoIf(IsSpeciesProtectorCellInvalid(), slow);
+ GotoIf(IsArraySpeciesProtectorCellInvalid(), slow);
// Bailout if receiver has slow elements.
Node* elements_kind = LoadMapElementsKind(map);
@@ -1647,9 +1654,9 @@ TF_BUILTIN(ArrayFindLoopContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
- Node* object = Parameter(Descriptor::kObject);
+ TNode<JSReceiver> object = CAST(Parameter(Descriptor::kObject));
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Node* to = Parameter(Descriptor::kTo);
InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
@@ -1670,7 +1677,7 @@ TF_BUILTIN(ArrayFindLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Return(CallBuiltin(Builtins::kArrayFindLoopContinuation, context, receiver,
callbackfn, this_arg, UndefinedConstant(), receiver,
@@ -1685,7 +1692,7 @@ TF_BUILTIN(ArrayFindLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Return(CallBuiltin(Builtins::kArrayFindLoopContinuation, context, receiver,
callbackfn, this_arg, UndefinedConstant(), receiver,
@@ -1702,7 +1709,7 @@ TF_BUILTIN(ArrayFindLoopAfterCallbackLazyDeoptContinuation,
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Node* found_value = Parameter(Descriptor::kFoundValue);
Node* is_found = Parameter(Descriptor::kIsFound);
@@ -1748,9 +1755,9 @@ TF_BUILTIN(ArrayFindIndexLoopContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
- Node* object = Parameter(Descriptor::kObject);
+ TNode<JSReceiver> object = CAST(Parameter(Descriptor::kObject));
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Node* to = Parameter(Descriptor::kTo);
InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
@@ -1769,7 +1776,7 @@ TF_BUILTIN(ArrayFindIndexLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Return(CallBuiltin(Builtins::kArrayFindIndexLoopContinuation, context,
receiver, callbackfn, this_arg, SmiConstant(-1), receiver,
@@ -1782,7 +1789,7 @@ TF_BUILTIN(ArrayFindIndexLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Return(CallBuiltin(Builtins::kArrayFindIndexLoopContinuation, context,
receiver, callbackfn, this_arg, SmiConstant(-1), receiver,
@@ -1796,7 +1803,7 @@ TF_BUILTIN(ArrayFindIndexLoopAfterCallbackLazyDeoptContinuation,
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Node* found_value = Parameter(Descriptor::kFoundValue);
Node* is_found = Parameter(Descriptor::kIsFound);
@@ -1938,13 +1945,10 @@ class ArrayPopulatorAssembler : public CodeStubAssembler {
void GenerateSetLength(TNode<Context> context, TNode<Object> array,
TNode<Number> length) {
Label fast(this), runtime(this), done(this);
- // TODO(delphick): We should be able to skip the fast set altogether, if the
- // length already equals the expected length, which it always is now on the
- // fast path.
- // Only set the length in this stub if
- // 1) the array has fast elements,
- // 2) the length is writable,
- // 3) the new length is equal to the old length.
+    // There's no need to set the length if
+    // 1) the array is a fast JS array and
+    // 2) the new length is equal to the old length,
+    // as the set is not observable. Otherwise fall back to the runtime.
// 1) Check that the array has fast elements.
    // TODO(delphick): Consider changing this since it does an unnecessary
@@ -1961,20 +1965,11 @@ class ArrayPopulatorAssembler : public CodeStubAssembler {
TNode<Smi> old_length = LoadFastJSArrayLength(fast_array);
CSA_ASSERT(this, TaggedIsPositiveSmi(old_length));
- // 2) Ensure that the length is writable.
- // TODO(delphick): This check may be redundant due to the
- // BranchIfFastJSArray above.
- EnsureArrayLengthWritable(LoadMap(fast_array), &runtime);
-
- // 3) If the created array's length does not match the required length,
- // then use the runtime to set the property as that will insert holes
- // into excess elements or shrink the backing store as appropriate.
- GotoIf(SmiNotEqual(length_smi, old_length), &runtime);
-
- StoreObjectFieldNoWriteBarrier(fast_array, JSArray::kLengthOffset,
- length_smi);
-
- Goto(&done);
+ // 2) If the created array's length matches the required length, then
+ // there's nothing else to do. Otherwise use the runtime to set the
+ // property as that will insert holes into excess elements or shrink
+ // the backing store as appropriate.
+ Branch(SmiNotEqual(length_smi, old_length), &runtime, &done);
}
BIND(&runtime);
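Note: the rewritten GenerateSetLength relies on a length store being unobservable when the new length equals the current one on a fast array; any other value must go through the runtime because it truncates the array or pads it with holes. A minimal plain-JavaScript sketch of that observable behaviour:

const arr = [1, 2, 3];

arr.length = 3;          // same length: nothing observable happens
console.log(arr);        // [ 1, 2, 3 ]

arr.length = 5;          // growing inserts holes at the end
console.log(1 in arr);   // true
console.log(4 in arr);   // false (hole, not an undefined-valued property)

arr.length = 1;          // shrinking drops the excess elements
console.log(arr);        // [ 1 ]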
@@ -2128,14 +2123,11 @@ TF_BUILTIN(ArrayFrom, ArrayPopulatorAssembler) {
}
}
- // Since there's no iterator, items cannot be a Fast JS Array.
BIND(&not_iterable);
{
- CSA_ASSERT(this, Word32BinaryNot(IsFastJSArray(array_like, context)));
-
// Treat array_like as an array and try to get its length.
- length = CAST(ToLength_Inline(
- context, GetProperty(context, array_like, factory()->length_string())));
+ length = ToLength_Inline(
+ context, GetProperty(context, array_like, factory()->length_string()));
// Construct an array using the receiver as constructor with the same length
// as the input array.
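Note: this is the Array.from fallback for arguments without Symbol.iterator; the length is now read with ToLength_Inline directly rather than through the removed CAST/assert dance. The two paths, in plain JavaScript:

// Iterable path: the iterator drives the loop, "length" is ignored.
console.log(Array.from('abc'));                           // [ 'a', 'b', 'c' ]

// Array-like path: no iterator, so length + indexed properties are used.
console.log(Array.from({ length: 2, 0: 'x', 1: 'y' }));   // [ 'x', 'y' ]

// ToLength clamps odd length values; a negative length yields [].
console.log(Array.from({ length: -5 }));                  // []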
@@ -2257,9 +2249,9 @@ TF_BUILTIN(ArrayForEachLoopContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
- Node* object = Parameter(Descriptor::kObject);
+ TNode<JSReceiver> object = CAST(Parameter(Descriptor::kObject));
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Node* to = Parameter(Descriptor::kTo);
InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
@@ -2277,7 +2269,7 @@ TF_BUILTIN(ArrayForEachLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Return(CallBuiltin(Builtins::kArrayForEachLoopContinuation, context, receiver,
callbackfn, this_arg, UndefinedConstant(), receiver,
@@ -2290,7 +2282,7 @@ TF_BUILTIN(ArrayForEachLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Return(CallBuiltin(Builtins::kArrayForEachLoopContinuation, context, receiver,
callbackfn, this_arg, UndefinedConstant(), receiver,
@@ -2345,7 +2337,7 @@ TF_BUILTIN(ArraySomeLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Node* result = Parameter(Descriptor::kResult);
// This custom lazy deopt point is right after the callback. every() needs
@@ -2374,7 +2366,7 @@ TF_BUILTIN(ArraySomeLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Return(CallBuiltin(Builtins::kArraySomeLoopContinuation, context, receiver,
callbackfn, this_arg, FalseConstant(), receiver, initial_k,
@@ -2387,9 +2379,9 @@ TF_BUILTIN(ArraySomeLoopContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
- Node* object = Parameter(Descriptor::kObject);
+ TNode<JSReceiver> object = CAST(Parameter(Descriptor::kObject));
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Node* to = Parameter(Descriptor::kTo);
InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
@@ -2448,7 +2440,7 @@ TF_BUILTIN(ArrayEveryLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Node* result = Parameter(Descriptor::kResult);
// This custom lazy deopt point is right after the callback. every() needs
@@ -2477,7 +2469,7 @@ TF_BUILTIN(ArrayEveryLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Return(CallBuiltin(Builtins::kArrayEveryLoopContinuation, context, receiver,
callbackfn, this_arg, TrueConstant(), receiver, initial_k,
@@ -2490,9 +2482,9 @@ TF_BUILTIN(ArrayEveryLoopContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
- Node* object = Parameter(Descriptor::kObject);
+ TNode<JSReceiver> object = CAST(Parameter(Descriptor::kObject));
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Node* to = Parameter(Descriptor::kTo);
InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
@@ -2551,9 +2543,9 @@ TF_BUILTIN(ArrayReduceLoopContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* accumulator = Parameter(Descriptor::kAccumulator);
- Node* object = Parameter(Descriptor::kObject);
+ TNode<JSReceiver> object = CAST(Parameter(Descriptor::kObject));
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Node* to = Parameter(Descriptor::kTo);
InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
@@ -2570,16 +2562,14 @@ TF_BUILTIN(ArrayReducePreLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
- Callable stub(
- Builtins::CallableFor(isolate(), Builtins::kArrayReduceLoopContinuation));
// Simulate starting the loop at 0, but ensuring that the accumulator is
// the hole. The continuation stub will search for the initial non-hole
// element, rightly throwing an exception if not found.
- Return(CallStub(stub, context, receiver, callbackfn, UndefinedConstant(),
- TheHoleConstant(), receiver, SmiConstant(0), len,
- UndefinedConstant()));
+ Return(CallBuiltin(Builtins::kArrayReduceLoopContinuation, context, receiver,
+ callbackfn, UndefinedConstant(), TheHoleConstant(),
+ receiver, SmiConstant(0), len, UndefinedConstant()));
}
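Note: the pre-loop continuation seeds the accumulator with the-hole and lets ArrayReduceLoopContinuation find the first element that actually exists, throwing if none does; the reduce-right variant further down now starts that search at len - 1 instead of 0. The language-level behaviour, as a plain-JavaScript sketch:

// With no initial value, reduce() seeds the accumulator with the first
// element that actually exists, skipping holes.
console.log([, , 5, 6].reduce((acc, x) => acc + x));      // 11

// reduceRight() does the same, but searches from the end.
console.log([1, 2, 3].reduceRight((acc, x) => acc - x));  // 0  (3 - 2 - 1)

// If there is no such element, a TypeError is thrown.
try {
  [].reduce((acc, x) => acc + x);
} catch (e) {
  console.log(e instanceof TypeError);                    // true
}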
TF_BUILTIN(ArrayReduceLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
@@ -2588,12 +2578,11 @@ TF_BUILTIN(ArrayReduceLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* accumulator = Parameter(Descriptor::kAccumulator);
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
- Callable stub(
- Builtins::CallableFor(isolate(), Builtins::kArrayReduceLoopContinuation));
- Return(CallStub(stub, context, receiver, callbackfn, UndefinedConstant(),
- accumulator, receiver, initial_k, len, UndefinedConstant()));
+ Return(CallBuiltin(Builtins::kArrayReduceLoopContinuation, context, receiver,
+ callbackfn, UndefinedConstant(), accumulator, receiver,
+ initial_k, len, UndefinedConstant()));
}
TF_BUILTIN(ArrayReduceLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
@@ -2601,13 +2590,12 @@ TF_BUILTIN(ArrayReduceLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Node* result = Parameter(Descriptor::kResult);
- Callable stub(
- Builtins::CallableFor(isolate(), Builtins::kArrayReduceLoopContinuation));
- Return(CallStub(stub, context, receiver, callbackfn, UndefinedConstant(),
- result, receiver, initial_k, len, UndefinedConstant()));
+ Return(CallBuiltin(Builtins::kArrayReduceLoopContinuation, context, receiver,
+ callbackfn, UndefinedConstant(), result, receiver,
+ initial_k, len, UndefinedConstant()));
}
TF_BUILTIN(ArrayReduce, ArrayBuiltinsAssembler) {
@@ -2657,9 +2645,9 @@ TF_BUILTIN(ArrayReduceRightLoopContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* accumulator = Parameter(Descriptor::kAccumulator);
- Node* object = Parameter(Descriptor::kObject);
+ TNode<JSReceiver> object = CAST(Parameter(Descriptor::kObject));
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Node* to = Parameter(Descriptor::kTo);
InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
@@ -2677,16 +2665,15 @@ TF_BUILTIN(ArrayReduceRightPreLoopEagerDeoptContinuation,
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Smi> len = CAST(Parameter(Descriptor::kLength));
- Callable stub(Builtins::CallableFor(
- isolate(), Builtins::kArrayReduceRightLoopContinuation));
// Simulate starting the loop at 0, but ensuring that the accumulator is
// the hole. The continuation stub will search for the initial non-hole
// element, rightly throwing an exception if not found.
- Return(CallStub(stub, context, receiver, callbackfn, UndefinedConstant(),
- TheHoleConstant(), receiver, SmiConstant(0), len,
- UndefinedConstant()));
+ Return(CallBuiltin(Builtins::kArrayReduceRightLoopContinuation, context,
+ receiver, callbackfn, UndefinedConstant(),
+ TheHoleConstant(), receiver, SmiSub(len, SmiConstant(1)),
+ len, UndefinedConstant()));
}
TF_BUILTIN(ArrayReduceRightLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
@@ -2695,12 +2682,11 @@ TF_BUILTIN(ArrayReduceRightLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* accumulator = Parameter(Descriptor::kAccumulator);
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
- Callable stub(Builtins::CallableFor(
- isolate(), Builtins::kArrayReduceRightLoopContinuation));
- Return(CallStub(stub, context, receiver, callbackfn, UndefinedConstant(),
- accumulator, receiver, initial_k, len, UndefinedConstant()));
+ Return(CallBuiltin(Builtins::kArrayReduceRightLoopContinuation, context,
+ receiver, callbackfn, UndefinedConstant(), accumulator,
+ receiver, initial_k, len, UndefinedConstant()));
}
TF_BUILTIN(ArrayReduceRightLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
@@ -2708,13 +2694,12 @@ TF_BUILTIN(ArrayReduceRightLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Node* result = Parameter(Descriptor::kResult);
- Callable stub(Builtins::CallableFor(
- isolate(), Builtins::kArrayReduceRightLoopContinuation));
- Return(CallStub(stub, context, receiver, callbackfn, UndefinedConstant(),
- result, receiver, initial_k, len, UndefinedConstant()));
+ Return(CallBuiltin(Builtins::kArrayReduceRightLoopContinuation, context,
+ receiver, callbackfn, UndefinedConstant(), result,
+ receiver, initial_k, len, UndefinedConstant()));
}
TF_BUILTIN(ArrayReduceRight, ArrayBuiltinsAssembler) {
@@ -2767,9 +2752,9 @@ TF_BUILTIN(ArrayFilterLoopContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
- Node* object = Parameter(Descriptor::kObject);
+ TNode<JSReceiver> object = CAST(Parameter(Descriptor::kObject));
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Node* to = Parameter(Descriptor::kTo);
InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
@@ -2788,7 +2773,7 @@ TF_BUILTIN(ArrayFilterLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Node* to = Parameter(Descriptor::kTo);
Return(CallBuiltin(Builtins::kArrayFilterLoopContinuation, context, receiver,
@@ -2803,7 +2788,7 @@ TF_BUILTIN(ArrayFilterLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Node* value_k = Parameter(Descriptor::kValueK);
Node* result = Parameter(Descriptor::kResult);
@@ -2864,9 +2849,9 @@ TF_BUILTIN(ArrayMapLoopContinuation, ArrayBuiltinsAssembler) {
Node* callbackfn = Parameter(Descriptor::kCallbackFn);
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
- Node* object = Parameter(Descriptor::kObject);
+ TNode<JSReceiver> object = CAST(Parameter(Descriptor::kObject));
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Node* to = Parameter(Descriptor::kTo);
InitIteratingArrayBuiltinLoopContinuation(context, receiver, callbackfn,
@@ -2885,7 +2870,7 @@ TF_BUILTIN(ArrayMapLoopEagerDeoptContinuation, ArrayBuiltinsAssembler) {
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Return(CallBuiltin(Builtins::kArrayMapLoopContinuation, context, receiver,
callbackfn, this_arg, array, receiver, initial_k, len,
@@ -2899,7 +2884,7 @@ TF_BUILTIN(ArrayMapLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
Node* this_arg = Parameter(Descriptor::kThisArg);
Node* array = Parameter(Descriptor::kArray);
Node* initial_k = Parameter(Descriptor::kInitialK);
- Node* len = Parameter(Descriptor::kLength);
+ TNode<Number> len = CAST(Parameter(Descriptor::kLength));
Node* result = Parameter(Descriptor::kResult);
// This custom lazy deopt point is right after the callback. map() needs
@@ -2965,7 +2950,7 @@ TF_BUILTIN(ArrayIsArray, CodeStubAssembler) {
Label call_runtime(this), return_true(this), return_false(this);
GotoIf(TaggedIsSmi(object), &return_false);
- TNode<Word32T> instance_type = LoadInstanceType(CAST(object));
+ TNode<Int32T> instance_type = LoadInstanceType(CAST(object));
GotoIf(InstanceTypeEqual(instance_type, JS_ARRAY_TYPE), &return_true);
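Note: ArrayIsArray answers Smis and plain JS_ARRAY_TYPE objects inline and falls through to the runtime for everything else, which is presumably where proxy receivers get unwrapped, since the spec requires Array.isArray to look through proxies. Illustrative plain JavaScript:

console.log(Array.isArray([]));                 // true, fast path
console.log(Array.isArray(42));                 // false, Smi check
console.log(Array.isArray(new Proxy([], {})));  // true, resolved on the slow path
console.log(Array.isArray({ length: 0 }));      // false, array-likes don't count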
@@ -2991,6 +2976,15 @@ class ArrayIncludesIndexofAssembler : public CodeStubAssembler {
enum SearchVariant { kIncludes, kIndexOf };
void Generate(SearchVariant variant);
+ void GenerateSmiOrObject(SearchVariant variant, Node* context, Node* elements,
+ Node* search_element, Node* array_length,
+ Node* from_index);
+ void GeneratePackedDoubles(SearchVariant variant, Node* elements,
+ Node* search_element, Node* array_length,
+ Node* from_index);
+ void GenerateHoleyDoubles(SearchVariant variant, Node* elements,
+ Node* search_element, Node* array_length,
+ Node* from_index);
};
void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant) {
@@ -3008,8 +3002,7 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant) {
Node* intptr_zero = IntPtrConstant(0);
- Label init_index(this), return_found(this), return_not_found(this),
- call_runtime(this);
+ Label init_index(this), return_not_found(this), call_runtime(this);
// Take slow path if not a JSArray, if retrieving elements requires
// traversing prototype, or if access checks are required.
@@ -3021,7 +3014,8 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant) {
// JSArray length is always a positive Smi for fast arrays.
CSA_ASSERT(this, TaggedIsPositiveSmi(LoadJSArrayLength(array)));
- Node* array_length = SmiUntag(LoadFastJSArrayLength(array));
+ Node* array_length = LoadFastJSArrayLength(array);
+ Node* array_length_untagged = SmiUntag(array_length);
{
// Initialize fromIndex.
@@ -3049,7 +3043,7 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant) {
GotoIf(IntPtrGreaterThanOrEqual(index_var.value(), intptr_zero), &done);
// The fromIndex is negative: add it to the array's length.
- index_var.Bind(IntPtrAdd(array_length, index_var.value()));
+ index_var.Bind(IntPtrAdd(array_length_untagged, index_var.value()));
// Clamp negative results at zero.
GotoIf(IntPtrGreaterThanOrEqual(index_var.value(), intptr_zero), &done);
index_var.Bind(intptr_zero);
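Note: this block normalises fromIndex for both includes and indexOf: a negative start index is added to the (now separately untagged) length and clamped at zero, and a start index at or past the length returns not-found early. In plain JavaScript terms:

const xs = ['a', 'b', 'c'];

console.log(xs.indexOf('b', -2));     // 1     (-2 + 3 = 1, search starts there)
console.log(xs.includes('a', -100));  // true  (clamped to 0)
console.log(xs.indexOf('a', -100));   // 0
console.log(xs.includes('a', 3));     // false (fromIndex >= length)
console.log(xs.indexOf('a', 3));      // -1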
@@ -3059,7 +3053,7 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant) {
}
// Fail early if startIndex >= array.length.
- GotoIf(IntPtrGreaterThanOrEqual(index_var.value(), array_length),
+ GotoIf(IntPtrGreaterThanOrEqual(index_var.value(), array_length_untagged),
&return_not_found);
Label if_smiorobjects(this), if_packed_doubles(this), if_holey_doubles(this);
@@ -3080,179 +3074,139 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant) {
BIND(&if_smiorobjects);
{
- VARIABLE(search_num, MachineRepresentation::kFloat64);
- Label ident_loop(this, &index_var), heap_num_loop(this, &search_num),
- string_loop(this), bigint_loop(this, &index_var),
- undef_loop(this, &index_var), not_smi(this), not_heap_num(this);
-
- GotoIfNot(TaggedIsSmi(search_element), &not_smi);
- search_num.Bind(SmiToFloat64(CAST(search_element)));
- Goto(&heap_num_loop);
-
- BIND(&not_smi);
- if (variant == kIncludes) {
- GotoIf(IsUndefined(search_element), &undef_loop);
- }
- Node* map = LoadMap(CAST(search_element));
- GotoIfNot(IsHeapNumberMap(map), &not_heap_num);
- search_num.Bind(LoadHeapNumberValue(CAST(search_element)));
- Goto(&heap_num_loop);
-
- BIND(&not_heap_num);
- Node* search_type = LoadMapInstanceType(map);
- GotoIf(IsStringInstanceType(search_type), &string_loop);
- GotoIf(IsBigIntInstanceType(search_type), &bigint_loop);
- Goto(&ident_loop);
-
- BIND(&ident_loop);
- {
- GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
- &return_not_found);
- Node* element_k = LoadFixedArrayElement(elements, index_var.value());
- GotoIf(WordEqual(element_k, search_element), &return_found);
+ Callable callable =
+ (variant == kIncludes)
+ ? Builtins::CallableFor(isolate(),
+ Builtins::kArrayIncludesSmiOrObject)
+ : Builtins::CallableFor(isolate(),
+ Builtins::kArrayIndexOfSmiOrObject);
+ Node* result = CallStub(callable, context, elements, search_element,
+ array_length, SmiTag(index_var.value()));
+ args.PopAndReturn(result);
+ }
- Increment(&index_var);
- Goto(&ident_loop);
- }
+ BIND(&if_packed_doubles);
+ {
+ Callable callable =
+ (variant == kIncludes)
+ ? Builtins::CallableFor(isolate(),
+ Builtins::kArrayIncludesPackedDoubles)
+ : Builtins::CallableFor(isolate(),
+ Builtins::kArrayIndexOfPackedDoubles);
+ Node* result = CallStub(callable, context, elements, search_element,
+ array_length, SmiTag(index_var.value()));
+ args.PopAndReturn(result);
+ }
- if (variant == kIncludes) {
- BIND(&undef_loop);
+ BIND(&if_holey_doubles);
+ {
+ Callable callable =
+ (variant == kIncludes)
+ ? Builtins::CallableFor(isolate(),
+ Builtins::kArrayIncludesHoleyDoubles)
+ : Builtins::CallableFor(isolate(),
+ Builtins::kArrayIndexOfHoleyDoubles);
+ Node* result = CallStub(callable, context, elements, search_element,
+ array_length, SmiTag(index_var.value()));
+ args.PopAndReturn(result);
+ }
- GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
- &return_not_found);
- Node* element_k = LoadFixedArrayElement(elements, index_var.value());
- GotoIf(IsUndefined(element_k), &return_found);
- GotoIf(IsTheHole(element_k), &return_found);
+ BIND(&return_not_found);
+ if (variant == kIncludes) {
+ args.PopAndReturn(FalseConstant());
+ } else {
+ args.PopAndReturn(NumberConstant(-1));
+ }
- Increment(&index_var);
- Goto(&undef_loop);
- }
+ BIND(&call_runtime);
+ {
+ Node* start_from =
+ args.GetOptionalArgumentValue(kFromIndexArg, UndefinedConstant());
+ Runtime::FunctionId function = variant == kIncludes
+ ? Runtime::kArrayIncludes_Slow
+ : Runtime::kArrayIndexOf;
+ args.PopAndReturn(
+ CallRuntime(function, context, array, search_element, start_from));
+ }
+}
- BIND(&heap_num_loop);
- {
- Label nan_loop(this, &index_var), not_nan_loop(this, &index_var);
- Label* nan_handling =
- variant == kIncludes ? &nan_loop : &return_not_found;
- BranchIfFloat64IsNaN(search_num.value(), nan_handling, &not_nan_loop);
+void ArrayIncludesIndexofAssembler::GenerateSmiOrObject(
+ SearchVariant variant, Node* context, Node* elements, Node* search_element,
+ Node* array_length, Node* from_index) {
+ VARIABLE(index_var, MachineType::PointerRepresentation(),
+ SmiUntag(from_index));
+ VARIABLE(search_num, MachineRepresentation::kFloat64);
+ Node* array_length_untagged = SmiUntag(array_length);
- BIND(&not_nan_loop);
- {
- Label continue_loop(this), not_smi(this);
- GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
- &return_not_found);
- Node* element_k = LoadFixedArrayElement(elements, index_var.value());
- GotoIfNot(TaggedIsSmi(element_k), &not_smi);
- Branch(Float64Equal(search_num.value(), SmiToFloat64(element_k)),
- &return_found, &continue_loop);
-
- BIND(&not_smi);
- GotoIfNot(IsHeapNumber(element_k), &continue_loop);
- Branch(Float64Equal(search_num.value(), LoadHeapNumberValue(element_k)),
- &return_found, &continue_loop);
-
- BIND(&continue_loop);
- Increment(&index_var);
- Goto(&not_nan_loop);
- }
+ Label ident_loop(this, &index_var), heap_num_loop(this, &search_num),
+ string_loop(this), bigint_loop(this, &index_var),
+ undef_loop(this, &index_var), not_smi(this), not_heap_num(this),
+ return_found(this), return_not_found(this);
- // Array.p.includes uses SameValueZero comparisons, where NaN == NaN.
- if (variant == kIncludes) {
- BIND(&nan_loop);
- Label continue_loop(this);
- GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
- &return_not_found);
- Node* element_k = LoadFixedArrayElement(elements, index_var.value());
- GotoIf(TaggedIsSmi(element_k), &continue_loop);
- GotoIfNot(IsHeapNumber(element_k), &continue_loop);
- BranchIfFloat64IsNaN(LoadHeapNumberValue(element_k), &return_found,
- &continue_loop);
-
- BIND(&continue_loop);
- Increment(&index_var);
- Goto(&nan_loop);
- }
- }
+ GotoIfNot(TaggedIsSmi(search_element), &not_smi);
+ search_num.Bind(SmiToFloat64(search_element));
+ Goto(&heap_num_loop);
- BIND(&string_loop);
- {
- TNode<String> search_element_string = CAST(search_element);
- Label continue_loop(this), next_iteration(this, &index_var),
- slow_compare(this), runtime(this, Label::kDeferred);
- TNode<IntPtrT> search_length =
- LoadStringLengthAsWord(search_element_string);
- Goto(&next_iteration);
- BIND(&next_iteration);
- GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
- &return_not_found);
- Node* element_k = LoadFixedArrayElement(elements, index_var.value());
- GotoIf(TaggedIsSmi(element_k), &continue_loop);
- GotoIf(WordEqual(search_element_string, element_k), &return_found);
- Node* element_k_type = LoadInstanceType(element_k);
- GotoIfNot(IsStringInstanceType(element_k_type), &continue_loop);
- Branch(WordEqual(search_length, LoadStringLengthAsWord(element_k)),
- &slow_compare, &continue_loop);
-
- BIND(&slow_compare);
- StringBuiltinsAssembler string_asm(state());
- string_asm.StringEqual_Core(context, search_element_string, search_type,
- element_k, element_k_type, search_length,
- &return_found, &continue_loop, &runtime);
- BIND(&runtime);
- TNode<Object> result = CallRuntime(Runtime::kStringEqual, context,
- search_element_string, element_k);
- Branch(WordEqual(result, TrueConstant()), &return_found, &continue_loop);
+ BIND(&not_smi);
+ if (variant == kIncludes) {
+ GotoIf(IsUndefined(search_element), &undef_loop);
+ }
+ Node* map = LoadMap(search_element);
+ GotoIfNot(IsHeapNumberMap(map), &not_heap_num);
+ search_num.Bind(LoadHeapNumberValue(search_element));
+ Goto(&heap_num_loop);
+
+ BIND(&not_heap_num);
+ Node* search_type = LoadMapInstanceType(map);
+ GotoIf(IsStringInstanceType(search_type), &string_loop);
+ GotoIf(IsBigIntInstanceType(search_type), &bigint_loop);
+ Goto(&ident_loop);
+
+ BIND(&ident_loop);
+ {
+ GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
+ &return_not_found);
+ Node* element_k = LoadFixedArrayElement(elements, index_var.value());
+ GotoIf(WordEqual(element_k, search_element), &return_found);
- BIND(&continue_loop);
- Increment(&index_var);
- Goto(&next_iteration);
- }
+ Increment(&index_var);
+ Goto(&ident_loop);
+ }
- BIND(&bigint_loop);
- {
- GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
- &return_not_found);
+ if (variant == kIncludes) {
+ BIND(&undef_loop);
- Node* element_k = LoadFixedArrayElement(elements, index_var.value());
- Label continue_loop(this);
- GotoIf(TaggedIsSmi(element_k), &continue_loop);
- GotoIfNot(IsBigInt(element_k), &continue_loop);
- TNode<Object> result = CallRuntime(Runtime::kBigIntEqualToBigInt, context,
- search_element, element_k);
- Branch(WordEqual(result, TrueConstant()), &return_found, &continue_loop);
+ GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
+ &return_not_found);
+ Node* element_k = LoadFixedArrayElement(elements, index_var.value());
+ GotoIf(IsUndefined(element_k), &return_found);
+ GotoIf(IsTheHole(element_k), &return_found);
- BIND(&continue_loop);
- Increment(&index_var);
- Goto(&bigint_loop);
- }
+ Increment(&index_var);
+ Goto(&undef_loop);
}
- BIND(&if_packed_doubles);
+ BIND(&heap_num_loop);
{
- Label nan_loop(this, &index_var), not_nan_loop(this, &index_var),
- hole_loop(this, &index_var), search_notnan(this);
- VARIABLE(search_num, MachineRepresentation::kFloat64);
-
- GotoIfNot(TaggedIsSmi(search_element), &search_notnan);
- search_num.Bind(SmiToFloat64(CAST(search_element)));
- Goto(&not_nan_loop);
-
- BIND(&search_notnan);
- GotoIfNot(IsHeapNumber(search_element), &return_not_found);
-
- search_num.Bind(LoadHeapNumberValue(CAST(search_element)));
-
+ Label nan_loop(this, &index_var), not_nan_loop(this, &index_var);
Label* nan_handling = variant == kIncludes ? &nan_loop : &return_not_found;
BranchIfFloat64IsNaN(search_num.value(), nan_handling, &not_nan_loop);
BIND(&not_nan_loop);
{
- Label continue_loop(this);
- GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
+ Label continue_loop(this), not_smi(this);
+ GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
&return_not_found);
- Node* element_k = LoadFixedDoubleArrayElement(elements, index_var.value(),
- MachineType::Float64());
- Branch(Float64Equal(element_k, search_num.value()), &return_found,
- &continue_loop);
+ Node* element_k = LoadFixedArrayElement(elements, index_var.value());
+ GotoIfNot(TaggedIsSmi(element_k), &not_smi);
+ Branch(Float64Equal(search_num.value(), SmiToFloat64(element_k)),
+ &return_found, &continue_loop);
+
+ BIND(&not_smi);
+ GotoIfNot(IsHeapNumber(element_k), &continue_loop);
+ Branch(Float64Equal(search_num.value(), LoadHeapNumberValue(element_k)),
+ &return_found, &continue_loop);
+
BIND(&continue_loop);
Increment(&index_var);
Goto(&not_nan_loop);
@@ -3262,112 +3216,249 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant) {
if (variant == kIncludes) {
BIND(&nan_loop);
Label continue_loop(this);
- GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
+ GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
&return_not_found);
- Node* element_k = LoadFixedDoubleArrayElement(elements, index_var.value(),
- MachineType::Float64());
- BranchIfFloat64IsNaN(element_k, &return_found, &continue_loop);
+ Node* element_k = LoadFixedArrayElement(elements, index_var.value());
+ GotoIf(TaggedIsSmi(element_k), &continue_loop);
+ GotoIfNot(IsHeapNumber(element_k), &continue_loop);
+ BranchIfFloat64IsNaN(LoadHeapNumberValue(element_k), &return_found,
+ &continue_loop);
+
BIND(&continue_loop);
Increment(&index_var);
Goto(&nan_loop);
}
}
- BIND(&if_holey_doubles);
+ BIND(&string_loop);
{
- Label nan_loop(this, &index_var), not_nan_loop(this, &index_var),
- hole_loop(this, &index_var), search_notnan(this);
- VARIABLE(search_num, MachineRepresentation::kFloat64);
-
- GotoIfNot(TaggedIsSmi(search_element), &search_notnan);
- search_num.Bind(SmiToFloat64(CAST(search_element)));
- Goto(&not_nan_loop);
-
- BIND(&search_notnan);
- if (variant == kIncludes) {
- GotoIf(IsUndefined(search_element), &hole_loop);
- }
- GotoIfNot(IsHeapNumber(search_element), &return_not_found);
+ TNode<String> search_element_string = CAST(search_element);
+ Label continue_loop(this), next_iteration(this, &index_var),
+ slow_compare(this), runtime(this, Label::kDeferred);
+ TNode<IntPtrT> search_length =
+ LoadStringLengthAsWord(search_element_string);
+ Goto(&next_iteration);
+ BIND(&next_iteration);
+ GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
+ &return_not_found);
+ Node* element_k = LoadFixedArrayElement(elements, index_var.value());
+ GotoIf(TaggedIsSmi(element_k), &continue_loop);
+ GotoIf(WordEqual(search_element_string, element_k), &return_found);
+ Node* element_k_type = LoadInstanceType(element_k);
+ GotoIfNot(IsStringInstanceType(element_k_type), &continue_loop);
+ Branch(WordEqual(search_length, LoadStringLengthAsWord(element_k)),
+ &slow_compare, &continue_loop);
+
+ BIND(&slow_compare);
+ StringBuiltinsAssembler string_asm(state());
+ string_asm.StringEqual_Core(context, search_element_string, search_type,
+ element_k, element_k_type, search_length,
+ &return_found, &continue_loop, &runtime);
+ BIND(&runtime);
+ TNode<Object> result = CallRuntime(Runtime::kStringEqual, context,
+ search_element_string, element_k);
+ Branch(WordEqual(result, TrueConstant()), &return_found, &continue_loop);
- search_num.Bind(LoadHeapNumberValue(CAST(search_element)));
+ BIND(&continue_loop);
+ Increment(&index_var);
+ Goto(&next_iteration);
+ }
- Label* nan_handling = variant == kIncludes ? &nan_loop : &return_not_found;
- BranchIfFloat64IsNaN(search_num.value(), nan_handling, &not_nan_loop);
+ BIND(&bigint_loop);
+ {
+ GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
+ &return_not_found);
+
+ Node* element_k = LoadFixedArrayElement(elements, index_var.value());
+ Label continue_loop(this);
+ GotoIf(TaggedIsSmi(element_k), &continue_loop);
+ GotoIfNot(IsBigInt(element_k), &continue_loop);
+ TNode<Object> result = CallRuntime(Runtime::kBigIntEqualToBigInt, context,
+ search_element, element_k);
+ Branch(WordEqual(result, TrueConstant()), &return_found, &continue_loop);
+
+ BIND(&continue_loop);
+ Increment(&index_var);
+ Goto(&bigint_loop);
+ }
+ BIND(&return_found);
+ if (variant == kIncludes) {
+ Return(TrueConstant());
+ } else {
+ Return(SmiTag(index_var.value()));
+ }
- BIND(&not_nan_loop);
- {
- Label continue_loop(this);
- GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
- &return_not_found);
+ BIND(&return_not_found);
+ if (variant == kIncludes) {
+ Return(FalseConstant());
+ } else {
+ Return(NumberConstant(-1));
+ }
+}
- // No need for hole checking here; the following Float64Equal will
- // return 'not equal' for holes anyway.
- Node* element_k = LoadFixedDoubleArrayElement(elements, index_var.value(),
- MachineType::Float64());
+void ArrayIncludesIndexofAssembler::GeneratePackedDoubles(SearchVariant variant,
+ Node* elements,
+ Node* search_element,
+ Node* array_length,
+ Node* from_index) {
+ VARIABLE(index_var, MachineType::PointerRepresentation(),
+ SmiUntag(from_index));
+ Node* array_length_untagged = SmiUntag(array_length);
- Branch(Float64Equal(element_k, search_num.value()), &return_found,
- &continue_loop);
- BIND(&continue_loop);
- Increment(&index_var);
- Goto(&not_nan_loop);
- }
+ Label nan_loop(this, &index_var), not_nan_loop(this, &index_var),
+ hole_loop(this, &index_var), search_notnan(this), return_found(this),
+ return_not_found(this);
+ VARIABLE(search_num, MachineRepresentation::kFloat64);
+ search_num.Bind(Float64Constant(0));
- // Array.p.includes uses SameValueZero comparisons, where NaN == NaN.
- if (variant == kIncludes) {
- BIND(&nan_loop);
- Label continue_loop(this);
- GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
- &return_not_found);
+ GotoIfNot(TaggedIsSmi(search_element), &search_notnan);
+ search_num.Bind(SmiToFloat64(search_element));
+ Goto(&not_nan_loop);
- // Load double value or continue if it's the hole NaN.
- Node* element_k = LoadFixedDoubleArrayElement(
- elements, index_var.value(), MachineType::Float64(), 0,
- INTPTR_PARAMETERS, &continue_loop);
+ BIND(&search_notnan);
+ GotoIfNot(IsHeapNumber(search_element), &return_not_found);
- BranchIfFloat64IsNaN(element_k, &return_found, &continue_loop);
- BIND(&continue_loop);
- Increment(&index_var);
- Goto(&nan_loop);
- }
+ search_num.Bind(LoadHeapNumberValue(search_element));
- // Array.p.includes treats the hole as undefined.
- if (variant == kIncludes) {
- BIND(&hole_loop);
- GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
- &return_not_found);
+ Label* nan_handling = variant == kIncludes ? &nan_loop : &return_not_found;
+ BranchIfFloat64IsNaN(search_num.value(), nan_handling, &not_nan_loop);
- // Check if the element is a double hole, but don't load it.
- LoadFixedDoubleArrayElement(elements, index_var.value(),
- MachineType::None(), 0, INTPTR_PARAMETERS,
- &return_found);
+ BIND(&not_nan_loop);
+ {
+ Label continue_loop(this);
+ GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
+ &return_not_found);
+ Node* element_k = LoadFixedDoubleArrayElement(elements, index_var.value(),
+ MachineType::Float64());
+ Branch(Float64Equal(element_k, search_num.value()), &return_found,
+ &continue_loop);
+ BIND(&continue_loop);
+ Increment(&index_var);
+ Goto(&not_nan_loop);
+ }
- Increment(&index_var);
- Goto(&hole_loop);
- }
+ // Array.p.includes uses SameValueZero comparisons, where NaN == NaN.
+ if (variant == kIncludes) {
+ BIND(&nan_loop);
+ Label continue_loop(this);
+ GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
+ &return_not_found);
+ Node* element_k = LoadFixedDoubleArrayElement(elements, index_var.value(),
+ MachineType::Float64());
+ BranchIfFloat64IsNaN(element_k, &return_found, &continue_loop);
+ BIND(&continue_loop);
+ Increment(&index_var);
+ Goto(&nan_loop);
}
BIND(&return_found);
if (variant == kIncludes) {
- args.PopAndReturn(TrueConstant());
+ Return(TrueConstant());
} else {
- args.PopAndReturn(SmiTag(index_var.value()));
+ Return(SmiTag(index_var.value()));
}
BIND(&return_not_found);
if (variant == kIncludes) {
- args.PopAndReturn(FalseConstant());
+ Return(FalseConstant());
} else {
- args.PopAndReturn(NumberConstant(-1));
+ Return(NumberConstant(-1));
}
+}
- BIND(&call_runtime);
+void ArrayIncludesIndexofAssembler::GenerateHoleyDoubles(SearchVariant variant,
+ Node* elements,
+ Node* search_element,
+ Node* array_length,
+ Node* from_index) {
+ VARIABLE(index_var, MachineType::PointerRepresentation(),
+ SmiUntag(from_index));
+ Node* array_length_untagged = SmiUntag(array_length);
+
+ Label nan_loop(this, &index_var), not_nan_loop(this, &index_var),
+ hole_loop(this, &index_var), search_notnan(this), return_found(this),
+ return_not_found(this);
+ VARIABLE(search_num, MachineRepresentation::kFloat64);
+ search_num.Bind(Float64Constant(0));
+
+ GotoIfNot(TaggedIsSmi(search_element), &search_notnan);
+ search_num.Bind(SmiToFloat64(search_element));
+ Goto(&not_nan_loop);
+
+ BIND(&search_notnan);
+ if (variant == kIncludes) {
+ GotoIf(IsUndefined(search_element), &hole_loop);
+ }
+ GotoIfNot(IsHeapNumber(search_element), &return_not_found);
+
+ search_num.Bind(LoadHeapNumberValue(search_element));
+
+ Label* nan_handling = variant == kIncludes ? &nan_loop : &return_not_found;
+ BranchIfFloat64IsNaN(search_num.value(), nan_handling, &not_nan_loop);
+
+ BIND(&not_nan_loop);
{
- Node* start_from = args.GetOptionalArgumentValue(kFromIndexArg);
- Runtime::FunctionId function = variant == kIncludes
- ? Runtime::kArrayIncludes_Slow
- : Runtime::kArrayIndexOf;
- args.PopAndReturn(
- CallRuntime(function, context, array, search_element, start_from));
+ Label continue_loop(this);
+ GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
+ &return_not_found);
+
+ // No need for hole checking here; the following Float64Equal will
+ // return 'not equal' for holes anyway.
+ Node* element_k = LoadFixedDoubleArrayElement(elements, index_var.value(),
+ MachineType::Float64());
+
+ Branch(Float64Equal(element_k, search_num.value()), &return_found,
+ &continue_loop);
+ BIND(&continue_loop);
+ Increment(&index_var);
+ Goto(&not_nan_loop);
+ }
+
+ // Array.p.includes uses SameValueZero comparisons, where NaN == NaN.
+ if (variant == kIncludes) {
+ BIND(&nan_loop);
+ Label continue_loop(this);
+ GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
+ &return_not_found);
+
+ // Load double value or continue if it's the hole NaN.
+ Node* element_k = LoadFixedDoubleArrayElement(
+ elements, index_var.value(), MachineType::Float64(), 0,
+ INTPTR_PARAMETERS, &continue_loop);
+
+ BranchIfFloat64IsNaN(element_k, &return_found, &continue_loop);
+ BIND(&continue_loop);
+ Increment(&index_var);
+ Goto(&nan_loop);
+ }
+
+ // Array.p.includes treats the hole as undefined.
+ if (variant == kIncludes) {
+ BIND(&hole_loop);
+ GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged),
+ &return_not_found);
+
+ // Check if the element is a double hole, but don't load it.
+ LoadFixedDoubleArrayElement(elements, index_var.value(),
+ MachineType::None(), 0, INTPTR_PARAMETERS,
+ &return_found);
+
+ Increment(&index_var);
+ Goto(&hole_loop);
+ }
+
+ BIND(&return_found);
+ if (variant == kIncludes) {
+ Return(TrueConstant());
+ } else {
+ Return(SmiTag(index_var.value()));
+ }
+
+ BIND(&return_not_found);
+ if (variant == kIncludes) {
+ Return(FalseConstant());
+ } else {
+ Return(NumberConstant(-1));
}
}
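Note: the kIncludes-only nan_loop and hole_loop branches above exist because Array.prototype.includes compares with SameValueZero (so NaN matches NaN) and treats holes as undefined, while indexOf uses strict equality and skips holes. A plain-JavaScript sketch of the difference:

const doubles = [1.5, NaN];
console.log(doubles.includes(NaN));      // true   (SameValueZero)
console.log(doubles.indexOf(NaN));       // -1     (strict equality, NaN !== NaN)

const holey = [, 1.5];                   // index 0 is a hole
console.log(holey.includes(undefined));  // true   (hole treated as undefined)
console.log(holey.indexOf(undefined));   // -1     (holes are skipped)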
@@ -3375,69 +3466,95 @@ TF_BUILTIN(ArrayIncludes, ArrayIncludesIndexofAssembler) {
Generate(kIncludes);
}
-TF_BUILTIN(ArrayIndexOf, ArrayIncludesIndexofAssembler) { Generate(kIndexOf); }
+TF_BUILTIN(ArrayIncludesSmiOrObject, ArrayIncludesIndexofAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* elements = Parameter(Descriptor::kElements);
+ Node* search_element = Parameter(Descriptor::kSearchElement);
+ Node* array_length = Parameter(Descriptor::kLength);
+ Node* from_index = Parameter(Descriptor::kFromIndex);
-class ArrayPrototypeIterationAssembler : public CodeStubAssembler {
- public:
- explicit ArrayPrototypeIterationAssembler(compiler::CodeAssemblerState* state)
- : CodeStubAssembler(state) {}
+ GenerateSmiOrObject(kIncludes, context, elements, search_element,
+ array_length, from_index);
+}
+
+TF_BUILTIN(ArrayIncludesPackedDoubles, ArrayIncludesIndexofAssembler) {
+ Node* elements = Parameter(Descriptor::kElements);
+ Node* search_element = Parameter(Descriptor::kSearchElement);
+ Node* array_length = Parameter(Descriptor::kLength);
+ Node* from_index = Parameter(Descriptor::kFromIndex);
+
+ GeneratePackedDoubles(kIncludes, elements, search_element, array_length,
+ from_index);
+}
- protected:
- void Generate_ArrayPrototypeIterationMethod(TNode<Context> context,
- TNode<Object> receiver,
- IterationKind iteration_kind) {
- VARIABLE(var_array, MachineRepresentation::kTagged);
- VARIABLE(var_map, MachineRepresentation::kTagged);
- VARIABLE(var_type, MachineRepresentation::kWord32);
+TF_BUILTIN(ArrayIncludesHoleyDoubles, ArrayIncludesIndexofAssembler) {
+ Node* elements = Parameter(Descriptor::kElements);
+ Node* search_element = Parameter(Descriptor::kSearchElement);
+ Node* array_length = Parameter(Descriptor::kLength);
+ Node* from_index = Parameter(Descriptor::kFromIndex);
- Label if_isnotobject(this, Label::kDeferred);
- Label create_array_iterator(this);
+ GenerateHoleyDoubles(kIncludes, elements, search_element, array_length,
+ from_index);
+}
- GotoIf(TaggedIsSmi(receiver), &if_isnotobject);
+TF_BUILTIN(ArrayIndexOf, ArrayIncludesIndexofAssembler) { Generate(kIndexOf); }
- TNode<HeapObject> object_receiver = CAST(receiver);
- var_array.Bind(object_receiver);
- var_map.Bind(LoadMap(object_receiver));
- var_type.Bind(LoadMapInstanceType(var_map.value()));
- Branch(IsJSReceiverInstanceType(var_type.value()), &create_array_iterator,
- &if_isnotobject);
+TF_BUILTIN(ArrayIndexOfSmiOrObject, ArrayIncludesIndexofAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+ Node* elements = Parameter(Descriptor::kElements);
+ Node* search_element = Parameter(Descriptor::kSearchElement);
+ Node* array_length = Parameter(Descriptor::kLength);
+ Node* from_index = Parameter(Descriptor::kFromIndex);
- BIND(&if_isnotobject);
- {
- TNode<JSReceiver> result = ToObject(context, receiver);
- var_array.Bind(result);
- var_map.Bind(LoadMap(result));
- var_type.Bind(LoadMapInstanceType(var_map.value()));
- Goto(&create_array_iterator);
- }
+ GenerateSmiOrObject(kIndexOf, context, elements, search_element, array_length,
+ from_index);
+}
- BIND(&create_array_iterator);
- Return(CreateArrayIterator(var_array.value(), var_map.value(),
- var_type.value(), context, iteration_kind));
- }
-};
+TF_BUILTIN(ArrayIndexOfPackedDoubles, ArrayIncludesIndexofAssembler) {
+ Node* elements = Parameter(Descriptor::kElements);
+ Node* search_element = Parameter(Descriptor::kSearchElement);
+ Node* array_length = Parameter(Descriptor::kLength);
+ Node* from_index = Parameter(Descriptor::kFromIndex);
+
+ GeneratePackedDoubles(kIndexOf, elements, search_element, array_length,
+ from_index);
+}
+
+TF_BUILTIN(ArrayIndexOfHoleyDoubles, ArrayIncludesIndexofAssembler) {
+ Node* elements = Parameter(Descriptor::kElements);
+ Node* search_element = Parameter(Descriptor::kSearchElement);
+ Node* array_length = Parameter(Descriptor::kLength);
+ Node* from_index = Parameter(Descriptor::kFromIndex);
-TF_BUILTIN(ArrayPrototypeValues, ArrayPrototypeIterationAssembler) {
+ GenerateHoleyDoubles(kIndexOf, elements, search_element, array_length,
+ from_index);
+}
+
+// ES #sec-array.prototype.values
+TF_BUILTIN(ArrayPrototypeValues, CodeStubAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- Generate_ArrayPrototypeIterationMethod(context, receiver,
- IterationKind::kValues);
+ Return(CreateArrayIterator(context, ToObject(context, receiver),
+ IterationKind::kValues));
}
-TF_BUILTIN(ArrayPrototypeEntries, ArrayPrototypeIterationAssembler) {
+// ES #sec-array.prototype.entries
+TF_BUILTIN(ArrayPrototypeEntries, CodeStubAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- Generate_ArrayPrototypeIterationMethod(context, receiver,
- IterationKind::kEntries);
+ Return(CreateArrayIterator(context, ToObject(context, receiver),
+ IterationKind::kEntries));
}
-TF_BUILTIN(ArrayPrototypeKeys, ArrayPrototypeIterationAssembler) {
+// ES #sec-array.prototype.keys
+TF_BUILTIN(ArrayPrototypeKeys, CodeStubAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- Generate_ArrayPrototypeIterationMethod(context, receiver,
- IterationKind::kKeys);
+ Return(CreateArrayIterator(context, ToObject(context, receiver),
+ IterationKind::kKeys));
}
+// ES #sec-%arrayiteratorprototype%.next
TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
const char* method_name = "Array Iterator.prototype.next";
@@ -3454,16 +3571,13 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
Label throw_bad_receiver(this, Label::kDeferred);
Label set_done(this);
- Label allocate_key_result(this);
Label allocate_entry_if_needed(this);
Label allocate_iterator_result(this);
- Label generic_values(this);
// If O does not have all of the internal slots of an Array Iterator Instance
// (22.1.5.3), throw a TypeError exception
GotoIf(TaggedIsSmi(iterator), &throw_bad_receiver);
- TNode<Int32T> instance_type = LoadInstanceType(iterator);
- GotoIf(IsArrayIteratorInstanceType(instance_type), &throw_bad_receiver);
+ GotoIfNot(IsJSArrayIterator(iterator), &throw_bad_receiver);
// Let a be O.[[IteratedObject]].
Node* array =
@@ -3471,19 +3585,23 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
// Let index be O.[[ArrayIteratorNextIndex]].
Node* index = LoadObjectField(iterator, JSArrayIterator::kNextIndexOffset);
- Node* orig_map =
- LoadObjectField(iterator, JSArrayIterator::kIteratedObjectMapOffset);
Node* array_map = LoadMap(array);
- Label if_isfastarray(this), if_isnotfastarray(this),
- if_isdetached(this, Label::kDeferred);
+ Label if_detached(this, Label::kDeferred);
+
+ Label if_typedarray(this), if_other(this, Label::kDeferred), if_array(this),
+ if_generic(this, Label::kDeferred);
- Branch(WordEqual(orig_map, array_map), &if_isfastarray, &if_isnotfastarray);
+ Node* array_type = LoadInstanceType(array);
+ GotoIf(InstanceTypeEqual(array_type, JS_ARRAY_TYPE), &if_array);
+ Branch(InstanceTypeEqual(array_type, JS_TYPED_ARRAY_TYPE), &if_typedarray,
+ &if_other);
- BIND(&if_isfastarray);
+ BIND(&if_array);
{
- CSA_ASSERT(
- this, InstanceTypeEqual(LoadMapInstanceType(array_map), JS_ARRAY_TYPE));
+ // We can only handle fast elements here.
+ Node* elements_kind = LoadMapElementsKind(array_map);
+ GotoIfNot(IsFastElementsKind(elements_kind), &if_other);
Node* length = LoadJSArrayLength(array);
@@ -3492,295 +3610,178 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
GotoIfNot(SmiBelow(index, length), &set_done);
+ var_value.Bind(index);
Node* one = SmiConstant(1);
StoreObjectFieldNoWriteBarrier(iterator, JSArrayIterator::kNextIndexOffset,
SmiAdd(index, one));
-
var_done.Bind(FalseConstant());
- Node* elements = LoadElements(array);
- static int32_t kInstanceType[] = {
- JS_FAST_ARRAY_KEY_ITERATOR_TYPE,
- JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE,
- JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE,
- JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE,
- JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE,
- JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE,
- JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE,
- JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE,
- JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE,
- JS_FAST_ARRAY_VALUE_ITERATOR_TYPE,
- JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE,
- JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE,
- JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE,
- };
-
- Label packed_object_values(this), holey_object_values(this),
- packed_double_values(this), holey_double_values(this);
- Label* kInstanceTypeHandlers[] = {
- &allocate_key_result, &packed_object_values, &holey_object_values,
- &packed_object_values, &holey_object_values, &packed_double_values,
- &holey_double_values, &packed_object_values, &holey_object_values,
- &packed_object_values, &holey_object_values, &packed_double_values,
- &holey_double_values};
-
- Switch(instance_type, &throw_bad_receiver, kInstanceType,
- kInstanceTypeHandlers, arraysize(kInstanceType));
+ GotoIf(Word32Equal(LoadAndUntagToWord32ObjectField(
+ iterator, JSArrayIterator::kKindOffset),
+ Int32Constant(static_cast<int>(IterationKind::kKeys))),
+ &allocate_iterator_result);
- BIND(&packed_object_values);
+ Node* elements = LoadElements(array);
+ Label if_packed(this), if_holey(this), if_packed_double(this),
+ if_holey_double(this), if_unknown_kind(this, Label::kDeferred);
+ int32_t kinds[] = {// Handled by if_packed.
+ PACKED_SMI_ELEMENTS, PACKED_ELEMENTS,
+ // Handled by if_holey.
+ HOLEY_SMI_ELEMENTS, HOLEY_ELEMENTS,
+ // Handled by if_packed_double.
+ PACKED_DOUBLE_ELEMENTS,
+ // Handled by if_holey_double.
+ HOLEY_DOUBLE_ELEMENTS};
+ Label* labels[] = {// PACKED_{SMI,}_ELEMENTS
+ &if_packed, &if_packed,
+ // HOLEY_{SMI,}_ELEMENTS
+ &if_holey, &if_holey,
+ // PACKED_DOUBLE_ELEMENTS
+ &if_packed_double,
+ // HOLEY_DOUBLE_ELEMENTS
+ &if_holey_double};
+ Switch(elements_kind, &if_unknown_kind, kinds, labels, arraysize(kinds));
+
+ BIND(&if_packed);
{
var_value.Bind(LoadFixedArrayElement(elements, index, 0, SMI_PARAMETERS));
Goto(&allocate_entry_if_needed);
}
- BIND(&packed_double_values);
+ BIND(&if_holey);
{
- Node* value = LoadFixedDoubleArrayElement(
- elements, index, MachineType::Float64(), 0, SMI_PARAMETERS);
- var_value.Bind(AllocateHeapNumberWithValue(value));
+ Node* element = LoadFixedArrayElement(elements, index, 0, SMI_PARAMETERS);
+ var_value.Bind(element);
+ GotoIfNot(WordEqual(element, TheHoleConstant()),
+ &allocate_entry_if_needed);
+ GotoIf(IsNoElementsProtectorCellInvalid(), &if_generic);
+ var_value.Bind(UndefinedConstant());
Goto(&allocate_entry_if_needed);
}
- BIND(&holey_object_values);
+ BIND(&if_packed_double);
{
- // Check the no_elements_protector cell, and take the slow path if it's
- // invalid.
- GotoIf(IsNoElementsProtectorCellInvalid(), &generic_values);
-
- var_value.Bind(UndefinedConstant());
- Node* value = LoadFixedArrayElement(elements, index, 0, SMI_PARAMETERS);
- GotoIf(WordEqual(value, TheHoleConstant()), &allocate_entry_if_needed);
- var_value.Bind(value);
+ Node* value = LoadFixedDoubleArrayElement(
+ elements, index, MachineType::Float64(), 0, SMI_PARAMETERS);
+ var_value.Bind(AllocateHeapNumberWithValue(value));
Goto(&allocate_entry_if_needed);
}
- BIND(&holey_double_values);
+ BIND(&if_holey_double);
{
- // Check the no_elements_protector cell, and take the slow path if it's
- // invalid.
- GotoIf(IsNoElementsProtectorCellInvalid(), &generic_values);
-
- var_value.Bind(UndefinedConstant());
+ Label if_hole(this, Label::kDeferred);
Node* value = LoadFixedDoubleArrayElement(
- elements, index, MachineType::Float64(), 0, SMI_PARAMETERS,
- &allocate_entry_if_needed);
+ elements, index, MachineType::Float64(), 0, SMI_PARAMETERS, &if_hole);
var_value.Bind(AllocateHeapNumberWithValue(value));
Goto(&allocate_entry_if_needed);
+ BIND(&if_hole);
+ GotoIf(IsNoElementsProtectorCellInvalid(), &if_generic);
+ var_value.Bind(UndefinedConstant());
+ Goto(&allocate_entry_if_needed);
}
+
+ BIND(&if_unknown_kind);
+ Unreachable();
}
- BIND(&if_isnotfastarray);
+ BIND(&if_other);
{
- Label if_istypedarray(this), if_isgeneric(this);
-
// If a is undefined, return CreateIterResultObject(undefined, true)
GotoIf(IsUndefined(array), &allocate_iterator_result);
- Node* array_type = LoadInstanceType(array);
- Branch(InstanceTypeEqual(array_type, JS_TYPED_ARRAY_TYPE), &if_istypedarray,
- &if_isgeneric);
-
- BIND(&if_isgeneric);
- {
- Label if_wasfastarray(this);
+ Node* length =
+ CallBuiltin(Builtins::kToLength, context,
+ GetProperty(context, array, factory()->length_string()));
- Node* length = nullptr;
- {
- VARIABLE(var_length, MachineRepresentation::kTagged);
- Label if_isarray(this), if_isnotarray(this), done(this);
- Branch(InstanceTypeEqual(array_type, JS_ARRAY_TYPE), &if_isarray,
- &if_isnotarray);
+ GotoIfNumberGreaterThanOrEqual(index, length, &set_done);
- BIND(&if_isarray);
- {
- var_length.Bind(LoadJSArrayLength(array));
+ var_value.Bind(index);
+ StoreObjectField(iterator, JSArrayIterator::kNextIndexOffset,
+ NumberInc(index));
+ var_done.Bind(FalseConstant());
- // Invalidate protector cell if needed
- Branch(WordNotEqual(orig_map, UndefinedConstant()), &if_wasfastarray,
- &done);
+ GotoIf(Word32Equal(LoadAndUntagToWord32ObjectField(
+ iterator, JSArrayIterator::kKindOffset),
+ Int32Constant(static_cast<int>(IterationKind::kKeys))),
+ &allocate_iterator_result);
+ Goto(&if_generic);
+ }
- BIND(&if_wasfastarray);
- {
- Label if_invalid(this, Label::kDeferred);
- // A fast array iterator transitioned to a slow iterator during
- // iteration. Invalidate fast_array_iteration_protector cell to
- // prevent potential deopt loops.
- StoreObjectFieldNoWriteBarrier(
- iterator, JSArrayIterator::kIteratedObjectMapOffset,
- UndefinedConstant());
- GotoIf(Uint32LessThanOrEqual(
- instance_type,
- Int32Constant(JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE)),
- &done);
-
- Node* invalid = SmiConstant(Isolate::kProtectorInvalid);
- Node* cell = LoadRoot(Heap::kFastArrayIterationProtectorRootIndex);
- StoreObjectFieldNoWriteBarrier(cell, Cell::kValueOffset, invalid);
- Goto(&done);
- }
- }
+ BIND(&if_generic);
+ {
+ var_value.Bind(GetProperty(context, array, index));
+ Goto(&allocate_entry_if_needed);
+ }
- BIND(&if_isnotarray);
- {
- Node* length =
- GetProperty(context, array, factory()->length_string());
- var_length.Bind(ToLength_Inline(context, length));
- Goto(&done);
- }
+ BIND(&if_typedarray);
+ {
+ Node* buffer = LoadObjectField(array, JSTypedArray::kBufferOffset);
+ GotoIf(IsDetachedBuffer(buffer), &if_detached);
- BIND(&done);
- length = var_length.value();
- }
+ Node* length = LoadObjectField(array, JSTypedArray::kLengthOffset);
- GotoIfNumberGreaterThanOrEqual(index, length, &set_done);
+ CSA_ASSERT(this, TaggedIsSmi(length));
+ CSA_ASSERT(this, TaggedIsSmi(index));
- StoreObjectField(iterator, JSArrayIterator::kNextIndexOffset,
- NumberInc(index));
- var_done.Bind(FalseConstant());
+ GotoIfNot(SmiBelow(index, length), &set_done);
- Branch(
- Uint32LessThanOrEqual(
- instance_type, Int32Constant(JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE)),
- &allocate_key_result, &generic_values);
+ var_value.Bind(index);
+ Node* one = SmiConstant(1);
+ StoreObjectFieldNoWriteBarrier(iterator, JSArrayIterator::kNextIndexOffset,
+ SmiAdd(index, one));
+ var_done.Bind(FalseConstant());
- BIND(&generic_values);
- {
- var_value.Bind(GetProperty(context, array, index));
- Goto(&allocate_entry_if_needed);
- }
- }
+ GotoIf(Word32Equal(LoadAndUntagToWord32ObjectField(
+ iterator, JSArrayIterator::kKindOffset),
+ Int32Constant(static_cast<int>(IterationKind::kKeys))),
+ &allocate_iterator_result);
- BIND(&if_istypedarray);
- {
- Node* buffer = LoadObjectField(array, JSTypedArray::kBufferOffset);
- GotoIf(IsDetachedBuffer(buffer), &if_isdetached);
+ Node* elements_kind = LoadMapElementsKind(array_map);
+ Node* elements = LoadElements(array);
+ Node* base_ptr =
+ LoadObjectField(elements, FixedTypedArrayBase::kBasePointerOffset);
+ Node* external_ptr =
+ LoadObjectField(elements, FixedTypedArrayBase::kExternalPointerOffset,
+ MachineType::Pointer());
+ Node* data_ptr = IntPtrAdd(BitcastTaggedToWord(base_ptr), external_ptr);
+
+ Label if_unknown_type(this, Label::kDeferred);
+ int32_t elements_kinds[] = {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) TYPE##_ELEMENTS,
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ };
- Node* length = LoadObjectField(array, JSTypedArray::kLengthOffset);
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ Label if_##type##array(this);
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
- CSA_ASSERT(this, TaggedIsSmi(length));
- CSA_ASSERT(this, TaggedIsSmi(index));
+ Label* elements_kind_labels[] = {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) &if_##type##array,
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ };
+ STATIC_ASSERT(arraysize(elements_kinds) == arraysize(elements_kind_labels));
- GotoIfNot(SmiBelow(index, length), &set_done);
+ Switch(elements_kind, &if_unknown_type, elements_kinds,
+ elements_kind_labels, arraysize(elements_kinds));
- Node* one = SmiConstant(1);
- StoreObjectFieldNoWriteBarrier(
- iterator, JSArrayIterator::kNextIndexOffset, SmiAdd(index, one));
- var_done.Bind(FalseConstant());
+ BIND(&if_unknown_type);
+ Unreachable();
- Node* elements = LoadElements(array);
- Node* base_ptr =
- LoadObjectField(elements, FixedTypedArrayBase::kBasePointerOffset);
- Node* external_ptr =
- LoadObjectField(elements, FixedTypedArrayBase::kExternalPointerOffset,
- MachineType::Pointer());
- Node* data_ptr = IntPtrAdd(BitcastTaggedToWord(base_ptr), external_ptr);
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ BIND(&if_##type##array); \
+ { \
+ var_value.Bind(LoadFixedTypedArrayElementAsTagged( \
+ data_ptr, index, TYPE##_ELEMENTS, SMI_PARAMETERS)); \
+ Goto(&allocate_entry_if_needed); \
+ }
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
- static int32_t kInstanceType[] = {
- JS_TYPED_ARRAY_KEY_ITERATOR_TYPE,
- JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE,
- JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE,
- JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE,
- JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE,
- JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE,
- JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE,
- JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE,
- JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE,
- JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE,
- JS_BIGUINT64_ARRAY_KEY_VALUE_ITERATOR_TYPE,
- JS_BIGINT64_ARRAY_KEY_VALUE_ITERATOR_TYPE,
- JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE,
- JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE,
- JS_INT8_ARRAY_VALUE_ITERATOR_TYPE,
- JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE,
- JS_INT16_ARRAY_VALUE_ITERATOR_TYPE,
- JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE,
- JS_INT32_ARRAY_VALUE_ITERATOR_TYPE,
- JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE,
- JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE,
- JS_BIGUINT64_ARRAY_VALUE_ITERATOR_TYPE,
- JS_BIGINT64_ARRAY_VALUE_ITERATOR_TYPE,
- };
-
- Label uint8_values(this), int8_values(this), uint16_values(this),
- int16_values(this), uint32_values(this), int32_values(this),
- float32_values(this), float64_values(this), biguint64_values(this),
- bigint64_values(this);
- Label* kInstanceTypeHandlers[] = {
- &allocate_key_result, &uint8_values, &uint8_values,
- &int8_values, &uint16_values, &int16_values,
- &uint32_values, &int32_values, &float32_values,
- &float64_values, &biguint64_values, &bigint64_values,
- &uint8_values, &uint8_values, &int8_values,
- &uint16_values, &int16_values, &uint32_values,
- &int32_values, &float32_values, &float64_values,
- &biguint64_values, &bigint64_values,
- };
-
- var_done.Bind(FalseConstant());
- Switch(instance_type, &throw_bad_receiver, kInstanceType,
- kInstanceTypeHandlers, arraysize(kInstanceType));
-
- BIND(&uint8_values);
- {
- var_value.Bind(LoadFixedTypedArrayElementAsTagged(
- data_ptr, index, UINT8_ELEMENTS, SMI_PARAMETERS));
- Goto(&allocate_entry_if_needed);
- }
- BIND(&int8_values);
- {
- var_value.Bind(LoadFixedTypedArrayElementAsTagged(
- data_ptr, index, INT8_ELEMENTS, SMI_PARAMETERS));
- Goto(&allocate_entry_if_needed);
- }
- BIND(&uint16_values);
- {
- var_value.Bind(LoadFixedTypedArrayElementAsTagged(
- data_ptr, index, UINT16_ELEMENTS, SMI_PARAMETERS));
- Goto(&allocate_entry_if_needed);
- }
- BIND(&int16_values);
- {
- var_value.Bind(LoadFixedTypedArrayElementAsTagged(
- data_ptr, index, INT16_ELEMENTS, SMI_PARAMETERS));
- Goto(&allocate_entry_if_needed);
- }
- BIND(&uint32_values);
- {
- var_value.Bind(LoadFixedTypedArrayElementAsTagged(
- data_ptr, index, UINT32_ELEMENTS, SMI_PARAMETERS));
- Goto(&allocate_entry_if_needed);
- }
- BIND(&int32_values);
- {
- var_value.Bind(LoadFixedTypedArrayElementAsTagged(
- data_ptr, index, INT32_ELEMENTS, SMI_PARAMETERS));
- Goto(&allocate_entry_if_needed);
- }
- BIND(&float32_values);
- {
- var_value.Bind(LoadFixedTypedArrayElementAsTagged(
- data_ptr, index, FLOAT32_ELEMENTS, SMI_PARAMETERS));
- Goto(&allocate_entry_if_needed);
- }
- BIND(&float64_values);
- {
- var_value.Bind(LoadFixedTypedArrayElementAsTagged(
- data_ptr, index, FLOAT64_ELEMENTS, SMI_PARAMETERS));
- Goto(&allocate_entry_if_needed);
- }
- BIND(&biguint64_values);
- {
- var_value.Bind(LoadFixedTypedArrayElementAsTagged(
- data_ptr, index, BIGUINT64_ELEMENTS, SMI_PARAMETERS));
- Goto(&allocate_entry_if_needed);
- }
- BIND(&bigint64_values);
- {
- var_value.Bind(LoadFixedTypedArrayElementAsTagged(
- data_ptr, index, BIGINT64_ELEMENTS, SMI_PARAMETERS));
- Goto(&allocate_entry_if_needed);
- }
- }
+ BIND(&if_detached);
+ ThrowTypeError(context, MessageTemplate::kDetachedOperation, method_name);
}
BIND(&set_done);
@@ -3790,17 +3791,11 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
Goto(&allocate_iterator_result);
}
- BIND(&allocate_key_result);
- {
- var_value.Bind(index);
- var_done.Bind(FalseConstant());
- Goto(&allocate_iterator_result);
- }
-
BIND(&allocate_entry_if_needed);
{
- GotoIf(Uint32LessThan(Int32Constant(LAST_ARRAY_KEY_VALUE_ITERATOR_TYPE),
- instance_type),
+ GotoIf(Word32Equal(LoadAndUntagToWord32ObjectField(
+ iterator, JSArrayIterator::kKindOffset),
+ Int32Constant(static_cast<int>(IterationKind::kValues))),
&allocate_iterator_result);
Node* elements = AllocateFixedArray(PACKED_ELEMENTS, IntPtrConstant(2));
@@ -3845,9 +3840,261 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
ThrowTypeError(context, MessageTemplate::kIncompatibleMethodReceiver,
StringConstant(method_name), iterator);
}
+}
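
The rewritten next() above dispatches on the iterator's kind field (JSArrayIterator::kKindOffset) and the array's elements kind, replacing the long per-iterator instance-type switches. A rough sketch of that kind-based step, in plain C++ with illustrative types; the real builtin additionally handles holes, typed arrays, detached buffers, and the generic property path:

enum class IterationKind { kKeys, kValues, kEntries };

struct StepResult {
  bool done;
  double value_or_key;  // the index for kKeys, the element value otherwise
};

// Illustrative element load; stands in for the elements-kind-specific loads
// (LoadFixedArrayElement and friends) in the real builtin.
double LoadElement(const double* elements, int index) { return elements[index]; }

// One next() step: the kKeys case returns before any element is loaded,
// mirroring the early GotoIf(kind == kKeys, &allocate_iterator_result) above;
// kEntries would additionally pack [index, element] into a two-element array.
StepResult Step(IterationKind kind, const double* elements, int index,
                int length) {
  if (index >= length) return {true, 0.0};  // the set_done path
  if (kind == IterationKind::kKeys) return {false, static_cast<double>(index)};
  return {false, LoadElement(elements, index)};
}
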
+
+namespace {
+
+class ArrayFlattenAssembler : public CodeStubAssembler {
+ public:
+ explicit ArrayFlattenAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ // https://tc39.github.io/proposal-flatMap/#sec-FlattenIntoArray
+ Node* FlattenIntoArray(Node* context, Node* target, Node* source,
+ Node* source_length, Node* start, Node* depth,
+ Node* mapper_function = nullptr,
+ Node* this_arg = nullptr) {
+ CSA_ASSERT(this, IsJSReceiver(target));
+ CSA_ASSERT(this, IsJSReceiver(source));
+ CSA_ASSERT(this, IsNumberPositive(source_length));
+ CSA_ASSERT(this, IsNumberPositive(start));
+ CSA_ASSERT(this, IsNumber(depth));
+
+ // 1. Let targetIndex be start.
+ VARIABLE(var_target_index, MachineRepresentation::kTagged, start);
+
+ // 2. Let sourceIndex be 0.
+ VARIABLE(var_source_index, MachineRepresentation::kTagged, SmiConstant(0));
+
+ // 3. Repeat...
+ Label loop(this, {&var_target_index, &var_source_index}), done_loop(this);
+ Goto(&loop);
+ BIND(&loop);
+ {
+ Node* const source_index = var_source_index.value();
+ Node* const target_index = var_target_index.value();
+
+ // ...while sourceIndex < sourceLen
+ GotoIfNumberGreaterThanOrEqual(source_index, source_length, &done_loop);
+
+ // a. Let P be ! ToString(sourceIndex).
+ // b. Let exists be ? HasProperty(source, P).
+ CSA_ASSERT(this, SmiGreaterThanOrEqual(source_index, SmiConstant(0)));
+ Node* const exists =
+ HasProperty(source, source_index, context, kHasProperty);
+
+ // c. If exists is true, then
+ Label next(this);
+ GotoIfNot(IsTrue(exists), &next);
+ {
+ // i. Let element be ? Get(source, P).
+ Node* element = GetProperty(context, source, source_index);
+
+ // ii. If mapperFunction is present, then
+ if (mapper_function != nullptr) {
+ CSA_ASSERT(this, Word32Or(IsUndefined(mapper_function),
+ IsCallable(mapper_function)));
+ DCHECK_NOT_NULL(this_arg);
+
+          // 1. Set element to ? Call(mapperFunction, thisArg , « element,
+          //                                                      sourceIndex, source »).
+ element =
+ CallJS(CodeFactory::Call(isolate()), context, mapper_function,
+ this_arg, element, source_index, source);
+ }
+
+ // iii. Let shouldFlatten be false.
+ Label if_flatten_array(this), if_flatten_proxy(this, Label::kDeferred),
+ if_noflatten(this);
+ // iv. If depth > 0, then
+ GotoIfNumberGreaterThanOrEqual(SmiConstant(0), depth, &if_noflatten);
+ // 1. Set shouldFlatten to ? IsArray(element).
+ GotoIf(TaggedIsSmi(element), &if_noflatten);
+ GotoIf(IsJSArray(element), &if_flatten_array);
+ GotoIfNot(IsJSProxy(element), &if_noflatten);
+ Branch(IsTrue(CallRuntime(Runtime::kArrayIsArray, context, element)),
+ &if_flatten_proxy, &if_noflatten);
+
+ BIND(&if_flatten_array);
+ {
+ CSA_ASSERT(this, IsJSArray(element));
+
+ // 1. Let elementLen be ? ToLength(? Get(element, "length")).
+ Node* const element_length =
+ LoadObjectField(element, JSArray::kLengthOffset);
+
+ // 2. Set targetIndex to ? FlattenIntoArray(target, element,
+ // elementLen, targetIndex,
+ // depth - 1).
+ var_target_index.Bind(
+ CallBuiltin(Builtins::kFlattenIntoArray, context, target, element,
+ element_length, target_index, NumberDec(depth)));
+ Goto(&next);
+ }
+
+ BIND(&if_flatten_proxy);
+ {
+ CSA_ASSERT(this, IsJSProxy(element));
+
+ // 1. Let elementLen be ? ToLength(? Get(element, "length")).
+ Node* const element_length = ToLength_Inline(
+ context, GetProperty(context, element, LengthStringConstant()));
+
+ // 2. Set targetIndex to ? FlattenIntoArray(target, element,
+ // elementLen, targetIndex,
+ // depth - 1).
+ var_target_index.Bind(
+ CallBuiltin(Builtins::kFlattenIntoArray, context, target, element,
+ element_length, target_index, NumberDec(depth)));
+ Goto(&next);
+ }
+
+ BIND(&if_noflatten);
+ {
+ // 1. If targetIndex >= 2^53-1, throw a TypeError exception.
+ Label throw_error(this, Label::kDeferred);
+ GotoIfNumberGreaterThanOrEqual(
+ target_index, NumberConstant(kMaxSafeInteger), &throw_error);
+
+ // 2. Perform ? CreateDataPropertyOrThrow(target,
+ // ! ToString(targetIndex),
+ // element).
+ CallRuntime(Runtime::kCreateDataProperty, context, target,
+ target_index, element);
+
+ // 3. Increase targetIndex by 1.
+ var_target_index.Bind(NumberInc(target_index));
+ Goto(&next);
+
+ BIND(&throw_error);
+ ThrowTypeError(context, MessageTemplate::kFlattenPastSafeLength,
+ source_length, target_index);
+ }
+ }
+ BIND(&next);
+
+ // d. Increase sourceIndex by 1.
+ var_source_index.Bind(NumberInc(source_index));
+ Goto(&loop);
+ }
+
+ BIND(&done_loop);
+ return var_target_index.value();
+ }
+};
+
+} // namespace
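
The helper above follows the proposal's recursion directly: walk source, recurse with depth - 1 when the element is itself an array and depth > 0, otherwise copy the element to target. A compact standalone sketch of the same shape in plain C++ with illustrative types; the spec version additionally consults HasProperty, treats proxies via IsArray, applies the mapper function, and enforces the 2^53 - 1 target bound:

#include <vector>

// Illustrative element type: either a leaf value or a nested array.
struct Item {
  bool is_array = false;
  int value = 0;            // meaningful when !is_array
  std::vector<Item> items;  // meaningful when is_array
};

// Sketch of FlattenIntoArray(target, source, depth).
void FlattenInto(std::vector<Item>& target, const std::vector<Item>& source,
                 int depth) {
  for (const Item& item : source) {
    if (depth > 0 && item.is_array) {
      FlattenInto(target, item.items, depth - 1);  // mirrors NumberDec(depth)
    } else {
      target.push_back(item);  // mirrors CreateDataProperty(target, index, element)
    }
  }
}
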
+
+// https://tc39.github.io/proposal-flatMap/#sec-FlattenIntoArray
+TF_BUILTIN(FlattenIntoArray, ArrayFlattenAssembler) {
+ Node* const context = Parameter(Descriptor::kContext);
+ Node* const target = Parameter(Descriptor::kTarget);
+ Node* const source = Parameter(Descriptor::kSource);
+ Node* const source_length = Parameter(Descriptor::kSourceLength);
+ Node* const start = Parameter(Descriptor::kStart);
+ Node* const depth = Parameter(Descriptor::kDepth);
+
+ Return(
+ FlattenIntoArray(context, target, source, source_length, start, depth));
+}
+
+// https://tc39.github.io/proposal-flatMap/#sec-FlattenIntoArray
+TF_BUILTIN(FlatMapIntoArray, ArrayFlattenAssembler) {
+ Node* const context = Parameter(Descriptor::kContext);
+ Node* const target = Parameter(Descriptor::kTarget);
+ Node* const source = Parameter(Descriptor::kSource);
+ Node* const source_length = Parameter(Descriptor::kSourceLength);
+ Node* const start = Parameter(Descriptor::kStart);
+ Node* const depth = Parameter(Descriptor::kDepth);
+ Node* const mapper_function = Parameter(Descriptor::kMapperFunction);
+ Node* const this_arg = Parameter(Descriptor::kThisArg);
+
+ Return(FlattenIntoArray(context, target, source, source_length, start, depth,
+ mapper_function, this_arg));
+}
+
+// https://tc39.github.io/proposal-flatMap/#sec-Array.prototype.flatten
+TF_BUILTIN(ArrayPrototypeFlatten, CodeStubAssembler) {
+ Node* const argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+ Node* const context = Parameter(BuiltinDescriptor::kContext);
+ Node* const receiver = args.GetReceiver();
+ Node* const depth = args.GetOptionalArgumentValue(0);
+
+ // 1. Let O be ? ToObject(this value).
+ Node* const o = ToObject(context, receiver);
+
+ // 2. Let sourceLen be ? ToLength(? Get(O, "length")).
+ Node* const source_length =
+ ToLength_Inline(context, GetProperty(context, o, LengthStringConstant()));
+
+ // 3. Let depthNum be 1.
+ VARIABLE(var_depth_num, MachineRepresentation::kTagged, SmiConstant(1));
+
+ // 4. If depth is not undefined, then
+ Label done(this);
+ GotoIf(IsUndefined(depth), &done);
+ {
+ // a. Set depthNum to ? ToInteger(depth).
+ var_depth_num.Bind(ToInteger_Inline(context, depth));
+ Goto(&done);
+ }
+ BIND(&done);
+
+ // 5. Let A be ? ArraySpeciesCreate(O, 0).
+ Node* const constructor =
+ CallRuntime(Runtime::kArraySpeciesConstructor, context, o);
+ Node* const a = ConstructJS(CodeFactory::Construct(isolate()), context,
+ constructor, SmiConstant(0));
+
+ // 6. Perform ? FlattenIntoArray(A, O, sourceLen, 0, depthNum).
+ CallBuiltin(Builtins::kFlattenIntoArray, context, a, o, source_length,
+ SmiConstant(0), var_depth_num.value());
+
+ // 7. Return A.
+ args.PopAndReturn(a);
+}
+
+// https://tc39.github.io/proposal-flatMap/#sec-Array.prototype.flatMap
+TF_BUILTIN(ArrayPrototypeFlatMap, CodeStubAssembler) {
+ Node* const argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+ Node* const context = Parameter(BuiltinDescriptor::kContext);
+ Node* const receiver = args.GetReceiver();
+ Node* const mapper_function = args.GetOptionalArgumentValue(0);
+
+ // 1. Let O be ? ToObject(this value).
+ Node* const o = ToObject(context, receiver);
+
+ // 2. Let sourceLen be ? ToLength(? Get(O, "length")).
+ Node* const source_length =
+ ToLength_Inline(context, GetProperty(context, o, LengthStringConstant()));
+
+ // 3. If IsCallable(mapperFunction) is false, throw a TypeError exception.
+ Label if_not_callable(this, Label::kDeferred);
+ GotoIf(TaggedIsSmi(mapper_function), &if_not_callable);
+ GotoIfNot(IsCallable(mapper_function), &if_not_callable);
+
+ // 4. If thisArg is present, let T be thisArg; else let T be undefined.
+ Node* const t = args.GetOptionalArgumentValue(1);
+
+ // 5. Let A be ? ArraySpeciesCreate(O, 0).
+ Node* const constructor =
+ CallRuntime(Runtime::kArraySpeciesConstructor, context, o);
+ Node* const a = ConstructJS(CodeFactory::Construct(isolate()), context,
+ constructor, SmiConstant(0));
+
+ // 6. Perform ? FlattenIntoArray(A, O, sourceLen, 0, 1, mapperFunction, T).
+ CallBuiltin(Builtins::kFlatMapIntoArray, context, a, o, source_length,
+ SmiConstant(0), SmiConstant(1), mapper_function, t);
+
+ // 7. Return A.
+ args.PopAndReturn(a);
- BIND(&if_isdetached);
- ThrowTypeError(context, MessageTemplate::kDetachedOperation, method_name);
+ BIND(&if_not_callable);
+ { ThrowTypeError(context, MessageTemplate::kMapperFunctionNonCallable); }
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-array-gen.h b/deps/v8/src/builtins/builtins-array-gen.h
index 67ac51480c..aabd4bab6e 100644
--- a/deps/v8/src/builtins/builtins-array-gen.h
+++ b/deps/v8/src/builtins/builtins-array-gen.h
@@ -73,8 +73,8 @@ class ArrayBuiltinsAssembler : public CodeStubAssembler {
TNode<Object> receiver() { return receiver_; }
Node* new_target() { return new_target_; }
TNode<IntPtrT> argc() { return argc_; }
- Node* o() { return o_; }
- Node* len() { return len_; }
+ TNode<JSReceiver> o() { return o_; }
+ TNode<Number> len() { return len_; }
Node* callbackfn() { return callbackfn_; }
Node* this_arg() { return this_arg_; }
Node* k() { return k_.value(); }
@@ -95,7 +95,8 @@ class ArrayBuiltinsAssembler : public CodeStubAssembler {
ForEachDirection direction = ForEachDirection::kForward);
void InitIteratingArrayBuiltinLoopContinuation(
TNode<Context> context, TNode<Object> receiver, Node* callbackfn,
- Node* this_arg, Node* a, Node* o, Node* initial_k, Node* len, Node* to);
+ Node* this_arg, Node* a, TNode<JSReceiver> o, Node* initial_k,
+ TNode<Number> len, Node* to);
void GenerateIteratingTypedArrayBuiltinBody(
const char* name, const BuiltinResultGenerator& generator,
@@ -112,13 +113,15 @@ class ArrayBuiltinsAssembler : public CodeStubAssembler {
void VisitAllTypedArrayElements(Node* array_buffer,
const CallResultProcessor& processor,
- Label* detached, ForEachDirection direction);
+ Label* detached, ForEachDirection direction,
+ TNode<JSTypedArray> typed_array);
void VisitAllFastElementsOneKind(ElementsKind kind,
const CallResultProcessor& processor,
Label* array_changed, ParameterMode mode,
ForEachDirection direction,
- MissingPropertyMode missing_property_mode);
+ MissingPropertyMode missing_property_mode,
+ TNode<Smi> length);
void HandleFastElements(const CallResultProcessor& processor,
const PostLoopAction& action, Label* slow,
@@ -131,12 +134,12 @@ class ArrayBuiltinsAssembler : public CodeStubAssembler {
void GenerateArraySpeciesCreate();
// Perform ArraySpeciesCreate (ES6 #sec-arrayspeciescreate).
- void GenerateArraySpeciesCreate(SloppyTNode<Smi> len);
+ void GenerateArraySpeciesCreate(TNode<Number> len);
Node* callbackfn_ = nullptr;
- Node* o_ = nullptr;
+ TNode<JSReceiver> o_;
Node* this_arg_ = nullptr;
- Node* len_ = nullptr;
+ TNode<Number> len_;
TNode<Context> context_;
TNode<Object> receiver_;
Node* new_target_ = nullptr;
diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc
index f400e824f0..f57c7d39ca 100644
--- a/deps/v8/src/builtins/builtins-array.cc
+++ b/deps/v8/src/builtins/builtins-array.cc
@@ -78,7 +78,7 @@ inline bool HasOnlySimpleElements(Isolate* isolate, JSReceiver* receiver) {
}
// Returns |false| if not applicable.
-MUST_USE_RESULT
+V8_WARN_UNUSED_RESULT
inline bool EnsureJSArrayWithWritableFastElements(Isolate* isolate,
Handle<Object> receiver,
BuiltinArguments* args,
@@ -128,9 +128,8 @@ inline bool EnsureJSArrayWithWritableFastElements(Isolate* isolate,
return true;
}
-MUST_USE_RESULT static Object* CallJsIntrinsic(Isolate* isolate,
- Handle<JSFunction> function,
- BuiltinArguments args) {
+V8_WARN_UNUSED_RESULT static Object* CallJsIntrinsic(
+ Isolate* isolate, Handle<JSFunction> function, BuiltinArguments args) {
HandleScope handleScope(isolate);
int argc = args.length() - 1;
ScopedVector<Handle<Object>> argv(argc);
@@ -240,75 +239,6 @@ BUILTIN(ArrayUnshift) {
return Smi::FromInt(new_length);
}
-BUILTIN(ArraySlice) {
- HandleScope scope(isolate);
- Handle<Object> receiver = args.receiver();
- int len = -1;
- int relative_start = 0;
- int relative_end = 0;
-
- if (receiver->IsJSArray()) {
- DisallowHeapAllocation no_gc;
- JSArray* array = JSArray::cast(*receiver);
- if (V8_UNLIKELY(!array->HasFastElements() ||
- !IsJSArrayFastElementMovingAllowed(isolate, array) ||
- !isolate->IsSpeciesLookupChainIntact() ||
- // If this is a subclass of Array, then call out to JS
- !array->HasArrayPrototype(isolate))) {
- AllowHeapAllocation allow_allocation;
- return CallJsIntrinsic(isolate, isolate->array_slice(), args);
- }
- len = Smi::ToInt(array->length());
- } else if (receiver->IsJSObject() &&
- JSSloppyArgumentsObject::GetSloppyArgumentsLength(
- isolate, Handle<JSObject>::cast(receiver), &len)) {
- // Array.prototype.slice.call(arguments, ...) is quite a common idiom
- // (notably more than 50% of invocations in Web apps).
- // Treat it in C++ as well.
- DCHECK(JSObject::cast(*receiver)->HasFastElements() ||
- JSObject::cast(*receiver)->HasFastArgumentsElements());
- } else {
- AllowHeapAllocation allow_allocation;
- return CallJsIntrinsic(isolate, isolate->array_slice(), args);
- }
- DCHECK_LE(0, len);
- int argument_count = args.length() - 1;
- // Note carefully chosen defaults---if argument is missing,
- // it's undefined which gets converted to 0 for relative_start
- // and to len for relative_end.
- relative_start = 0;
- relative_end = len;
- if (argument_count > 0) {
- DisallowHeapAllocation no_gc;
- if (!ClampedToInteger(isolate, args[1], &relative_start)) {
- AllowHeapAllocation allow_allocation;
- return CallJsIntrinsic(isolate, isolate->array_slice(), args);
- }
- if (argument_count > 1) {
- Object* end_arg = args[2];
- // slice handles the end_arg specially
- if (end_arg->IsUndefined(isolate)) {
- relative_end = len;
- } else if (!ClampedToInteger(isolate, end_arg, &relative_end)) {
- AllowHeapAllocation allow_allocation;
- return CallJsIntrinsic(isolate, isolate->array_slice(), args);
- }
- }
- }
-
-  // ECMA-262, 3rd Edition, Section 15.4.4.10, step 6.
- uint32_t actual_start = (relative_start < 0) ? Max(len + relative_start, 0)
- : Min(relative_start, len);
-
-  // ECMA-262, 3rd Edition, Section 15.4.4.10, step 8.
- uint32_t actual_end =
- (relative_end < 0) ? Max(len + relative_end, 0) : Min(relative_end, len);
-
- Handle<JSObject> object = Handle<JSObject>::cast(receiver);
- ElementsAccessor* accessor = object->GetElementsAccessor();
- return *accessor->Slice(object, actual_start, actual_end);
-}
-
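
The removed fast path above clamps the relative start and end arguments the way the spec defines Array.prototype.slice: negative indices count back from the length, and the result is clamped to [0, len]. A tiny standalone sketch of that clamping in plain C++ (illustrative, not the V8 helper):

#include <algorithm>

// Spec steps 6 and 8 of Array.prototype.slice, for one relative index.
int ClampRelativeIndex(int relative, int len) {
  return relative < 0 ? std::max(len + relative, 0) : std::min(relative, len);
}
// e.g. len = 5: relative -2 -> 3, relative 7 -> 5, relative -9 -> 0.
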
BUILTIN(ArraySplice) {
HandleScope scope(isolate);
Handle<Object> receiver = args.receiver();
@@ -317,7 +247,7 @@ BUILTIN(ArraySplice) {
// If this is a subclass of Array, then call out to JS.
!Handle<JSArray>::cast(receiver)->HasArrayPrototype(isolate) ||
// If anything with @@species has been messed with, call out to JS.
- !isolate->IsSpeciesLookupChainIntact())) {
+ !isolate->IsArraySpeciesLookupChainIntact())) {
return CallJsIntrinsic(isolate, isolate->array_splice(), args);
}
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
@@ -403,7 +333,7 @@ class ArrayConcatVisitor {
~ArrayConcatVisitor() { clear_storage(); }
- MUST_USE_RESULT bool visit(uint32_t i, Handle<Object> elm) {
+ V8_WARN_UNUSED_RESULT bool visit(uint32_t i, Handle<Object> elm) {
uint32_t index = index_offset_ + i;
if (i >= JSObject::kMaxElementCount - index_offset_) {
@@ -481,7 +411,7 @@ class ArrayConcatVisitor {
return array;
}
- MUST_USE_RESULT MaybeHandle<JSReceiver> ToJSReceiver() {
+ V8_WARN_UNUSED_RESULT MaybeHandle<JSReceiver> ToJSReceiver() {
DCHECK(!is_fixed_array());
Handle<JSReceiver> result = Handle<JSReceiver>::cast(storage_);
Handle<Object> length =
@@ -1187,7 +1117,7 @@ BUILTIN(ArrayConcat) {
// Avoid a real species read to avoid extra lookups to the array constructor
if (V8_LIKELY(receiver->IsJSArray() &&
Handle<JSArray>::cast(receiver)->HasArrayPrototype(isolate) &&
- isolate->IsSpeciesLookupChainIntact())) {
+ isolate->IsArraySpeciesLookupChainIntact())) {
if (Fast_ArrayConcat(isolate, &args).ToHandle(&result_array)) {
return *result_array;
}
diff --git a/deps/v8/src/builtins/builtins-arraybuffer.cc b/deps/v8/src/builtins/builtins-arraybuffer.cc
index 4f9078b4b6..9c77a0047d 100644
--- a/deps/v8/src/builtins/builtins-arraybuffer.cc
+++ b/deps/v8/src/builtins/builtins-arraybuffer.cc
@@ -23,17 +23,6 @@ namespace internal {
// -----------------------------------------------------------------------------
// ES6 section 21.1 ArrayBuffer Objects
-// ES6 section 24.1.2.1 ArrayBuffer ( length ) for the [[Call]] case.
-BUILTIN(ArrayBufferConstructor) {
- HandleScope scope(isolate);
- Handle<JSFunction> target = args.target();
- DCHECK(*target == target->native_context()->array_buffer_fun() ||
- *target == target->native_context()->shared_array_buffer_fun());
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
- handle(target->shared()->name(), isolate)));
-}
-
namespace {
Object* ConstructBuffer(Isolate* isolate, Handle<JSFunction> target,
@@ -62,24 +51,30 @@ Object* ConstructBuffer(Isolate* isolate, Handle<JSFunction> target,
} // namespace
-// ES6 section 24.1.2.1 ArrayBuffer ( length ) for the [[Construct]] case.
-BUILTIN(ArrayBufferConstructor_ConstructStub) {
+// ES #sec-arraybuffer-constructor
+BUILTIN(ArrayBufferConstructor) {
HandleScope scope(isolate);
Handle<JSFunction> target = args.target();
- Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
- Handle<Object> length = args.atOrUndefined(isolate, 1);
DCHECK(*target == target->native_context()->array_buffer_fun() ||
*target == target->native_context()->shared_array_buffer_fun());
-
- Handle<Object> number_length;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, number_length,
- Object::ToInteger(isolate, length));
- if (number_length->Number() < 0.0) {
+ if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
+ isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
+ handle(target->shared()->Name(), isolate)));
+ } else { // [[Construct]]
+ Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
+ Handle<Object> length = args.atOrUndefined(isolate, 1);
+
+ Handle<Object> number_length;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, number_length,
+ Object::ToInteger(isolate, length));
+ if (number_length->Number() < 0.0) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
+ }
+
+ return ConstructBuffer(isolate, target, new_target, number_length, true);
}
-
- return ConstructBuffer(isolate, target, new_target, number_length, true);
}
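
ArrayBufferConstructor_ConstructStub is folded into ArrayBufferConstructor above: a single builtin now branches on whether new.target is undefined, and the BigInt and Boolean constructors below receive the same treatment. A minimal sketch of that dispatch with placeholder types, not the real BuiltinArguments API:

// [[Call]] (no `new`) throws for ArrayBuffer; [[Construct]] allocates.
struct NewTarget { bool is_undefined; };

enum class Path { kCallThrowsTypeError, kConstructAllocates };

Path Dispatch(const NewTarget& new_target) {
  return new_target.is_undefined ? Path::kCallThrowsTypeError
                                 : Path::kConstructAllocates;
}
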
 // This is a helper to construct an ArrayBuffer with uninitialized memory.
diff --git a/deps/v8/src/builtins/builtins-async-gen.cc b/deps/v8/src/builtins/builtins-async-gen.cc
index 7958afba00..073c96a2e0 100644
--- a/deps/v8/src/builtins/builtins-async-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-gen.cc
@@ -5,7 +5,7 @@
#include "src/builtins/builtins-async-gen.h"
#include "src/builtins/builtins-utils-gen.h"
-#include "src/factory-inl.h"
+#include "src/heap/factory-inl.h"
#include "src/objects/shared-function-info.h"
namespace v8 {
@@ -44,7 +44,7 @@ void AsyncBuiltinsAssembler::Await(Node* context, Node* generator, Node* value,
// When debugging, we need to link from the {generator} to the
// {outer_promise} of the async function/generator.
Label done(this);
- GotoIfNot(IsDebugActive(), &done);
+ GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &done);
CallRuntime(Runtime::kSetProperty, native_context, generator,
LoadRoot(Heap::kgenerator_outer_promise_symbolRootIndex),
outer_promise, SmiConstant(LanguageMode::kStrict));
diff --git a/deps/v8/src/builtins/builtins-async-generator-gen.cc b/deps/v8/src/builtins/builtins-async-generator-gen.cc
index b78747aaa9..290252da62 100644
--- a/deps/v8/src/builtins/builtins-async-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-generator-gen.cc
@@ -29,13 +29,13 @@ class AsyncGeneratorBuiltinsAssembler : public AsyncBuiltinsAssembler {
: AsyncBuiltinsAssembler(state) {}
inline Node* TaggedIsAsyncGenerator(Node* tagged_object) {
- Node* if_notsmi = TaggedIsNotSmi(tagged_object);
- return Select(if_notsmi,
- [=]() {
- return HasInstanceType(tagged_object,
- JS_ASYNC_GENERATOR_OBJECT_TYPE);
- },
- [=]() { return if_notsmi; }, MachineRepresentation::kBit);
+ TNode<BoolT> if_notsmi = TaggedIsNotSmi(tagged_object);
+ return Select<BoolT>(if_notsmi,
+ [=] {
+ return HasInstanceType(
+ tagged_object, JS_ASYNC_GENERATOR_OBJECT_TYPE);
+ },
+ [=] { return if_notsmi; });
}
inline Node* LoadGeneratorState(Node* const generator) {
return LoadObjectField(generator, JSGeneratorObject::kContinuationOffset);
@@ -518,11 +518,34 @@ TF_BUILTIN(AsyncGeneratorResolve, AsyncGeneratorBuiltinsAssembler) {
done);
}
-  // Perform Call(promiseCapability.[[Resolve]], undefined, «iteratorResult»).
- CallBuiltin(Builtins::kResolvePromise, context, promise, iter_result);
+ // We know that {iter_result} itself doesn't have any "then" property and
+ // we also know that the [[Prototype]] of {iter_result} is the intrinsic
+ // %ObjectPrototype%. So we can skip the [[Resolve]] logic here completely
+ // and directly call into the FulfillPromise operation if we can prove
+ // that the %ObjectPrototype% also doesn't have any "then" property. This
+ // is guarded by the Promise#then protector.
+ Label if_fast(this), if_slow(this, Label::kDeferred), return_promise(this);
+ GotoIfForceSlowPath(&if_slow);
+ Branch(IsPromiseThenProtectorCellInvalid(), &if_slow, &if_fast);
+
+ BIND(&if_fast);
+ {
+ // Skip the "then" on {iter_result} and directly fulfill the {promise}
+ // with the {iter_result}.
+ CallBuiltin(Builtins::kFulfillPromise, context, promise, iter_result);
+ Goto(&return_promise);
+ }
+
+ BIND(&if_slow);
+ {
+    // Perform Call(promiseCapability.[[Resolve]], undefined, «iteratorResult»).
+ CallBuiltin(Builtins::kResolvePromise, context, promise, iter_result);
+ Goto(&return_promise);
+ }
// Per spec, AsyncGeneratorResolve() returns undefined. However, for the
// benefit of %TraceExit(), return the Promise.
+ BIND(&return_promise);
Return(promise);
}
@@ -548,11 +571,42 @@ TF_BUILTIN(AsyncGeneratorYield, AsyncGeneratorBuiltinsAssembler) {
Node* const request = LoadFirstAsyncGeneratorRequestFromQueue(generator);
Node* const outer_promise = LoadPromiseFromAsyncGeneratorRequest(request);
+ // Mark the generator as "awaiting".
SetGeneratorAwaiting(generator);
- Await(context, generator, value, outer_promise,
- Builtins::kAsyncGeneratorYieldFulfill,
- Builtins::kAsyncGeneratorAwaitReject, is_caught);
- Return(UndefinedConstant());
+
+ // We can skip the creation of a temporary promise and the whole
+ // [[Resolve]] logic if we already know that the {value} that's
+ // being yielded is a primitive, as in that case we would immediately
+ // fulfill the temporary promise anyways and schedule a fulfill
+ // reaction job. This gives a nice performance boost for async
+ // generators that yield only primitives, e.g. numbers or strings.
+ Label if_primitive(this), if_generic(this);
+ GotoIfForceSlowPath(&if_generic);
+ GotoIf(IsPromiseHookEnabledOrDebugIsActive(), &if_generic);
+ GotoIf(TaggedIsSmi(value), &if_primitive);
+ Branch(IsJSReceiver(value), &if_generic, &if_primitive);
+
+ BIND(&if_generic);
+ {
+ Await(context, generator, value, outer_promise,
+ Builtins::kAsyncGeneratorYieldFulfill,
+ Builtins::kAsyncGeneratorAwaitReject, is_caught);
+ Return(UndefinedConstant());
+ }
+
+ BIND(&if_primitive);
+ {
+ // For primitive {value}s we can skip the allocation of the temporary
+ // promise and the resolution of that, and directly allocate the fulfill
+ // reaction job.
+ Node* const microtask = AllocatePromiseReactionJobTask(
+ Heap::kPromiseFulfillReactionJobTaskMapRootIndex, context, value,
+ HeapConstant(Builtins::CallableFor(
+ isolate(), Builtins::kAsyncGeneratorYieldFulfill)
+ .code()),
+ generator);
+ TailCallBuiltin(Builtins::kEnqueueMicrotask, context, microtask);
+ }
}
TF_BUILTIN(AsyncGeneratorYieldFulfill, AsyncGeneratorBuiltinsAssembler) {
diff --git a/deps/v8/src/builtins/builtins-bigint.cc b/deps/v8/src/builtins/builtins-bigint.cc
index fdbd3937d4..6c04c9dcb7 100644
--- a/deps/v8/src/builtins/builtins-bigint.cc
+++ b/deps/v8/src/builtins/builtins-bigint.cc
@@ -13,29 +13,28 @@ namespace internal {
BUILTIN(BigIntConstructor) {
HandleScope scope(isolate);
- Handle<Object> value = args.atOrUndefined(isolate, 1);
-
- if (value->IsJSReceiver()) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, value,
- JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(value),
- ToPrimitiveHint::kNumber));
- }
-
- if (value->IsNumber()) {
- RETURN_RESULT_OR_FAILURE(isolate, BigInt::FromNumber(isolate, value));
- } else {
- RETURN_RESULT_OR_FAILURE(isolate, BigInt::FromObject(isolate, value));
+ if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
+ Handle<Object> value = args.atOrUndefined(isolate, 1);
+
+ if (value->IsJSReceiver()) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, value,
+ JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(value),
+ ToPrimitiveHint::kNumber));
+ }
+
+ if (value->IsNumber()) {
+ RETURN_RESULT_OR_FAILURE(isolate, BigInt::FromNumber(isolate, value));
+ } else {
+ RETURN_RESULT_OR_FAILURE(isolate, BigInt::FromObject(isolate, value));
+ }
+ } else { // [[Construct]]
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kNotConstructor,
+ isolate->factory()->BigInt_string()));
}
}
-BUILTIN(BigIntConstructor_ConstructStub) {
- HandleScope scope(isolate);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kNotConstructor,
- isolate->factory()->BigInt_string()));
-}
-
BUILTIN(BigIntAsUintN) {
HandleScope scope(isolate);
Handle<Object> bits_obj = args.atOrUndefined(isolate, 1);
diff --git a/deps/v8/src/builtins/builtins-boolean.cc b/deps/v8/src/builtins/builtins-boolean.cc
index dabd803dc1..5dc42d506f 100644
--- a/deps/v8/src/builtins/builtins-boolean.cc
+++ b/deps/v8/src/builtins/builtins-boolean.cc
@@ -11,28 +11,27 @@ namespace v8 {
namespace internal {
// -----------------------------------------------------------------------------
-// ES6 section 19.3 Boolean Objects
+// ES #sec-boolean-objects
-// ES6 section 19.3.1.1 Boolean ( value ) for the [[Call]] case.
+// ES #sec-boolean-constructor
BUILTIN(BooleanConstructor) {
HandleScope scope(isolate);
- Handle<Object> value = args.atOrUndefined(isolate, 1);
- return isolate->heap()->ToBoolean(value->BooleanValue());
-}
-
-// ES6 section 19.3.1.1 Boolean ( value ) for the [[Construct]] case.
-BUILTIN(BooleanConstructor_ConstructStub) {
- HandleScope scope(isolate);
- Handle<Object> value = args.atOrUndefined(isolate, 1);
- Handle<JSFunction> target = args.target();
- Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
- DCHECK(*target == target->native_context()->boolean_function());
- Handle<JSObject> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- JSObject::New(target, new_target));
- Handle<JSValue>::cast(result)->set_value(
- isolate->heap()->ToBoolean(value->BooleanValue()));
- return *result;
+ if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
+ Handle<Object> value = args.atOrUndefined(isolate, 1);
+ return isolate->heap()->ToBoolean(value->BooleanValue());
+ } else { // [[Construct]]
+ HandleScope scope(isolate);
+ Handle<Object> value = args.atOrUndefined(isolate, 1);
+ Handle<JSFunction> target = args.target();
+ Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
+ DCHECK(*target == target->native_context()->boolean_function());
+ Handle<JSObject> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ JSObject::New(target, new_target));
+ Handle<JSValue>::cast(result)->set_value(
+ isolate->heap()->ToBoolean(value->BooleanValue()));
+ return *result;
+ }
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-call-gen.cc b/deps/v8/src/builtins/builtins-call-gen.cc
index 7443202c98..34c88670f4 100644
--- a/deps/v8/src/builtins/builtins-call-gen.cc
+++ b/deps/v8/src/builtins/builtins-call-gen.cc
@@ -268,91 +268,88 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructDoubleVarargs(
void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
Node* target, Node* new_target, Node* spread, Node* args_count,
Node* context) {
- Label if_done(this), if_holey(this), if_runtime(this, Label::kDeferred);
+ Label if_smiorobject(this), if_double(this),
+ if_generic(this, Label::kDeferred);
- VARIABLE(spread_result, MachineRepresentation::kTagged, spread);
+ VARIABLE(var_length, MachineRepresentation::kWord32);
+ VARIABLE(var_elements, MachineRepresentation::kTagged);
+ VARIABLE(var_elements_kind, MachineRepresentation::kWord32);
- GotoIf(TaggedIsSmi(spread), &if_runtime);
+ GotoIf(TaggedIsSmi(spread), &if_generic);
Node* spread_map = LoadMap(spread);
- GotoIfNot(IsJSArrayMap(spread_map), &if_runtime);
+ GotoIfNot(IsJSArrayMap(spread_map), &if_generic);
+
+ // Check that we have the original Array.prototype.
+ GotoIfNot(IsPrototypeInitialArrayPrototype(context, spread_map), &if_generic);
- // Check that we have the original ArrayPrototype.
- GotoIfNot(IsPrototypeInitialArrayPrototype(context, spread_map), &if_runtime);
+ // Check that there are no elements on the Array.prototype chain.
+ GotoIf(IsNoElementsProtectorCellInvalid(), &if_generic);
- // Check that the ArrayPrototype hasn't been modified in a way that would
+ // Check that the Array.prototype hasn't been modified in a way that would
// affect iteration.
Node* protector_cell = LoadRoot(Heap::kArrayIteratorProtectorRootIndex);
DCHECK(isolate()->heap()->array_iterator_protector()->IsPropertyCell());
- GotoIfNot(
- WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
- SmiConstant(Isolate::kProtectorValid)),
- &if_runtime);
-
- // Check that the map of the initial array iterator hasn't changed.
- TNode<Context> native_context = LoadNativeContext(context);
- GotoIfNot(HasInitialArrayIteratorPrototypeMap(native_context), &if_runtime);
-
- Node* kind = LoadMapElementsKind(spread_map);
-
- STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0);
- STATIC_ASSERT(HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(PACKED_ELEMENTS == 2);
- STATIC_ASSERT(HOLEY_ELEMENTS == 3);
- STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS == 4);
- STATIC_ASSERT(HOLEY_DOUBLE_ELEMENTS == 5);
- STATIC_ASSERT(LAST_FAST_ELEMENTS_KIND == HOLEY_DOUBLE_ELEMENTS);
-
- GotoIf(Int32GreaterThan(kind, Int32Constant(LAST_FAST_ELEMENTS_KIND)),
- &if_runtime);
- Branch(Word32And(kind, Int32Constant(1)), &if_holey, &if_done);
-
- // Check the NoElementsProtector cell for holey arrays.
- BIND(&if_holey);
- { Branch(IsNoElementsProtectorCellInvalid(), &if_runtime, &if_done); }
-
- BIND(&if_runtime);
+ GotoIf(WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
+ SmiConstant(Isolate::kProtectorInvalid)),
+ &if_generic);
+
+ // The fast-path accesses the {spread} elements directly.
+ Node* spread_kind = LoadMapElementsKind(spread_map);
+ var_elements_kind.Bind(spread_kind);
+ var_length.Bind(
+ LoadAndUntagToWord32ObjectField(spread, JSArray::kLengthOffset));
+ var_elements.Bind(LoadObjectField(spread, JSArray::kElementsOffset));
+
+ // Check elements kind of {spread}.
+ GotoIf(Int32LessThan(spread_kind, Int32Constant(PACKED_DOUBLE_ELEMENTS)),
+ &if_smiorobject);
+ Branch(Int32GreaterThan(spread_kind, Int32Constant(LAST_FAST_ELEMENTS_KIND)),
+ &if_generic, &if_double);
+
+ BIND(&if_generic);
{
- Node* spread_iterable = LoadContextElement(LoadNativeContext(context),
- Context::SPREAD_ITERABLE_INDEX);
- spread_result.Bind(CallJS(CodeFactory::Call(isolate()), context,
- spread_iterable, UndefinedConstant(), spread));
- CSA_ASSERT(this, IsJSArray(spread_result.value()));
- Goto(&if_done);
+ Label if_iterator_fn_not_callable(this, Label::kDeferred);
+ Node* iterator_fn = GetProperty(context, spread, IteratorSymbolConstant());
+ GotoIf(TaggedIsSmi(iterator_fn), &if_iterator_fn_not_callable);
+ GotoIfNot(IsCallable(iterator_fn), &if_iterator_fn_not_callable);
+ Node* list =
+ CallBuiltin(Builtins::kIterableToList, context, spread, iterator_fn);
+ CSA_ASSERT(this, IsJSArray(list));
+ Node* list_kind = LoadMapElementsKind(LoadMap(list));
+ var_length.Bind(
+ LoadAndUntagToWord32ObjectField(list, JSArray::kLengthOffset));
+ var_elements.Bind(LoadObjectField(list, JSArray::kElementsOffset));
+ var_elements_kind.Bind(list_kind);
+ Branch(Int32LessThan(list_kind, Int32Constant(PACKED_DOUBLE_ELEMENTS)),
+ &if_smiorobject, &if_double);
+
+ BIND(&if_iterator_fn_not_callable);
+ ThrowTypeError(context, MessageTemplate::kIteratorSymbolNonCallable);
}
- BIND(&if_done);
+ BIND(&if_smiorobject);
{
- // The result from if_runtime can be an array of doubles.
- Label if_not_double(this), if_double(this);
- Node* elements =
- LoadObjectField(spread_result.value(), JSArray::kElementsOffset);
- Node* length = LoadAndUntagToWord32ObjectField(spread_result.value(),
- JSArray::kLengthOffset);
-
- Node* kind = LoadMapElementsKind(LoadMap(elements));
- CSA_ASSERT(this, Int32LessThanOrEqual(
- kind, Int32Constant(LAST_FAST_ELEMENTS_KIND)));
-
- Branch(Int32GreaterThan(kind, Int32Constant(HOLEY_ELEMENTS)), &if_double,
- &if_not_double);
+ Node* const elements = var_elements.value();
+ Node* const length = var_length.value();
- BIND(&if_not_double);
- {
- if (new_target == nullptr) {
- Callable callable = CodeFactory::CallVarargs(isolate());
- TailCallStub(callable, context, target, args_count, elements, length);
- } else {
- Callable callable = CodeFactory::ConstructVarargs(isolate());
- TailCallStub(callable, context, target, new_target, args_count,
- elements, length);
- }
+ if (new_target == nullptr) {
+ Callable callable = CodeFactory::CallVarargs(isolate());
+ TailCallStub(callable, context, target, args_count, elements, length);
+ } else {
+ Callable callable = CodeFactory::ConstructVarargs(isolate());
+ TailCallStub(callable, context, target, new_target, args_count, elements,
+ length);
}
+ }
- BIND(&if_double);
- {
- CallOrConstructDoubleVarargs(target, new_target, elements, length,
- args_count, context, kind);
- }
+ BIND(&if_double);
+ {
+ Node* const elements_kind = var_elements_kind.value();
+ Node* const elements = var_elements.value();
+ Node* const length = var_length.value();
+
+ CallOrConstructDoubleVarargs(target, new_target, elements, length,
+ args_count, context, elements_kind);
}
}
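
The rewritten spread path above classifies the spread's elements kind three ways: the Smi and object kinds take the tagged fast path, the two double kinds take the double path, and everything else (or any failed map or protector check) falls back to the generic iterator path. A small standalone sketch of just that numeric classification in plain C++; the constants are illustrative stand-ins for V8's ElementsKind enum, matching the ordering the old STATIC_ASSERTs spelled out:

enum ElementsKind {
  PACKED_SMI_ELEMENTS = 0,
  HOLEY_SMI_ELEMENTS = 1,
  PACKED_ELEMENTS = 2,
  HOLEY_ELEMENTS = 3,
  PACKED_DOUBLE_ELEMENTS = 4,
  HOLEY_DOUBLE_ELEMENTS = 5,
  LAST_FAST_ELEMENTS_KIND = HOLEY_DOUBLE_ELEMENTS
};

enum class SpreadPath { kSmiOrObject, kDouble, kGeneric };

// Mirrors the Int32LessThan / Int32GreaterThan pair in the builtin above.
SpreadPath Classify(int kind) {
  if (kind < PACKED_DOUBLE_ELEMENTS) return SpreadPath::kSmiOrObject;
  if (kind > LAST_FAST_ELEMENTS_KIND) return SpreadPath::kGeneric;
  return SpreadPath::kDouble;
}
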
diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc
index 563703707c..5c3f263746 100644
--- a/deps/v8/src/builtins/builtins-collections-gen.cc
+++ b/deps/v8/src/builtins/builtins-collections-gen.cc
@@ -6,7 +6,7 @@
#include "src/builtins/builtins-iterator-gen.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/code-stub-assembler.h"
-#include "src/factory-inl.h"
+#include "src/heap/factory-inl.h"
#include "src/objects/hash-table.h"
namespace v8 {
@@ -237,6 +237,7 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
TNode<Map> original_fast_js_array_map = LoadMap(fast_jsarray);
#endif
Label exit(this), if_doubles(this), if_smiorobjects(this);
+ GotoIf(IntPtrEqual(length, IntPtrConstant(0)), &exit);
Branch(IsFastSmiOrTaggedElementsKind(elements_kind), &if_smiorobjects,
&if_doubles);
BIND(&if_smiorobjects);
@@ -333,8 +334,7 @@ TNode<Object> BaseCollectionsAssembler::AllocateJSCollection(
[=] {
return AllocateJSCollectionSlow(context, constructor,
new_target);
- },
- MachineRepresentation::kTagged);
+ });
}
TNode<Object> BaseCollectionsAssembler::AllocateJSCollectionFast(
@@ -456,7 +456,7 @@ TNode<IntPtrT> BaseCollectionsAssembler::EstimatedInitialSize(
return Select<IntPtrT>(
is_fast_jsarray,
[=] { return SmiUntag(LoadFastJSArrayLength(CAST(initial_entries))); },
- [=] { return IntPtrConstant(0); }, MachineType::PointerRepresentation());
+ [=] { return IntPtrConstant(0); });
}
void BaseCollectionsAssembler::GotoIfNotJSReceiver(Node* const obj,
@@ -492,10 +492,9 @@ TNode<BoolT> BaseCollectionsAssembler::HasInitialCollectionPrototype(
TNode<Object> BaseCollectionsAssembler::LoadAndNormalizeFixedArrayElement(
TNode<Object> elements, TNode<IntPtrT> index) {
- TNode<Object> element = CAST(LoadFixedArrayElement(elements, index));
+ TNode<Object> element = LoadFixedArrayElement(elements, index);
return Select<Object>(IsTheHole(element), [=] { return UndefinedConstant(); },
- [=] { return element; },
- MachineRepresentation::kTagged);
+ [=] { return element; });
}
TNode<Object> BaseCollectionsAssembler::LoadAndNormalizeFixedDoubleArrayElement(
@@ -1060,12 +1059,12 @@ void CollectionsBuiltinsAssembler::FindOrderedHashTableEntry(
std::function<void(Node*, Label*, Label*)> key_compare,
Variable* entry_start_position, Label* entry_found, Label* not_found) {
// Get the index of the bucket.
- Node* const number_of_buckets = SmiUntag(
- LoadFixedArrayElement(table, CollectionType::kNumberOfBucketsIndex));
+ Node* const number_of_buckets = SmiUntag(CAST(
+ LoadFixedArrayElement(table, CollectionType::kNumberOfBucketsIndex)));
Node* const bucket =
WordAnd(hash, IntPtrSub(number_of_buckets, IntPtrConstant(1)));
- Node* const first_entry = SmiUntag(LoadFixedArrayElement(
- table, bucket, CollectionType::kHashTableStartIndex * kPointerSize));
+ Node* const first_entry = SmiUntag(CAST(LoadFixedArrayElement(
+ table, bucket, CollectionType::kHashTableStartIndex * kPointerSize)));
// Walk the bucket chain.
Node* entry_start;
@@ -1088,10 +1087,10 @@ void CollectionsBuiltinsAssembler::FindOrderedHashTableEntry(
UintPtrLessThan(
var_entry.value(),
SmiUntag(SmiAdd(
- LoadFixedArrayElement(table,
- CollectionType::kNumberOfElementsIndex),
- LoadFixedArrayElement(
- table, CollectionType::kNumberOfDeletedElementsIndex)))));
+ CAST(LoadFixedArrayElement(
+ table, CollectionType::kNumberOfElementsIndex)),
+ CAST(LoadFixedArrayElement(
+ table, CollectionType::kNumberOfDeletedElementsIndex))))));
// Compute the index of the entry relative to kHashTableStartIndex.
entry_start =
@@ -1108,10 +1107,10 @@ void CollectionsBuiltinsAssembler::FindOrderedHashTableEntry(
BIND(&continue_next_entry);
// Load the index of the next entry in the bucket chain.
- var_entry.Bind(SmiUntag(LoadFixedArrayElement(
+ var_entry.Bind(SmiUntag(CAST(LoadFixedArrayElement(
table, entry_start,
(CollectionType::kHashTableStartIndex + CollectionType::kChainOffset) *
- kPointerSize)));
+ kPointerSize))));
Goto(&loop);
}
@@ -1362,8 +1361,8 @@ TF_BUILTIN(MapPrototypeSet, CollectionsBuiltinsAssembler) {
VARIABLE(table_var, MachineRepresentation::kTaggedPointer, table);
{
// Check we have enough space for the entry.
- number_of_buckets.Bind(SmiUntag(
- LoadFixedArrayElement(table, OrderedHashMap::kNumberOfBucketsIndex)));
+ number_of_buckets.Bind(SmiUntag(CAST(
+ LoadFixedArrayElement(table, OrderedHashMap::kNumberOfBucketsIndex))));
STATIC_ASSERT(OrderedHashMap::kLoadFactor == 2);
Node* const capacity = WordShl(number_of_buckets.value(), 1);
@@ -1378,8 +1377,8 @@ TF_BUILTIN(MapPrototypeSet, CollectionsBuiltinsAssembler) {
// fields.
CallRuntime(Runtime::kMapGrow, context, receiver);
table_var.Bind(LoadObjectField(receiver, JSMap::kTableOffset));
- number_of_buckets.Bind(SmiUntag(LoadFixedArrayElement(
- table_var.value(), OrderedHashMap::kNumberOfBucketsIndex)));
+ number_of_buckets.Bind(SmiUntag(CAST(LoadFixedArrayElement(
+ table_var.value(), OrderedHashMap::kNumberOfBucketsIndex))));
Node* const new_number_of_elements = SmiUntag(CAST(LoadObjectField(
table_var.value(), OrderedHashMap::kNumberOfElementsOffset)));
Node* const new_number_of_deleted = SmiUntag(CAST(LoadObjectField(
@@ -1518,7 +1517,7 @@ TF_BUILTIN(SetPrototypeAdd, CollectionsBuiltinsAssembler) {
&add_entry);
// Otherwise, go to runtime to compute the hash code.
- entry_start_position_or_hash.Bind(SmiUntag((CallGetOrCreateHashRaw(key))));
+ entry_start_position_or_hash.Bind(SmiUntag(CallGetOrCreateHashRaw(key)));
Goto(&add_entry);
}
@@ -1528,8 +1527,8 @@ TF_BUILTIN(SetPrototypeAdd, CollectionsBuiltinsAssembler) {
VARIABLE(table_var, MachineRepresentation::kTaggedPointer, table);
{
// Check we have enough space for the entry.
- number_of_buckets.Bind(SmiUntag(
- LoadFixedArrayElement(table, OrderedHashSet::kNumberOfBucketsIndex)));
+ number_of_buckets.Bind(SmiUntag(CAST(
+ LoadFixedArrayElement(table, OrderedHashSet::kNumberOfBucketsIndex))));
STATIC_ASSERT(OrderedHashSet::kLoadFactor == 2);
Node* const capacity = WordShl(number_of_buckets.value(), 1);
@@ -1544,8 +1543,8 @@ TF_BUILTIN(SetPrototypeAdd, CollectionsBuiltinsAssembler) {
// fields.
CallRuntime(Runtime::kSetGrow, context, receiver);
table_var.Bind(LoadObjectField(receiver, JSMap::kTableOffset));
- number_of_buckets.Bind(SmiUntag(LoadFixedArrayElement(
- table_var.value(), OrderedHashSet::kNumberOfBucketsIndex)));
+ number_of_buckets.Bind(SmiUntag(CAST(LoadFixedArrayElement(
+ table_var.value(), OrderedHashSet::kNumberOfBucketsIndex))));
Node* const new_number_of_elements = SmiUntag(CAST(LoadObjectField(
table_var.value(), OrderedHashSet::kNumberOfElementsOffset)));
Node* const new_number_of_deleted = SmiUntag(CAST(LoadObjectField(
@@ -2222,7 +2221,7 @@ TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::FindKeyIndex(
TNode<IntPtrT> key_index;
{
key_index = KeyIndexFromEntry(var_entry.value());
- TNode<Object> entry_key = CAST(LoadFixedArrayElement(table, key_index));
+ TNode<Object> entry_key = LoadFixedArrayElement(table, key_index);
key_compare(entry_key, &if_found);
@@ -2271,15 +2270,15 @@ TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::KeyIndexFromEntry(
TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::LoadNumberOfElements(
TNode<Object> table, int offset) {
- TNode<IntPtrT> number_of_elements = SmiUntag(
- LoadFixedArrayElement(table, ObjectHashTable::kNumberOfElementsIndex));
+ TNode<IntPtrT> number_of_elements = SmiUntag(CAST(
+ LoadFixedArrayElement(table, ObjectHashTable::kNumberOfElementsIndex)));
return IntPtrAdd(number_of_elements, IntPtrConstant(offset));
}
TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::LoadNumberOfDeleted(
TNode<Object> table, int offset) {
- TNode<IntPtrT> number_of_deleted = SmiUntag(LoadFixedArrayElement(
- table, ObjectHashTable::kNumberOfDeletedElementsIndex));
+ TNode<IntPtrT> number_of_deleted = SmiUntag(CAST(LoadFixedArrayElement(
+ table, ObjectHashTable::kNumberOfDeletedElementsIndex)));
return IntPtrAdd(number_of_deleted, IntPtrConstant(offset));
}
@@ -2291,7 +2290,7 @@ TNode<Object> WeakCollectionsBuiltinsAssembler::LoadTable(
TNode<IntPtrT> WeakCollectionsBuiltinsAssembler::LoadTableCapacity(
TNode<Object> table) {
return SmiUntag(
- LoadFixedArrayElement(table, ObjectHashTable::kCapacityIndex));
+ CAST(LoadFixedArrayElement(table, ObjectHashTable::kCapacityIndex)));
}
TNode<Word32T> WeakCollectionsBuiltinsAssembler::InsufficientCapacityToAdd(
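
Illustrative sketch (not part of the diff): FindOrderedHashTableEntry, touched above only for the SmiUntag(CAST(...)) typing, looks a key up by masking its hash into a bucket and then walking a per-bucket chain stored alongside the entries. The layout below is hypothetical and much simpler than OrderedHashMap's flat FixedArray encoding, but the bucket-and-chain walk is the same.

#include <cstddef>
#include <iostream>
#include <vector>

// Hypothetical miniature of an ordered hash table: a bucket array mapping
// hash -> first entry index, plus per-entry (key, next) records; next == -1
// terminates a chain.
struct Entry {
  int key;
  int next;
};

struct MiniOrderedHashTable {
  std::vector<int> buckets;     // number of buckets is a power of two
  std::vector<Entry> entries;

  // Mirrors FindOrderedHashTableEntry: pick the bucket from the hash, then
  // walk the chain comparing keys until a match or the end of the chain.
  int Find(int key, size_t hash) const {
    int entry = buckets[hash & (buckets.size() - 1)];
    while (entry != -1) {
      if (entries[entry].key == key) return entry;
      entry = entries[entry].next;
    }
    return -1;  // not found
  }
};

int main() {
  MiniOrderedHashTable table{{0, -1}, {{10, 1}, {42, -1}}};
  std::cout << table.Find(42, 0) << ' ' << table.Find(7, 0) << '\n';  // 1 -1
  return 0;
}
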
diff --git a/deps/v8/src/builtins/builtins-console.cc b/deps/v8/src/builtins/builtins-console.cc
index 75ad302d3d..138db2a422 100644
--- a/deps/v8/src/builtins/builtins-console.cc
+++ b/deps/v8/src/builtins/builtins-console.cc
@@ -112,13 +112,11 @@ void InstallContextFunction(Handle<JSObject> target, const char* name,
Handle<Object> context_name) {
Factory* const factory = target->GetIsolate()->factory();
- Handle<Code> code(target->GetIsolate()->builtins()->builtin(builtin_id));
-
Handle<String> name_string =
Name::ToFunctionName(factory->InternalizeUtf8String(name))
.ToHandleChecked();
NewFunctionArgs args = NewFunctionArgs::ForBuiltinWithoutPrototype(
- name_string, code, builtin_id, i::LanguageMode::kSloppy);
+ name_string, builtin_id, i::LanguageMode::kSloppy);
Handle<JSFunction> fun = factory->NewFunction(args);
fun->shared()->set_native(true);
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc
index 945fb4394b..e16945ba26 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.cc
+++ b/deps/v8/src/builtins/builtins-constructor-gen.cc
@@ -99,13 +99,12 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
// The calculation of |function_map_index| must be in sync with
// SharedFunctionInfo::function_map_index().
- Node* const compiler_hints = LoadObjectField(
- shared_function_info, SharedFunctionInfo::kCompilerHintsOffset,
- MachineType::Uint32());
- Node* const function_map_index =
- IntPtrAdd(DecodeWordFromWord32<SharedFunctionInfo::FunctionMapIndexBits>(
- compiler_hints),
- IntPtrConstant(Context::FIRST_FUNCTION_MAP_INDEX));
+ Node* const flags =
+ LoadObjectField(shared_function_info, SharedFunctionInfo::kFlagsOffset,
+ MachineType::Uint32());
+ Node* const function_map_index = IntPtrAdd(
+ DecodeWordFromWord32<SharedFunctionInfo::FunctionMapIndexBits>(flags),
+ IntPtrConstant(Context::FIRST_FUNCTION_MAP_INDEX));
CSA_ASSERT(this, UintPtrLessThanOrEqual(
function_map_index,
IntPtrConstant(Context::LAST_FUNCTION_MAP_INDEX)));
@@ -636,166 +635,163 @@ Node* ConstructorBuiltinsAssembler::EmitCreateEmptyObjectLiteral(
return result;
}
+// ES #sec-object-constructor
TF_BUILTIN(ObjectConstructor, ConstructorBuiltinsAssembler) {
int const kValueArg = 0;
Node* argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Node* value = args.GetOptionalArgumentValue(kValueArg);
Node* context = Parameter(BuiltinDescriptor::kContext);
+ Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- CSA_ASSERT(this, IsUndefined(Parameter(BuiltinDescriptor::kNewTarget)));
-
- Label return_to_object(this);
-
- GotoIf(Word32And(IsNotUndefined(value), IsNotNull(value)), &return_to_object);
-
- args.PopAndReturn(EmitCreateEmptyObjectLiteral(context));
-
- BIND(&return_to_object);
- args.PopAndReturn(ToObject(context, value));
-}
-
-TF_BUILTIN(ObjectConstructor_ConstructStub, ConstructorBuiltinsAssembler) {
- int const kValueArg = 0;
- Node* argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
- CodeStubArguments args(this, argc);
- Node* value = args.GetOptionalArgumentValue(kValueArg);
-
+ VARIABLE(var_result, MachineRepresentation::kTagged);
+ Label if_subclass(this, Label::kDeferred), if_notsubclass(this),
+ return_result(this);
+ GotoIf(IsUndefined(new_target), &if_notsubclass);
Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
MachineType::TaggedPointer());
- Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ Branch(WordEqual(new_target, target), &if_notsubclass, &if_subclass);
- CSA_ASSERT(this, IsNotUndefined(new_target));
+ BIND(&if_subclass);
+ {
+ Node* result =
+ CallBuiltin(Builtins::kFastNewObject, context, target, new_target);
+ var_result.Bind(result);
+ Goto(&return_result);
+ }
- Label return_to_object(this);
+ BIND(&if_notsubclass);
+ {
+ Label if_newobject(this, Label::kDeferred), if_toobject(this);
- GotoIf(Word32And(WordEqual(target, new_target),
- Word32And(IsNotUndefined(value), IsNotNull(value))),
- &return_to_object);
- args.PopAndReturn(EmitFastNewObject(context, target, new_target));
+ Node* value_index = IntPtrConstant(kValueArg);
+ GotoIf(UintPtrGreaterThanOrEqual(value_index, argc), &if_newobject);
+ Node* value = args.AtIndex(value_index);
+ GotoIf(IsNull(value), &if_newobject);
+ Branch(IsUndefined(value), &if_newobject, &if_toobject);
- BIND(&return_to_object);
- args.PopAndReturn(ToObject(context, value));
+ BIND(&if_newobject);
+ {
+ Node* result = EmitCreateEmptyObjectLiteral(context);
+ var_result.Bind(result);
+ Goto(&return_result);
+ }
+
+ BIND(&if_toobject);
+ {
+ Node* result = CallBuiltin(Builtins::kToObject, context, value);
+ var_result.Bind(result);
+ Goto(&return_result);
+ }
+ }
+
+ BIND(&return_result);
+ args.PopAndReturn(var_result.value());
}
+// ES #sec-number-constructor
TF_BUILTIN(NumberConstructor, ConstructorBuiltinsAssembler) {
+ Node* context = Parameter(BuiltinDescriptor::kContext);
Node* argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Label return_zero(this);
+ // 1. If no arguments were passed to this function invocation, let n be +0.
+ VARIABLE(var_n, MachineRepresentation::kTagged, SmiConstant(0));
+ Label if_nloaded(this, &var_n);
+ GotoIf(WordEqual(argc, IntPtrConstant(0)), &if_nloaded);
- GotoIf(IntPtrEqual(IntPtrConstant(0), argc), &return_zero);
+ // 2. Else,
+ // a. Let prim be ? ToNumeric(value).
+ // b. If Type(prim) is BigInt, let n be the Number value for prim.
+ // c. Otherwise, let n be prim.
+ Node* value = args.AtIndex(0);
+ var_n.Bind(ToNumber(context, value, BigIntHandling::kConvertToNumber));
+ Goto(&if_nloaded);
- Node* context = Parameter(BuiltinDescriptor::kContext);
- args.PopAndReturn(
- ToNumber(context, args.AtIndex(0), BigIntHandling::kConvertToNumber));
+ BIND(&if_nloaded);
+ {
+ // 3. If NewTarget is undefined, return n.
+ Node* n_value = var_n.value();
+ Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
+ Label return_n(this), constructnumber(this, Label::kDeferred);
+ Branch(IsUndefined(new_target), &return_n, &constructnumber);
- BIND(&return_zero);
- args.PopAndReturn(SmiConstant(0));
+ BIND(&return_n);
+ { args.PopAndReturn(n_value); }
+
+ BIND(&constructnumber);
+ {
+ // 4. Let O be ? OrdinaryCreateFromConstructor(NewTarget,
+ // "%NumberPrototype%", Ā« [[NumberData]] Ā»).
+ // 5. Set O.[[NumberData]] to n.
+ // 6. Return O.
+ Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
+ MachineType::TaggedPointer());
+ Node* result =
+ CallBuiltin(Builtins::kFastNewObject, context, target, new_target);
+ StoreObjectField(result, JSValue::kValueOffset, n_value);
+ args.PopAndReturn(result);
+ }
+ }
}
-TF_BUILTIN(NumberConstructor_ConstructStub, ConstructorBuiltinsAssembler) {
- Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
- MachineType::TaggedPointer());
- Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
+// https://tc39.github.io/ecma262/#sec-string-constructor
+TF_BUILTIN(StringConstructor, ConstructorBuiltinsAssembler) {
Node* context = Parameter(BuiltinDescriptor::kContext);
-
Node* argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
- Label wrap(this);
-
- VARIABLE(var_result, MachineRepresentation::kTagged, SmiConstant(0));
-
- GotoIf(IntPtrEqual(IntPtrConstant(0), argc), &wrap);
- var_result.Bind(
- ToNumber(context, args.AtIndex(0), BigIntHandling::kConvertToNumber));
- Goto(&wrap);
-
- BIND(&wrap);
- Node* result = EmitFastNewObject(context, target, new_target);
- StoreObjectField(result, JSValue::kValueOffset, var_result.value());
- args.PopAndReturn(result);
-}
-
-Node* ConstructorBuiltinsAssembler::EmitConstructString(Node* argc,
- CodeStubArguments& args,
- Node* context,
- bool convert_symbol) {
- VARIABLE(var_result, MachineRepresentation::kTagged);
-
- Label return_empty_string(this), to_string(this),
- check_symbol(this, Label::kDeferred), done(this);
-
- GotoIf(IntPtrEqual(IntPtrConstant(0), argc), &return_empty_string);
-
- Node* argument = args.AtIndex(0);
+ Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
+ Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
+ MachineType::TaggedPointer());
- GotoIf(TaggedIsSmi(argument), &to_string);
+ // 1. If no arguments were passed to this function invocation, let s be "".
+ VARIABLE(var_s, MachineRepresentation::kTagged, EmptyStringConstant());
+ Label if_sloaded(this, &var_s);
+ GotoIf(WordEqual(argc, IntPtrConstant(0)), &if_sloaded);
- Node* instance_type = LoadInstanceType(argument);
+ // 2. Else,
+ // a. If NewTarget is undefined [...]
+ Node* value = args.AtIndex(0);
+ Label if_tostring(this, &var_s);
+ GotoIfNot(IsUndefined(new_target), &if_tostring);
- Label* non_string = convert_symbol ? &check_symbol : &to_string;
- GotoIfNot(IsStringInstanceType(instance_type), non_string);
+ // 2a. [...] and Type(value) is Symbol, return SymbolDescriptiveString(value).
+ GotoIf(TaggedIsSmi(value), &if_tostring);
+ GotoIfNot(IsSymbol(value), &if_tostring);
{
- var_result.Bind(argument);
- Goto(&done);
+ Node* result =
+ CallRuntime(Runtime::kSymbolDescriptiveString, context, value);
+ args.PopAndReturn(result);
}
- if (convert_symbol) {
- BIND(&check_symbol);
- GotoIfNot(IsSymbolInstanceType(instance_type), &to_string);
- {
- var_result.Bind(
- CallRuntime(Runtime::kSymbolDescriptiveString, context, argument));
- Goto(&done);
- }
- }
-
- BIND(&to_string);
+ // 2b. Let s be ? ToString(value).
+ BIND(&if_tostring);
{
- var_result.Bind(ToString(context, argument));
- Goto(&done);
+ var_s.Bind(CallBuiltin(Builtins::kToString, context, value));
+ Goto(&if_sloaded);
}
- BIND(&return_empty_string);
+ // 3. If NewTarget is undefined, return s.
+ BIND(&if_sloaded);
{
- var_result.Bind(EmptyStringConstant());
- Goto(&done);
- }
-
- BIND(&done);
- return var_result.value();
-}
-
-TF_BUILTIN(StringConstructor, ConstructorBuiltinsAssembler) {
- Node* context = Parameter(BuiltinDescriptor::kContext);
- Node* argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
- CodeStubArguments args(this, argc);
-
- args.PopAndReturn(EmitConstructString(argc, args, context, true));
-}
-
-TF_BUILTIN(StringConstructor_ConstructStub, ConstructorBuiltinsAssembler) {
- Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
- MachineType::TaggedPointer());
- Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
- Node* context = Parameter(BuiltinDescriptor::kContext);
+ Node* s_value = var_s.value();
+ Label return_s(this), constructstring(this, Label::kDeferred);
+ Branch(IsUndefined(new_target), &return_s, &constructstring);
- Node* argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
- CodeStubArguments args(this, argc);
+ BIND(&return_s);
+ { args.PopAndReturn(s_value); }
- Node* string = EmitConstructString(argc, args, context, false);
- Node* result = EmitFastNewObject(context, target, new_target);
- StoreObjectField(result, JSValue::kValueOffset, string);
- args.PopAndReturn(result);
+ BIND(&constructstring);
+ {
+ Node* result =
+ CallBuiltin(Builtins::kFastNewObject, context, target, new_target);
+ StoreObjectField(result, JSValue::kValueOffset, s_value);
+ args.PopAndReturn(result);
+ }
+ }
}
} // namespace internal
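
Illustrative sketch (not part of the diff): with ObjectConstructor_ConstructStub gone, a single builtin distinguishes [[Call]] from [[Construct]] by inspecting new.target. A subclass construct is routed to FastNewObject, while a plain call (or `new Object(...)` on the constructor itself) creates an empty object for a missing/null/undefined value and otherwise defers to ToObject. Below, EmptyObject, FastNewObject and ToObject are string-returning placeholders, not the real CSA helpers.

#include <iostream>
#include <optional>
#include <string>

// String-returning placeholders for the real CSA helpers.
using JSValue = std::string;
JSValue EmptyObject() { return "{}"; }
JSValue FastNewObject() { return "subclass instance"; }
JSValue ToObject(const JSValue& v) { return "wrapper(" + v + ")"; }

// Models the merged ObjectConstructor: new_target is empty for a [[Call]],
// equal to "Object" for `new Object(...)`, and anything else for a subclass.
JSValue ObjectConstructor(std::optional<std::string> new_target,
                          std::optional<JSValue> value) {
  bool is_subclass = new_target.has_value() && *new_target != "Object";
  if (is_subclass) return FastNewObject();
  if (!value || *value == "null" || *value == "undefined") return EmptyObject();
  return ToObject(*value);
}

int main() {
  std::cout << ObjectConstructor(std::nullopt, std::nullopt) << '\n';  // {}
  std::cout << ObjectConstructor("Object", JSValue("42")) << '\n';     // wrapper(42)
  std::cout << ObjectConstructor("MyObject", JSValue("42")) << '\n';   // subclass instance
  return 0;
}
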
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.h b/deps/v8/src/builtins/builtins-constructor-gen.h
index f6d71882bc..820970961b 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.h
+++ b/deps/v8/src/builtins/builtins-constructor-gen.h
@@ -36,9 +36,6 @@ class ConstructorBuiltinsAssembler : public CodeStubAssembler {
Node* EmitFastNewObject(Node* context, Node* target, Node* new_target,
Label* call_runtime);
- Node* EmitConstructString(Node* argc, CodeStubArguments& args, Node* context,
- bool convert_symbol);
-
private:
Node* NotHasBoilerplate(Node* literal_site);
Node* LoadAllocationSiteBoilerplate(Node* allocation_site);
diff --git a/deps/v8/src/builtins/builtins-conversion-gen.cc b/deps/v8/src/builtins/builtins-conversion-gen.cc
index dc3e8d53c4..63a6dc0a91 100644
--- a/deps/v8/src/builtins/builtins-conversion-gen.cc
+++ b/deps/v8/src/builtins/builtins-conversion-gen.cc
@@ -126,12 +126,12 @@ TF_BUILTIN(NonNumberToNumeric, CodeStubAssembler) {
}
TF_BUILTIN(ToNumeric, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* input = Parameter(Descriptor::kArgument);
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> input = CAST(Parameter(Descriptor::kArgument));
- Return(Select(IsNumber(input), [=] { return input; },
- [=] { return NonNumberToNumeric(context, input); },
- MachineRepresentation::kTagged));
+ Return(Select<Numeric>(
+ IsNumber(input), [=] { return CAST(input); },
+ [=] { return NonNumberToNumeric(context, CAST(input)); }));
}
// ES6 section 7.1.3 ToNumber ( argument )
diff --git a/deps/v8/src/builtins/builtins-dataview.cc b/deps/v8/src/builtins/builtins-dataview.cc
index 38b3d90649..49dcbe1e83 100644
--- a/deps/v8/src/builtins/builtins-dataview.cc
+++ b/deps/v8/src/builtins/builtins-dataview.cc
@@ -6,7 +6,7 @@
#include "src/builtins/builtins.h"
#include "src/conversions.h"
#include "src/counters.h"
-#include "src/factory.h"
+#include "src/heap/factory.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
@@ -14,97 +14,94 @@ namespace v8 {
namespace internal {
// -----------------------------------------------------------------------------
-// ES6 section 24.2 DataView Objects
+// ES #sec-dataview-objects
-// ES6 section 24.2.2 The DataView Constructor for the [[Call]] case.
+// ES #sec-dataview-constructor
BUILTIN(DataViewConstructor) {
HandleScope scope(isolate);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate,
- NewTypeError(MessageTemplate::kConstructorNotFunction,
- isolate->factory()->NewStringFromAsciiChecked("DataView")));
-}
-
-// ES6 section 24.2.2 The DataView Constructor for the [[Construct]] case.
-BUILTIN(DataViewConstructor_ConstructStub) {
- HandleScope scope(isolate);
- Handle<JSFunction> target = args.target();
- Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
- Handle<Object> buffer = args.atOrUndefined(isolate, 1);
- Handle<Object> byte_offset = args.atOrUndefined(isolate, 2);
- Handle<Object> byte_length = args.atOrUndefined(isolate, 3);
-
- // 2. If Type(buffer) is not Object, throw a TypeError exception.
- // 3. If buffer does not have an [[ArrayBufferData]] internal slot, throw a
- // TypeError exception.
- if (!buffer->IsJSArrayBuffer()) {
+ if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kDataViewNotArrayBuffer));
- }
- Handle<JSArrayBuffer> array_buffer = Handle<JSArrayBuffer>::cast(buffer);
-
- // 4. Let offset be ToIndex(byteOffset).
- Handle<Object> offset;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, offset,
- Object::ToIndex(isolate, byte_offset, MessageTemplate::kInvalidOffset));
+ isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "DataView")));
+ } else { // [[Construct]]
+ Handle<JSFunction> target = args.target();
+ Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
+ Handle<Object> buffer = args.atOrUndefined(isolate, 1);
+ Handle<Object> byte_offset = args.atOrUndefined(isolate, 2);
+ Handle<Object> byte_length = args.atOrUndefined(isolate, 3);
+
+ // 2. If Type(buffer) is not Object, throw a TypeError exception.
+ // 3. If buffer does not have an [[ArrayBufferData]] internal slot, throw a
+ // TypeError exception.
+ if (!buffer->IsJSArrayBuffer()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kDataViewNotArrayBuffer));
+ }
+ Handle<JSArrayBuffer> array_buffer = Handle<JSArrayBuffer>::cast(buffer);
- // 5. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
- // We currently violate the specification at this point.
+ // 4. Let offset be ? ToIndex(byteOffset).
+ Handle<Object> offset;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, offset,
+ Object::ToIndex(isolate, byte_offset, MessageTemplate::kInvalidOffset));
- // 6. Let bufferByteLength be the value of buffer's [[ArrayBufferByteLength]]
- // internal slot.
- double const buffer_byte_length = array_buffer->byte_length()->Number();
+ // 5. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
+ // We currently violate the specification at this point. TODO: Fix that.
- // 7. If offset > bufferByteLength, throw a RangeError exception
- if (offset->Number() > buffer_byte_length) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewRangeError(MessageTemplate::kInvalidOffset, offset));
- }
+ // 6. Let bufferByteLength be the value of buffer's
+ // [[ArrayBufferByteLength]] internal slot.
+ double const buffer_byte_length = array_buffer->byte_length()->Number();
- Handle<Object> view_byte_length;
- if (byte_length->IsUndefined(isolate)) {
- // 8. If byteLength is undefined, then
- // a. Let viewByteLength be bufferByteLength - offset.
- view_byte_length =
- isolate->factory()->NewNumber(buffer_byte_length - offset->Number());
- } else {
- // 9. Else,
- // a. Let viewByteLength be ? ToIndex(byteLength).
- // b. If offset+viewByteLength > bufferByteLength, throw a RangeError
- // exception
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, view_byte_length,
- Object::ToIndex(isolate, byte_length,
- MessageTemplate::kInvalidDataViewLength));
- if (offset->Number() + view_byte_length->Number() > buffer_byte_length) {
+ // 7. If offset > bufferByteLength, throw a RangeError exception.
+ if (offset->Number() > buffer_byte_length) {
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewRangeError(MessageTemplate::kInvalidDataViewLength));
+ isolate, NewRangeError(MessageTemplate::kInvalidOffset, offset));
}
- }
- // 10. Let O be ? OrdinaryCreateFromConstructor(NewTarget,
- // "%DataViewPrototype%", Ā«[[DataView]], [[ViewedArrayBuffer]],
- // [[ByteLength]], [[ByteOffset]]Ā»).
- // 11. Set O's [[DataView]] internal slot to true.
- Handle<JSObject> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- JSObject::New(target, new_target));
- for (int i = 0; i < ArrayBufferView::kEmbedderFieldCount; ++i) {
- Handle<JSDataView>::cast(result)->SetEmbedderField(i, Smi::kZero);
- }
+ Handle<Object> view_byte_length;
+ if (byte_length->IsUndefined(isolate)) {
+ // 8. If byteLength is either not present or undefined, then
+ // a. Let viewByteLength be bufferByteLength - offset.
+ view_byte_length =
+ isolate->factory()->NewNumber(buffer_byte_length - offset->Number());
+ } else {
+ // 9. Else,
+ // a. Let viewByteLength be ? ToIndex(byteLength).
+ // b. If offset+viewByteLength > bufferByteLength, throw a
+ // RangeError exception.
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, view_byte_length,
+ Object::ToIndex(isolate, byte_length,
+ MessageTemplate::kInvalidDataViewLength));
+ if (offset->Number() + view_byte_length->Number() > buffer_byte_length) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kInvalidDataViewLength));
+ }
+ }
- // 12. Set O's [[ViewedArrayBuffer]] internal slot to buffer.
- Handle<JSDataView>::cast(result)->set_buffer(*array_buffer);
+ // 10. Let O be ? OrdinaryCreateFromConstructor(NewTarget,
+ // "%DataViewPrototype%", Ā«[[DataView]], [[ViewedArrayBuffer]],
+ // [[ByteLength]], [[ByteOffset]]Ā»).
+ Handle<JSObject> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ JSObject::New(target, new_target));
+ for (int i = 0; i < ArrayBufferView::kEmbedderFieldCount; ++i) {
+ Handle<JSDataView>::cast(result)->SetEmbedderField(i, Smi::kZero);
+ }
+
+ // 11. Set O's [[ViewedArrayBuffer]] internal slot to buffer.
+ Handle<JSDataView>::cast(result)->set_buffer(*array_buffer);
- // 13. Set O's [[ByteLength]] internal slot to viewByteLength.
- Handle<JSDataView>::cast(result)->set_byte_length(*view_byte_length);
+ // 12. Set O's [[ByteLength]] internal slot to viewByteLength.
+ Handle<JSDataView>::cast(result)->set_byte_length(*view_byte_length);
- // 14. Set O's [[ByteOffset]] internal slot to offset.
- Handle<JSDataView>::cast(result)->set_byte_offset(*offset);
+ // 13. Set O's [[ByteOffset]] internal slot to offset.
+ Handle<JSDataView>::cast(result)->set_byte_offset(*offset);
- // 15. Return O.
- return *result;
+ // 14. Return O.
+ return *result;
+ }
}
// ES6 section 24.2.4.1 get DataView.prototype.buffer
@@ -176,7 +173,7 @@ MaybeHandle<Object> AllocateResult(Isolate* isolate, uint64_t value) {
template <typename T>
MaybeHandle<Object> GetViewValue(Isolate* isolate, Handle<JSDataView> data_view,
Handle<Object> request_index,
- bool is_little_endian) {
+ bool is_little_endian, const char* method) {
ASSIGN_RETURN_ON_EXCEPTION(
isolate, request_index,
Object::ToIndex(isolate, request_index,
@@ -190,6 +187,13 @@ MaybeHandle<Object> GetViewValue(Isolate* isolate, Handle<JSDataView> data_view,
}
Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(data_view->buffer()),
isolate);
+ if (buffer->was_neutered()) {
+ Handle<String> operation =
+ isolate->factory()->NewStringFromAsciiChecked(method);
+ THROW_NEW_ERROR(
+ isolate, NewTypeError(MessageTemplate::kDetachedOperation, operation),
+ Object);
+ }
size_t const data_view_byte_offset = NumberToSize(data_view->byte_offset());
size_t const data_view_byte_length = NumberToSize(data_view->byte_length());
if (get_index + sizeof(T) > data_view_byte_length ||
@@ -290,7 +294,8 @@ uint64_t DataViewConvertValue<uint64_t>(Handle<Object> value) {
template <typename T>
MaybeHandle<Object> SetViewValue(Isolate* isolate, Handle<JSDataView> data_view,
Handle<Object> request_index,
- bool is_little_endian, Handle<Object> value) {
+ bool is_little_endian, Handle<Object> value,
+ const char* method) {
ASSIGN_RETURN_ON_EXCEPTION(
isolate, request_index,
Object::ToIndex(isolate, request_index,
@@ -306,6 +311,13 @@ MaybeHandle<Object> SetViewValue(Isolate* isolate, Handle<JSDataView> data_view,
}
Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(data_view->buffer()),
isolate);
+ if (buffer->was_neutered()) {
+ Handle<String> operation =
+ isolate->factory()->NewStringFromAsciiChecked(method);
+ THROW_NEW_ERROR(
+ isolate, NewTypeError(MessageTemplate::kDetachedOperation, operation),
+ Object);
+ }
size_t const data_view_byte_offset = NumberToSize(data_view->byte_offset());
size_t const data_view_byte_length = NumberToSize(data_view->byte_length());
if (get_index + sizeof(T) > data_view_byte_length ||
@@ -343,7 +355,8 @@ MaybeHandle<Object> SetViewValue(Isolate* isolate, Handle<JSDataView> data_view,
ASSIGN_RETURN_FAILURE_ON_EXCEPTION( \
isolate, result, \
GetViewValue<type>(isolate, data_view, byte_offset, \
- is_little_endian->BooleanValue())); \
+ is_little_endian->BooleanValue(), \
+ "DataView.prototype.get" #Type)); \
return *result; \
}
DATA_VIEW_PROTOTYPE_GET(Int8, int8_t)
@@ -369,7 +382,8 @@ DATA_VIEW_PROTOTYPE_GET(BigUint64, uint64_t)
ASSIGN_RETURN_FAILURE_ON_EXCEPTION( \
isolate, result, \
SetViewValue<type>(isolate, data_view, byte_offset, \
- is_little_endian->BooleanValue(), value)); \
+ is_little_endian->BooleanValue(), value, \
+ "DataView.prototype.get" #Type)); \
return *result; \
}
DATA_VIEW_PROTOTYPE_SET(Int8, int8_t)
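
Illustrative sketch (not part of the diff): the merged DataView constructor validates the offset and the optional byte length against the buffer before allocating the object, and the get/set helpers now throw a TypeError naming the calling method when the backing buffer has been detached. The sketch covers only the numeric validation; plain doubles stand in for V8 handles and std::out_of_range stands in for the spec's RangeErrors.

#include <iostream>
#include <optional>
#include <stdexcept>

// Returns the view byte length, or throws like steps 7-9 of the DataView
// constructor; std::out_of_range stands in for the spec's RangeErrors.
double ComputeViewByteLength(double buffer_byte_length, double offset,
                             std::optional<double> byte_length) {
  if (offset > buffer_byte_length)
    throw std::out_of_range("invalid offset");               // step 7
  if (!byte_length) return buffer_byte_length - offset;      // step 8
  if (offset + *byte_length > buffer_byte_length)
    throw std::out_of_range("invalid DataView length");      // step 9b
  return *byte_length;                                       // step 9a
}

int main() {
  std::cout << ComputeViewByteLength(16, 4, std::nullopt) << '\n';  // 12
  std::cout << ComputeViewByteLength(16, 4, 8) << '\n';             // 8
  try {
    ComputeViewByteLength(16, 4, 16);
  } catch (const std::out_of_range& e) {
    std::cout << e.what() << '\n';  // invalid DataView length
  }
  return 0;
}
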
diff --git a/deps/v8/src/builtins/builtins-date.cc b/deps/v8/src/builtins/builtins-date.cc
index 5f9f31e10b..c60275d94e 100644
--- a/deps/v8/src/builtins/builtins-date.cc
+++ b/deps/v8/src/builtins/builtins-date.cc
@@ -193,95 +193,94 @@ Object* SetLocalDateValue(Handle<JSDate> date, double time_val) {
} // namespace
-// ES6 section 20.3.2 The Date Constructor for the [[Call]] case.
+// ES #sec-date-constructor
BUILTIN(DateConstructor) {
HandleScope scope(isolate);
- double const time_val = JSDate::CurrentTimeValue(isolate);
- char buffer[128];
- ToDateString(time_val, ArrayVector(buffer), isolate->date_cache());
- RETURN_RESULT_OR_FAILURE(
- isolate, isolate->factory()->NewStringFromUtf8(CStrVector(buffer)));
-}
-
-// ES6 section 20.3.2 The Date Constructor for the [[Construct]] case.
-BUILTIN(DateConstructor_ConstructStub) {
- HandleScope scope(isolate);
- int const argc = args.length() - 1;
- Handle<JSFunction> target = args.target();
- Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
- double time_val;
- if (argc == 0) {
- time_val = JSDate::CurrentTimeValue(isolate);
- } else if (argc == 1) {
- Handle<Object> value = args.at(1);
- if (value->IsJSDate()) {
- time_val = Handle<JSDate>::cast(value)->value()->Number();
- } else {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
- Object::ToPrimitive(value));
- if (value->IsString()) {
- time_val = ParseDateTimeString(Handle<String>::cast(value));
+ if (args.new_target()->IsUndefined(isolate)) {
+ double const time_val = JSDate::CurrentTimeValue(isolate);
+ char buffer[128];
+ ToDateString(time_val, ArrayVector(buffer), isolate->date_cache());
+ RETURN_RESULT_OR_FAILURE(
+ isolate, isolate->factory()->NewStringFromUtf8(CStrVector(buffer)));
+ } else {
+ int const argc = args.length() - 1;
+ Handle<JSFunction> target = args.target();
+ Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
+ double time_val;
+ if (argc == 0) {
+ time_val = JSDate::CurrentTimeValue(isolate);
+ } else if (argc == 1) {
+ Handle<Object> value = args.at(1);
+ if (value->IsJSDate()) {
+ time_val = Handle<JSDate>::cast(value)->value()->Number();
} else {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
- Object::ToNumber(value));
- time_val = value->Number();
+ Object::ToPrimitive(value));
+ if (value->IsString()) {
+ time_val = ParseDateTimeString(Handle<String>::cast(value));
+ } else {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
+ Object::ToNumber(value));
+ time_val = value->Number();
+ }
}
- }
- } else {
- Handle<Object> year_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year_object,
- Object::ToNumber(args.at(1)));
- Handle<Object> month_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month_object,
- Object::ToNumber(args.at(2)));
- double year = year_object->Number();
- double month = month_object->Number();
- double date = 1.0, hours = 0.0, minutes = 0.0, seconds = 0.0, ms = 0.0;
- if (argc >= 3) {
- Handle<Object> date_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date_object,
- Object::ToNumber(args.at(3)));
- date = date_object->Number();
- if (argc >= 4) {
- Handle<Object> hours_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, hours_object,
- Object::ToNumber(args.at(4)));
- hours = hours_object->Number();
- if (argc >= 5) {
- Handle<Object> minutes_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, minutes_object,
- Object::ToNumber(args.at(5)));
- minutes = minutes_object->Number();
- if (argc >= 6) {
- Handle<Object> seconds_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, seconds_object,
- Object::ToNumber(args.at(6)));
- seconds = seconds_object->Number();
- if (argc >= 7) {
- Handle<Object> ms_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms_object,
- Object::ToNumber(args.at(7)));
- ms = ms_object->Number();
+ } else {
+ Handle<Object> year_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year_object,
+ Object::ToNumber(args.at(1)));
+ Handle<Object> month_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month_object,
+ Object::ToNumber(args.at(2)));
+ double year = year_object->Number();
+ double month = month_object->Number();
+ double date = 1.0, hours = 0.0, minutes = 0.0, seconds = 0.0, ms = 0.0;
+ if (argc >= 3) {
+ Handle<Object> date_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date_object,
+ Object::ToNumber(args.at(3)));
+ date = date_object->Number();
+ if (argc >= 4) {
+ Handle<Object> hours_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, hours_object,
+ Object::ToNumber(args.at(4)));
+ hours = hours_object->Number();
+ if (argc >= 5) {
+ Handle<Object> minutes_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, minutes_object,
+ Object::ToNumber(args.at(5)));
+ minutes = minutes_object->Number();
+ if (argc >= 6) {
+ Handle<Object> seconds_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, seconds_object,
+ Object::ToNumber(args.at(6)));
+ seconds = seconds_object->Number();
+ if (argc >= 7) {
+ Handle<Object> ms_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, ms_object, Object::ToNumber(args.at(7)));
+ ms = ms_object->Number();
+ }
}
}
}
}
+ if (!std::isnan(year)) {
+ double const y = DoubleToInteger(year);
+ if (0.0 <= y && y <= 99) year = 1900 + y;
+ }
+ double const day = MakeDay(year, month, date);
+ double const time = MakeTime(hours, minutes, seconds, ms);
+ time_val = MakeDate(day, time);
+ if (time_val >= -DateCache::kMaxTimeBeforeUTCInMs &&
+ time_val <= DateCache::kMaxTimeBeforeUTCInMs) {
+ time_val = isolate->date_cache()->ToUTC(static_cast<int64_t>(time_val));
+ } else {
+ time_val = std::numeric_limits<double>::quiet_NaN();
+ }
}
- if (!std::isnan(year)) {
- double const y = DoubleToInteger(year);
- if (0.0 <= y && y <= 99) year = 1900 + y;
- }
- double const day = MakeDay(year, month, date);
- double const time = MakeTime(hours, minutes, seconds, ms);
- time_val = MakeDate(day, time);
- if (time_val >= -DateCache::kMaxTimeBeforeUTCInMs &&
- time_val <= DateCache::kMaxTimeBeforeUTCInMs) {
- time_val = isolate->date_cache()->ToUTC(static_cast<int64_t>(time_val));
- } else {
- time_val = std::numeric_limits<double>::quiet_NaN();
- }
+ RETURN_RESULT_OR_FAILURE(isolate,
+ JSDate::New(target, new_target, time_val));
}
- RETURN_RESULT_OR_FAILURE(isolate, JSDate::New(target, new_target, time_val));
}
// ES6 section 20.3.3.1 Date.now ( )
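
Illustrative sketch (not part of the diff): the merged Date constructor keeps the legacy two-digit-year rule (years 0-99 map to 1900-1999) and only converts the composed time value to UTC while it lies within DateCache::kMaxTimeBeforeUTCInMs; anything outside that window becomes NaN. The limit constant and ToUTC below are placeholders for the real definitions in src/date.h.

#include <cmath>
#include <cstdint>
#include <iostream>
#include <limits>

// Placeholder; the real DateCache::kMaxTimeBeforeUTCInMs lives in src/date.h.
constexpr double kMaxTimeBeforeUTCInMs = 8.64e15;

// Stand-in for DateCache::ToUTC (identity here; the real one applies the
// local time zone offset before storing the value).
double ToUTC(int64_t time_ms) { return static_cast<double>(time_ms); }

// Legacy two-digit-year rule from the builtin: 0..99 maps to 1900..1999.
double AdjustYear(double year) {
  if (!std::isnan(year)) {
    double y = std::trunc(year);
    if (0.0 <= y && y <= 99.0) return 1900.0 + y;
  }
  return year;
}

// Out-of-range time values become NaN instead of being converted to UTC.
double FinalizeTimeValue(double time_val) {
  if (time_val >= -kMaxTimeBeforeUTCInMs && time_val <= kMaxTimeBeforeUTCInMs)
    return ToUTC(static_cast<int64_t>(time_val));
  return std::numeric_limits<double>::quiet_NaN();
}

int main() {
  std::cout << AdjustYear(87) << '\n';                       // 1987
  std::cout << AdjustYear(2018) << '\n';                     // 2018
  std::cout << std::isnan(FinalizeTimeValue(9e15)) << '\n';  // 1
  return 0;
}
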
diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h
index bf5b9086aa..0f60dfd97e 100644
--- a/deps/v8/src/builtins/builtins-definitions.h
+++ b/deps/v8/src/builtins/builtins-definitions.h
@@ -65,7 +65,6 @@ namespace internal {
TFC(ConstructWithArrayLike, ConstructWithArrayLike, 1) \
ASM(ConstructForwardVarargs) \
ASM(ConstructFunctionForwardVarargs) \
- ASM(JSConstructStubApi) \
ASM(JSConstructStubGenericRestrictedReturn) \
ASM(JSConstructStubGenericUnrestrictedReturn) \
ASM(JSBuiltinsConstructStub) \
@@ -110,11 +109,9 @@ namespace internal {
ASM(InterpreterEntryTrampoline) \
ASM(InterpreterPushArgsThenCall) \
ASM(InterpreterPushUndefinedAndArgsThenCall) \
- ASM(InterpreterPushArgsThenCallFunction) \
- ASM(InterpreterPushUndefinedAndArgsThenCallFunction) \
ASM(InterpreterPushArgsThenCallWithFinalSpread) \
ASM(InterpreterPushArgsThenConstruct) \
- ASM(InterpreterPushArgsThenConstructFunction) \
+ ASM(InterpreterPushArgsThenConstructArrayFunction) \
ASM(InterpreterPushArgsThenConstructWithFinalSpread) \
ASM(InterpreterEnterBytecodeAdvance) \
ASM(InterpreterEnterBytecodeDispatch) \
@@ -123,7 +120,6 @@ namespace internal {
/* Code life-cycle */ \
ASM(CompileLazy) \
ASM(CompileLazyDeoptimizedCode) \
- ASM(CheckOptimizationMarker) \
ASM(DeserializeLazy) \
ASM(InstantiateAsmJs) \
ASM(NotifyDeoptimized) \
@@ -171,6 +167,7 @@ namespace internal {
TFC(NewArgumentsElements, NewArgumentsElements, 1) \
\
/* Debugger */ \
+ TFJ(DebugBreakTrampoline, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
ASM(FrameDropperTrampoline) \
ASM(HandleDebuggerStatement) \
\
@@ -214,6 +211,7 @@ namespace internal {
TFH(LoadIC_Uninitialized, LoadWithVector) \
TFH(StoreGlobalIC_Slow, StoreWithVector) \
TFH(StoreIC_Uninitialized, StoreWithVector) \
+ TFH(StoreInArrayLiteralIC_Slow, StoreWithVector) \
\
/* Microtask helpers */ \
TFS(EnqueueMicrotask, kMicrotask) \
@@ -246,8 +244,19 @@ namespace internal {
/* ES6 #sec-array.of */ \
TFJ(ArrayOf, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES7 #sec-array.prototype.includes */ \
+ TFS(ArrayIncludesSmiOrObject, kElements, kSearchElement, kLength, \
+ kFromIndex) \
+ TFS(ArrayIncludesPackedDoubles, kElements, kSearchElement, kLength, \
+ kFromIndex) \
+ TFS(ArrayIncludesHoleyDoubles, kElements, kSearchElement, kLength, \
+ kFromIndex) \
TFJ(ArrayIncludes, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.indexof */ \
+ TFS(ArrayIndexOfSmiOrObject, kElements, kSearchElement, kLength, kFromIndex) \
+ TFS(ArrayIndexOfPackedDoubles, kElements, kSearchElement, kLength, \
+ kFromIndex) \
+ TFS(ArrayIndexOfHoleyDoubles, kElements, kSearchElement, kLength, \
+ kFromIndex) \
TFJ(ArrayIndexOf, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.pop */ \
CPP(ArrayPop) \
@@ -259,7 +268,6 @@ namespace internal {
CPP(ArrayShift) \
TFJ(ArrayPrototypeShift, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.slice */ \
- CPP(ArraySlice) \
TFJ(ArrayPrototypeSlice, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.splice */ \
CPP(ArraySplice) \
@@ -355,10 +363,18 @@ namespace internal {
TFJ(ArrayPrototypeValues, 0) \
/* ES6 #sec-%arrayiteratorprototype%.next */ \
TFJ(ArrayIteratorPrototypeNext, 0) \
+ /* https://tc39.github.io/proposal-flatMap/#sec-FlattenIntoArray */ \
+ TFS(FlattenIntoArray, kTarget, kSource, kSourceLength, kStart, kDepth) \
+ TFS(FlatMapIntoArray, kTarget, kSource, kSourceLength, kStart, kDepth, \
+ kMapperFunction, kThisArg) \
+ /* https://tc39.github.io/proposal-flatMap/#sec-Array.prototype.flatten */ \
+ TFJ(ArrayPrototypeFlatten, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ /* https://tc39.github.io/proposal-flatMap/#sec-Array.prototype.flatMap */ \
+ TFJ(ArrayPrototypeFlatMap, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
\
/* ArrayBuffer */ \
+ /* ES #sec-arraybuffer-constructor */ \
CPP(ArrayBufferConstructor) \
- CPP(ArrayBufferConstructor_ConstructStub) \
CPP(ArrayBufferConstructor_DoNotInitialize) \
CPP(ArrayBufferPrototypeGetByteLength) \
CPP(ArrayBufferIsView) \
@@ -374,7 +390,6 @@ namespace internal {
\
/* BigInt */ \
CPP(BigIntConstructor) \
- CPP(BigIntConstructor_ConstructStub) \
CPP(BigIntAsUintN) \
CPP(BigIntAsIntN) \
CPP(BigIntPrototypeToLocaleString) \
@@ -382,8 +397,8 @@ namespace internal {
CPP(BigIntPrototypeValueOf) \
\
/* Boolean */ \
+ /* ES #sec-boolean-constructor */ \
CPP(BooleanConstructor) \
- CPP(BooleanConstructor_ConstructStub) \
/* ES6 #sec-boolean.prototype.tostring */ \
TFJ(BooleanPrototypeToString, 0) \
/* ES6 #sec-boolean.prototype.valueof */ \
@@ -435,8 +450,8 @@ namespace internal {
CPP(ConsoleContext) \
\
/* DataView */ \
+ /* ES #sec-dataview-constructor */ \
CPP(DataViewConstructor) \
- CPP(DataViewConstructor_ConstructStub) \
CPP(DataViewPrototypeGetBuffer) \
CPP(DataViewPrototypeGetByteLength) \
CPP(DataViewPrototypeGetByteOffset) \
@@ -462,8 +477,8 @@ namespace internal {
CPP(DataViewPrototypeSetBigUint64) \
\
/* Date */ \
+ /* ES #sec-date-constructor */ \
CPP(DateConstructor) \
- CPP(DateConstructor_ConstructStub) \
/* ES6 #sec-date.prototype.getdate */ \
TFJ(DatePrototypeGetDate, 0) \
/* ES6 #sec-date.prototype.getday */ \
@@ -599,6 +614,7 @@ namespace internal {
TFH(StoreICTrampoline, Store) \
TFH(KeyedStoreIC, StoreWithVector) \
TFH(KeyedStoreICTrampoline, Store) \
+ TFH(StoreInArrayLiteralIC, StoreWithVector) \
TFH(LoadGlobalIC, LoadGlobalWithVector) \
TFH(LoadGlobalICInsideTypeof, LoadGlobalWithVector) \
TFH(LoadGlobalICTrampoline, LoadGlobal) \
@@ -699,11 +715,8 @@ namespace internal {
\
/* Number */ \
TFC(AllocateHeapNumber, AllocateHeapNumber, 1) \
- /* ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Call]] case */ \
+ /* ES #sec-number-constructor */ \
TFJ(NumberConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- /* ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Construct]] case */ \
- TFJ(NumberConstructor_ConstructStub, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-number.isfinite */ \
TFJ(NumberIsFinite, 1, kNumber) \
/* ES6 #sec-number.isinteger */ \
@@ -748,9 +761,8 @@ namespace internal {
TFS(Negate, kValue) \
\
/* Object */ \
+ /* ES #sec-object-constructor */ \
TFJ(ObjectConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- TFJ(ObjectConstructor_ConstructStub, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
CPP(ObjectAssign) \
/* ES #sec-object.create */ \
TFJ(ObjectCreate, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
@@ -814,7 +826,8 @@ namespace internal {
TFJ(PromiseGetCapabilitiesExecutor, 2, kResolve, kReject) \
/* ES6 #sec-newpromisecapability */ \
TFS(NewPromiseCapability, kConstructor, kDebugEvent) \
- TFJ(PromiseConstructorLazyDeoptContinuation, 2, kPromise, kResult) \
+ TFJ(PromiseConstructorLazyDeoptContinuation, 4, kPromise, kReject, \
+ kException, kResult) \
/* ES6 #sec-promise-executor */ \
TFJ(PromiseConstructor, 1, kExecutor) \
CPP(IsPromise) \
@@ -853,9 +866,7 @@ namespace internal {
TFJ(PromiseInternalResolve, 2, kPromise, kResolution) \
\
/* Proxy */ \
- TFJ(ProxyConstructor, 0) \
- TFJ(ProxyConstructor_ConstructStub, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ TFJ(ProxyConstructor, 2, kTarget, kHandler) \
TFJ(ProxyRevocable, 2, kTarget, kHandler) \
TFJ(ProxyRevoke, 0) \
TFS(ProxyGetProperty, kProxy, kName, kReceiverValue) \
@@ -909,6 +920,8 @@ namespace internal {
TFJ(RegExpPrototypeIgnoreCaseGetter, 0) \
/* ES #sec-regexp.prototype-@@match */ \
TFJ(RegExpPrototypeMatch, 1, kString) \
+ /* https://tc39.github.io/proposal-string-matchall/ */ \
+ TFJ(RegExpPrototypeMatchAll, 1, kString) \
/* ES #sec-get-regexp.prototype.multiline */ \
TFJ(RegExpPrototypeMultilineGetter, 0) \
/* ES #sec-regexp.prototype-@@search */ \
@@ -936,6 +949,10 @@ namespace internal {
TFS(RegExpSearchFast, kReceiver, kPattern) \
TFS(RegExpSplit, kRegExp, kString, kLimit) \
\
+ /* RegExp String Iterator */ \
+ /* https://tc39.github.io/proposal-string-matchall/ */ \
+ TFJ(RegExpStringIteratorPrototypeNext, 0) \
+ \
/* Set */ \
TFJ(SetConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
TFJ(SetPrototypeHas, 1, kKey) \
@@ -970,9 +987,9 @@ namespace internal {
CPP(AtomicsWake) \
\
/* String */ \
+ /* ES #sec-string-constructor */ \
TFJ(StringConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- TFJ(StringConstructor_ConstructStub, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ /* ES #sec-string.fromcodepoint */ \
CPP(StringFromCodePoint) \
/* ES6 #sec-string.fromcharcode */ \
TFJ(StringFromCharCode, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
@@ -1013,6 +1030,8 @@ namespace internal {
TFJ(StringPrototypeLink, 1, kValue) \
/* ES6 #sec-string.prototype.match */ \
TFJ(StringPrototypeMatch, 1, kRegexp) \
+ /* ES #sec-string.prototype.matchAll */ \
+ TFJ(StringPrototypeMatchAll, 1, kRegexp) \
/* ES6 #sec-string.prototype.localecompare */ \
CPP(StringPrototypeLocaleCompare) \
/* ES6 #sec-string.prototype.padEnd */ \
@@ -1063,8 +1082,8 @@ namespace internal {
TFJ(StringIteratorPrototypeNext, 0) \
\
/* Symbol */ \
+ /* ES #sec-symbol-constructor */ \
CPP(SymbolConstructor) \
- CPP(SymbolConstructor_ConstructStub) \
/* ES6 #sec-symbol.for */ \
CPP(SymbolFor) \
/* ES6 #sec-symbol.keyfor */ \
@@ -1081,9 +1100,11 @@ namespace internal {
TFS(TypedArrayInitialize, kHolder, kLength, kElementSize, kInitialize) \
TFS(TypedArrayInitializeWithBuffer, kHolder, kLength, kBuffer, kElementSize, \
kByteOffset) \
+ /* ES #sec-typedarray-constructors */ \
+ TFS(CreateTypedArray, kTarget, kNewTarget, kArg1, kArg2, kArg3) \
+ TFJ(TypedArrayBaseConstructor, 0) \
+ TFJ(TypedArrayConstructorLazyDeoptContinuation, 1, kResult) \
TFJ(TypedArrayConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- TFJ(TypedArrayConstructor_ConstructStub, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
CPP(TypedArrayPrototypeBuffer) \
/* ES6 #sec-get-%typedarray%.prototype.bytelength */ \
TFJ(TypedArrayPrototypeByteLength, 0) \
@@ -1266,6 +1287,7 @@ namespace internal {
V(AsyncGeneratorResolve) \
V(PromiseAll) \
V(PromiseConstructor) \
+ V(PromiseConstructorLazyDeoptContinuation) \
V(PromiseFulfillReactionJob) \
V(PromiseRace) \
V(ResolvePromise)
@@ -1298,8 +1320,6 @@ namespace internal {
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, V, \
IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
-#define BUILTINS_WITH_UNTAGGED_PARAMS(V) V(WasmCompileLazy)
-
} // namespace internal
} // namespace v8
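
Illustrative sketch (not part of the diff): builtins-definitions.h is an X-macro list. Each builtin appears once with its kind (CPP, TFJ, TFS, TFC, TFH, ASM), and consumers instantiate the list with their own macros to generate the builtin enum, name tables and declarations, which is why this part of the commit only adds and removes rows. The entries and consumers below are hypothetical, and the real list macros take more parameters.

#include <iostream>

// Hypothetical miniature of the BUILTIN_LIST pattern: each row names a
// builtin and its kind; consumers expand the list with their own macros.
#define DEMO_BUILTIN_LIST(CPP, TFJ) \
  CPP(DateConstructor)              \
  TFJ(NumberConstructor, 1)         \
  TFJ(StringConstructor, 1)

// Consumer 1: an enum of builtin ids.
#define AS_ENUM_CPP(Name) k##Name,
#define AS_ENUM_TFJ(Name, Argc) k##Name,
enum BuiltinId { DEMO_BUILTIN_LIST(AS_ENUM_CPP, AS_ENUM_TFJ) kBuiltinCount };

// Consumer 2: a name table for debugging output.
#define AS_NAME_CPP(Name) #Name,
#define AS_NAME_TFJ(Name, Argc) #Name,
const char* kBuiltinNames[] = {DEMO_BUILTIN_LIST(AS_NAME_CPP, AS_NAME_TFJ)};

int main() {
  for (int i = 0; i < kBuiltinCount; ++i) std::cout << kBuiltinNames[i] << '\n';
  return 0;
}
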
diff --git a/deps/v8/src/builtins/builtins-function-gen.cc b/deps/v8/src/builtins/builtins-function-gen.cc
index 7c1db5093d..05b0fb9fcd 100644
--- a/deps/v8/src/builtins/builtins-function-gen.cc
+++ b/deps/v8/src/builtins/builtins-function-gen.cc
@@ -6,6 +6,7 @@
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
#include "src/frame-constants.h"
+#include "src/objects/descriptor-array.h"
namespace v8 {
namespace internal {
@@ -43,9 +44,14 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
// AccessorInfo objects. In that case, their value can be recomputed even if
// the actual value on the object changes.
Comment("Check descriptor array length");
- Node* descriptors = LoadMapDescriptors(receiver_map);
- Node* descriptors_length = LoadFixedArrayBaseLength(descriptors);
- GotoIf(SmiLessThanOrEqual(descriptors_length, SmiConstant(1)), &slow);
+ TNode<DescriptorArray> descriptors = LoadMapDescriptors(receiver_map);
+ // Minimum descriptor array length required for fast path.
+ const int min_descriptors_length = DescriptorArray::LengthFor(Max(
+ JSFunction::kLengthDescriptorIndex, JSFunction::kNameDescriptorIndex));
+ TNode<Smi> descriptors_length = LoadFixedArrayBaseLength(descriptors);
+ GotoIf(SmiLessThanOrEqual(descriptors_length,
+ SmiConstant(min_descriptors_length)),
+ &slow);
// Check whether the length and name properties are still present as
// AccessorInfo objects. In that case, their value can be recomputed even if
@@ -53,27 +59,27 @@ TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) {
Comment("Check name and length properties");
{
const int length_index = JSFunction::kLengthDescriptorIndex;
- Node* maybe_length = LoadFixedArrayElement(
- descriptors, DescriptorArray::ToKeyIndex(length_index));
+ TNode<Name> maybe_length = CAST(LoadFixedArrayElement(
+ descriptors, DescriptorArray::ToKeyIndex(length_index)));
GotoIf(WordNotEqual(maybe_length, LoadRoot(Heap::klength_stringRootIndex)),
&slow);
- Node* maybe_length_accessor = LoadFixedArrayElement(
+ TNode<Object> maybe_length_accessor = LoadFixedArrayElement(
descriptors, DescriptorArray::ToValueIndex(length_index));
GotoIf(TaggedIsSmi(maybe_length_accessor), &slow);
- Node* length_value_map = LoadMap(maybe_length_accessor);
+ Node* length_value_map = LoadMap(CAST(maybe_length_accessor));
GotoIfNot(IsAccessorInfoMap(length_value_map), &slow);
const int name_index = JSFunction::kNameDescriptorIndex;
- Node* maybe_name = LoadFixedArrayElement(
- descriptors, DescriptorArray::ToKeyIndex(name_index));
+ TNode<Name> maybe_name = CAST(LoadFixedArrayElement(
+ descriptors, DescriptorArray::ToKeyIndex(name_index)));
GotoIf(WordNotEqual(maybe_name, LoadRoot(Heap::kname_stringRootIndex)),
&slow);
- Node* maybe_name_accessor = LoadFixedArrayElement(
+ TNode<Object> maybe_name_accessor = LoadFixedArrayElement(
descriptors, DescriptorArray::ToValueIndex(name_index));
GotoIf(TaggedIsSmi(maybe_name_accessor), &slow);
- Node* name_value_map = LoadMap(maybe_name_accessor);
+ TNode<Map> name_value_map = LoadMap(CAST(maybe_name_accessor));
GotoIfNot(IsAccessorInfoMap(name_value_map), &slow);
}
diff --git a/deps/v8/src/builtins/builtins-handler-gen.cc b/deps/v8/src/builtins/builtins-handler-gen.cc
index 48c28ab730..29422ab72c 100644
--- a/deps/v8/src/builtins/builtins-handler-gen.cc
+++ b/deps/v8/src/builtins/builtins-handler-gen.cc
@@ -55,6 +55,15 @@ TF_BUILTIN(KeyedStoreIC_Slow, CodeStubAssembler) {
receiver, name);
}
+TF_BUILTIN(StoreInArrayLiteralIC_Slow, CodeStubAssembler) {
+ Node* array = Parameter(Descriptor::kReceiver);
+ Node* index = Parameter(Descriptor::kName);
+ Node* value = Parameter(Descriptor::kValue);
+ Node* context = Parameter(Descriptor::kContext);
+ TailCallRuntime(Runtime::kStoreInArrayLiteralIC_Slow, context, value, array,
+ index);
+}
+
TF_BUILTIN(LoadGlobalIC_Slow, CodeStubAssembler) {
Node* name = Parameter(Descriptor::kName);
Node* slot = Parameter(Descriptor::kSlot);
diff --git a/deps/v8/src/builtins/builtins-ic-gen.cc b/deps/v8/src/builtins/builtins-ic-gen.cc
index 94613a6a32..2439cd9d7b 100644
--- a/deps/v8/src/builtins/builtins-ic-gen.cc
+++ b/deps/v8/src/builtins/builtins-ic-gen.cc
@@ -35,6 +35,7 @@ IC_BUILTIN(StoreIC)
IC_BUILTIN(StoreICTrampoline)
IC_BUILTIN(KeyedStoreIC)
IC_BUILTIN(KeyedStoreICTrampoline)
+IC_BUILTIN(StoreInArrayLiteralIC)
IC_BUILTIN_PARAM(LoadGlobalIC, LoadGlobalIC, NOT_INSIDE_TYPEOF)
IC_BUILTIN_PARAM(LoadGlobalICInsideTypeof, LoadGlobalIC, INSIDE_TYPEOF)
diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc
index edc529c798..e1f4aea405 100644
--- a/deps/v8/src/builtins/builtins-internal-gen.cc
+++ b/deps/v8/src/builtins/builtins-internal-gen.cc
@@ -7,7 +7,9 @@
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
#include "src/heap/heap-inl.h"
+#include "src/ic/accessor-assembler.h"
#include "src/macro-assembler.h"
+#include "src/objects/debug-objects.h"
#include "src/objects/shared-function-info.h"
#include "src/runtime/runtime.h"
@@ -73,8 +75,9 @@ TF_BUILTIN(GrowFastSmiOrObjectElements, CodeStubAssembler) {
TF_BUILTIN(NewArgumentsElements, CodeStubAssembler) {
Node* frame = Parameter(Descriptor::kFrame);
- Node* length = SmiToIntPtr(Parameter(Descriptor::kLength));
- Node* mapped_count = SmiToIntPtr(Parameter(Descriptor::kMappedCount));
+ TNode<IntPtrT> length = SmiToIntPtr(Parameter(Descriptor::kLength));
+ TNode<IntPtrT> mapped_count =
+ SmiToIntPtr(Parameter(Descriptor::kMappedCount));
// Check if we can allocate in new space.
ElementsKind kind = PACKED_ELEMENTS;
@@ -102,9 +105,7 @@ TF_BUILTIN(NewArgumentsElements, CodeStubAssembler) {
// The elements might be used to back mapped arguments. In that case fill
// the mapped elements (i.e. the first {mapped_count}) with the hole, but
// make sure not to overshoot the {length} if some arguments are missing.
- Node* number_of_holes =
- SelectConstant(IntPtrLessThan(mapped_count, length), mapped_count,
- length, MachineType::PointerRepresentation());
+ TNode<IntPtrT> number_of_holes = IntPtrMin(mapped_count, length);
Node* the_hole = TheHoleConstant();
// Fill the first elements up to {number_of_holes} with the hole.
@@ -173,6 +174,43 @@ TF_BUILTIN(ReturnReceiver, CodeStubAssembler) {
Return(Parameter(Descriptor::kReceiver));
}
+TF_BUILTIN(DebugBreakTrampoline, CodeStubAssembler) {
+ Label tailcall_to_shared(this);
+ TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ TNode<Object> new_target = CAST(Parameter(BuiltinDescriptor::kNewTarget));
+ TNode<Int32T> arg_count =
+ UncheckedCast<Int32T>(Parameter(BuiltinDescriptor::kArgumentsCount));
+ TNode<JSFunction> function = CAST(LoadFromFrame(
+ StandardFrameConstants::kFunctionOffset, MachineType::TaggedPointer()));
+
+ // Check break-at-entry flag on the debug info.
+ TNode<SharedFunctionInfo> shared =
+ CAST(LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset));
+ TNode<Object> maybe_debug_info =
+ LoadObjectField(shared, SharedFunctionInfo::kDebugInfoOffset);
+ GotoIf(TaggedIsSmi(maybe_debug_info), &tailcall_to_shared);
+
+ {
+ TNode<DebugInfo> debug_info = CAST(maybe_debug_info);
+ TNode<Smi> flags =
+ CAST(LoadObjectField(debug_info, DebugInfo::kFlagsOffset));
+ GotoIfNot(SmiToInt32(SmiAnd(flags, SmiConstant(DebugInfo::kBreakAtEntry))),
+ &tailcall_to_shared);
+
+ CallRuntime(Runtime::kDebugBreakAtEntry, context, function);
+ Goto(&tailcall_to_shared);
+ }
+
+ BIND(&tailcall_to_shared);
+ // Tail call into code object on the SharedFunctionInfo.
+ TNode<Code> code = GetSharedFunctionInfoCode(shared);
+ // Use the ConstructTrampolineDescriptor because it passes new.target too in
+ // case this is called during construct.
+ CSA_ASSERT(this, IsCode(code));
+ ConstructTrampolineDescriptor descriptor(isolate());
+ TailCallStub(descriptor, code, context, function, new_target, arg_count);
+}
+
class RecordWriteCodeStubAssembler : public CodeStubAssembler {
public:
explicit RecordWriteCodeStubAssembler(compiler::CodeAssemblerState* state)
@@ -442,10 +480,10 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) {
Return(TrueConstant());
}
-class DeletePropertyBaseAssembler : public CodeStubAssembler {
+class DeletePropertyBaseAssembler : public AccessorAssembler {
public:
explicit DeletePropertyBaseAssembler(compiler::CodeAssemblerState* state)
- : CodeStubAssembler(state) {}
+ : AccessorAssembler(state) {}
void DeleteDictionaryProperty(Node* receiver, Node* properties, Node* name,
Node* context, Label* dont_delete,
@@ -532,6 +570,8 @@ TF_BUILTIN(DeleteProperty, DeletePropertyBaseAssembler) {
BIND(&dictionary);
{
+ InvalidateValidityCellIfPrototype(receiver_map);
+
Node* properties = LoadSlowProperties(receiver);
DeleteDictionaryProperty(receiver, properties, unique, context,
&dont_delete, &if_notfound);
@@ -853,8 +893,8 @@ TF_BUILTIN(RunMicrotasks, InternalBuiltinsAssembler) {
Goto(&loop);
BIND(&loop);
{
- TNode<HeapObject> microtask = TNode<HeapObject>::UncheckedCast(
- LoadFixedArrayElement(queue, index.value()));
+ TNode<HeapObject> microtask =
+ CAST(LoadFixedArrayElement(queue, index.value()));
index = IntPtrAdd(index.value(), IntPtrConstant(1));
CSA_ASSERT(this, TaggedIsNotSmi(microtask));
@@ -921,8 +961,10 @@ TF_BUILTIN(RunMicrotasks, InternalBuiltinsAssembler) {
// But from our current measurements it doesn't seem to be a
 // serious performance problem, even if the microtask queue is full
// of CallHandlerTasks (which is not a realistic use case anyways).
- CallRuntime(Runtime::kRunMicrotaskCallback, current_context,
- microtask_callback, microtask_data);
+ Node* const result =
+ CallRuntime(Runtime::kRunMicrotaskCallback, current_context,
+ microtask_callback, microtask_data);
+ GotoIfException(result, &if_exception, &var_exception);
Goto(&loop_next);
}
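The RunMicrotasks change above routes an exception from Runtime::kRunMicrotaskCallback to the existing if_exception handler instead of letting it escape the loop. A rough standalone analogue of that behavior (hypothetical types; real microtasks are not std::functions):

#include <cstdio>
#include <deque>
#include <exception>
#include <functional>

using Microtask = std::function<void()>;

// Drain the queue; a throwing callback is reported but must not abort the
// loop, roughly what GotoIfException(result, &if_exception, ...) achieves.
void RunMicrotasks(std::deque<Microtask>& queue) {
  while (!queue.empty()) {
    Microtask task = std::move(queue.front());
    queue.pop_front();
    try {
      task();
    } catch (const std::exception& e) {
      std::printf("microtask threw: %s\n", e.what());  // ~ if_exception path
    }
  }
}

int main() {
  std::deque<Microtask> queue;
  queue.push_back([] { std::printf("task 1\n"); });
  queue.push_back([] { throw std::runtime_error("boom"); });
  queue.push_back([] { std::printf("task 3 still runs\n"); });
  RunMicrotasks(queue);
}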
diff --git a/deps/v8/src/builtins/builtins-interpreter-gen.cc b/deps/v8/src/builtins/builtins-interpreter-gen.cc
index a8552338c8..f0d5160330 100644
--- a/deps/v8/src/builtins/builtins-interpreter-gen.cc
+++ b/deps/v8/src/builtins/builtins-interpreter-gen.cc
@@ -14,12 +14,6 @@ void Builtins::Generate_InterpreterPushArgsThenCall(MacroAssembler* masm) {
masm, ConvertReceiverMode::kAny, InterpreterPushArgsMode::kOther);
}
-void Builtins::Generate_InterpreterPushArgsThenCallFunction(
- MacroAssembler* masm) {
- return Generate_InterpreterPushArgsThenCallImpl(
- masm, ConvertReceiverMode::kAny, InterpreterPushArgsMode::kJSFunction);
-}
-
void Builtins::Generate_InterpreterPushUndefinedAndArgsThenCall(
MacroAssembler* masm) {
return Generate_InterpreterPushArgsThenCallImpl(
@@ -27,13 +21,6 @@ void Builtins::Generate_InterpreterPushUndefinedAndArgsThenCall(
InterpreterPushArgsMode::kOther);
}
-void Builtins::Generate_InterpreterPushUndefinedAndArgsThenCallFunction(
- MacroAssembler* masm) {
- return Generate_InterpreterPushArgsThenCallImpl(
- masm, ConvertReceiverMode::kNullOrUndefined,
- InterpreterPushArgsMode::kJSFunction);
-}
-
void Builtins::Generate_InterpreterPushArgsThenCallWithFinalSpread(
MacroAssembler* masm) {
return Generate_InterpreterPushArgsThenCallImpl(
@@ -52,10 +39,10 @@ void Builtins::Generate_InterpreterPushArgsThenConstructWithFinalSpread(
masm, InterpreterPushArgsMode::kWithFinalSpread);
}
-void Builtins::Generate_InterpreterPushArgsThenConstructFunction(
+void Builtins::Generate_InterpreterPushArgsThenConstructArrayFunction(
MacroAssembler* masm) {
return Generate_InterpreterPushArgsThenConstructImpl(
- masm, InterpreterPushArgsMode::kJSFunction);
+ masm, InterpreterPushArgsMode::kArrayFunction);
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-interpreter.cc b/deps/v8/src/builtins/builtins-interpreter.cc
index 64c43ee4ac..d05a8656db 100644
--- a/deps/v8/src/builtins/builtins-interpreter.cc
+++ b/deps/v8/src/builtins/builtins-interpreter.cc
@@ -14,15 +14,10 @@ namespace internal {
Handle<Code> Builtins::InterpreterPushArgsThenCall(
ConvertReceiverMode receiver_mode, InterpreterPushArgsMode mode) {
switch (mode) {
- case InterpreterPushArgsMode::kJSFunction:
- switch (receiver_mode) {
- case ConvertReceiverMode::kNullOrUndefined:
- return builtin_handle(
- kInterpreterPushUndefinedAndArgsThenCallFunction);
- case ConvertReceiverMode::kNotNullOrUndefined:
- case ConvertReceiverMode::kAny:
- return builtin_handle(kInterpreterPushArgsThenCallFunction);
- }
+ case InterpreterPushArgsMode::kArrayFunction:
+ // There is no special-case handling of calls to Array. They will all go
+ // through the kOther case below.
+ UNREACHABLE();
case InterpreterPushArgsMode::kWithFinalSpread:
return builtin_handle(kInterpreterPushArgsThenCallWithFinalSpread);
case InterpreterPushArgsMode::kOther:
@@ -40,8 +35,8 @@ Handle<Code> Builtins::InterpreterPushArgsThenCall(
Handle<Code> Builtins::InterpreterPushArgsThenConstruct(
InterpreterPushArgsMode mode) {
switch (mode) {
- case InterpreterPushArgsMode::kJSFunction:
- return builtin_handle(kInterpreterPushArgsThenConstructFunction);
+ case InterpreterPushArgsMode::kArrayFunction:
+ return builtin_handle(kInterpreterPushArgsThenConstructArrayFunction);
case InterpreterPushArgsMode::kWithFinalSpread:
return builtin_handle(kInterpreterPushArgsThenConstructWithFinalSpread);
case InterpreterPushArgsMode::kOther:
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.cc b/deps/v8/src/builtins/builtins-iterator-gen.cc
index 21f6039f08..57702556a9 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-iterator-gen.cc
@@ -4,7 +4,7 @@
#include "src/builtins/builtins-iterator-gen.h"
-#include "src/factory-inl.h"
+#include "src/heap/factory-inl.h"
namespace v8 {
namespace internal {
@@ -31,23 +31,42 @@ IteratorRecord IteratorBuiltinsAssembler::GetIterator(Node* context,
Variable* exception) {
GotoIfException(method, if_exception, exception);
- Callable callable = CodeFactory::Call(isolate());
- Node* iterator = CallJS(callable, context, method, object);
- GotoIfException(iterator, if_exception, exception);
-
- Label get_next(this), if_notobject(this, Label::kDeferred);
- GotoIf(TaggedIsSmi(iterator), &if_notobject);
- Branch(IsJSReceiver(iterator), &get_next, &if_notobject);
+ Label if_not_callable(this, Label::kDeferred), if_callable(this);
+ GotoIf(TaggedIsSmi(method), &if_not_callable);
+ Branch(IsCallable(method), &if_callable, &if_not_callable);
- BIND(&if_notobject);
- { ThrowTypeError(context, MessageTemplate::kNotAnIterator, iterator); }
-
- BIND(&get_next);
- Node* const next = GetProperty(context, iterator, factory()->next_string());
- GotoIfException(next, if_exception, exception);
+ BIND(&if_not_callable);
+ {
+ Node* ret = CallRuntime(Runtime::kThrowTypeError, context,
+ SmiConstant(MessageTemplate::kNotIterable), object);
+ GotoIfException(ret, if_exception, exception);
+ Unreachable();
+ }
- return IteratorRecord{TNode<JSReceiver>::UncheckedCast(iterator),
- TNode<Object>::UncheckedCast(next)};
+ BIND(&if_callable);
+ {
+ Callable callable = CodeFactory::Call(isolate());
+ Node* iterator = CallJS(callable, context, method, object);
+ GotoIfException(iterator, if_exception, exception);
+
+ Label get_next(this), if_notobject(this, Label::kDeferred);
+ GotoIf(TaggedIsSmi(iterator), &if_notobject);
+ Branch(IsJSReceiver(iterator), &get_next, &if_notobject);
+
+ BIND(&if_notobject);
+ {
+ Node* ret = CallRuntime(Runtime::kThrowSymbolIteratorInvalid, context);
+ GotoIfException(ret, if_exception, exception);
+ Unreachable();
+ }
+
+ BIND(&get_next);
+ Node* const next = GetProperty(context, iterator, factory()->next_string());
+ GotoIfException(next, if_exception, exception);
+
+ return IteratorRecord{TNode<JSReceiver>::UncheckedCast(iterator),
+ TNode<Object>::UncheckedCast(next)};
+ }
}
Node* IteratorBuiltinsAssembler::IteratorStep(
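The rewritten GetIterator above now validates in two steps: a non-callable @@iterator method throws kNotIterable, and a non-object result from calling it throws via kThrowSymbolIteratorInvalid. A toy C++ model of that order of checks, assuming a hypothetical Value/IteratorRecord object model:

#include <functional>
#include <memory>
#include <stdexcept>

struct Value;
using Method = std::function<std::shared_ptr<Value>(const Value&)>;

// Toy object model, only detailed enough to show the two checks.
struct Value {
  Method iterator_method;         // empty ~ not callable
  bool is_object = false;
  std::shared_ptr<Value> next;    // the "next" property, if an object
};

struct IteratorRecord {
  std::shared_ptr<Value> object;
  std::shared_ptr<Value> next;
};

IteratorRecord GetIterator(const Value& obj) {
  // 1. The @@iterator method must be callable, otherwise "not iterable".
  if (!obj.iterator_method) throw std::runtime_error("kNotIterable");
  // 2. Calling it must yield an object, otherwise Symbol.iterator is invalid.
  std::shared_ptr<Value> iterator = obj.iterator_method(obj);
  if (!iterator || !iterator->is_object)
    throw std::runtime_error("kThrowSymbolIteratorInvalid");
  // 3. Load "next" and package both into the iterator record.
  return IteratorRecord{iterator, iterator->next};
}

int main() {
  auto iter = std::make_shared<Value>();
  iter->is_object = true;
  iter->next = std::make_shared<Value>();
  Value iterable;
  iterable.iterator_method = [&](const Value&) { return iter; };
  IteratorRecord rec = GetIterator(iterable);
  return rec.object == iter ? 0 : 1;
}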
diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc
index 3fb8d7792d..20d9453aa7 100644
--- a/deps/v8/src/builtins/builtins-object-gen.cc
+++ b/deps/v8/src/builtins/builtins-object-gen.cc
@@ -5,7 +5,7 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
-#include "src/factory-inl.h"
+#include "src/heap/factory-inl.h"
#include "src/objects/property-descriptor-object.h"
#include "src/objects/shared-function-info.h"
@@ -141,14 +141,15 @@ Node* ObjectBuiltinsAssembler::ConstructDataDescriptor(Node* context,
Node* ObjectBuiltinsAssembler::IsSpecialReceiverMap(SloppyTNode<Map> map) {
CSA_SLOW_ASSERT(this, IsMap(map));
- Node* is_special = IsSpecialReceiverInstanceType(LoadMapInstanceType(map));
+ TNode<BoolT> is_special =
+ IsSpecialReceiverInstanceType(LoadMapInstanceType(map));
uint32_t mask =
Map::HasNamedInterceptorBit::kMask | Map::IsAccessCheckNeededBit::kMask;
USE(mask);
// Interceptors or access checks imply special receiver.
CSA_ASSERT(this,
- SelectConstant(IsSetWord32(LoadMapBitField(map), mask), is_special,
- Int32Constant(1), MachineRepresentation::kWord32));
+ SelectConstant<BoolT>(IsSetWord32(LoadMapBitField(map), mask),
+ is_special, Int32TrueConstant()));
return is_special;
}
@@ -313,7 +314,7 @@ TNode<JSArray> ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries(
CSA_ASSERT(this, WordEqual(map, LoadMap(object)));
TNode<Uint32T> descriptor_index = TNode<Uint32T>::UncheckedCast(
TruncateIntPtrToInt32(var_descriptor_number.value()));
- Node* next_key = DescriptorArrayGetKey(descriptors, descriptor_index);
+ Node* next_key = GetKey(descriptors, descriptor_index);
// Skip Symbols.
GotoIf(IsSymbol(next_key), &next_descriptor);
@@ -332,8 +333,8 @@ TNode<JSArray> ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries(
VARIABLE(var_property_value, MachineRepresentation::kTagged,
UndefinedConstant());
- Node* descriptor_name_index = DescriptorArrayToKeyIndex(
- TruncateIntPtrToInt32(var_descriptor_number.value()));
+ TNode<IntPtrT> descriptor_name_index = ToKeyIndex<DescriptorArray>(
+ Unsigned(TruncateIntPtrToInt32(var_descriptor_number.value())));
// Let value be ? Get(O, key).
LoadPropertyFromFastObject(object, map, descriptors,
@@ -771,11 +772,15 @@ TF_BUILTIN(ObjectPrototypeToString, ObjectBuiltinsAssembler) {
// as the exception is observable.
Node* receiver_is_array =
CallRuntime(Runtime::kArrayIsArray, context, receiver);
- Node* builtin_tag = SelectTaggedConstant<Object>(
- IsTrue(receiver_is_array), LoadRoot(Heap::kArray_stringRootIndex),
- SelectTaggedConstant<Object>(IsCallableMap(receiver_map),
- LoadRoot(Heap::kFunction_stringRootIndex),
- LoadRoot(Heap::kObject_stringRootIndex)));
+ TNode<String> builtin_tag = Select<String>(
+ IsTrue(receiver_is_array),
+ [=] { return CAST(LoadRoot(Heap::kArray_stringRootIndex)); },
+ [=] {
+ return Select<String>(
+ IsCallableMap(receiver_map),
+ [=] { return CAST(LoadRoot(Heap::kFunction_stringRootIndex)); },
+ [=] { return CAST(LoadRoot(Heap::kObject_stringRootIndex)); });
+ });
// Lookup the @@toStringTag property on the {receiver}.
VARIABLE(var_tag, MachineRepresentation::kTagged,
@@ -1053,7 +1058,7 @@ TF_BUILTIN(GetSuperConstructor, ObjectBuiltinsAssembler) {
Node* object = Parameter(Descriptor::kObject);
Node* context = Parameter(Descriptor::kContext);
- Return(GetSuperConstructor(object, context));
+ Return(GetSuperConstructor(context, object));
}
TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
@@ -1071,8 +1076,8 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
Node* shared =
LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset);
- Node* bytecode_array =
- LoadObjectField(shared, SharedFunctionInfo::kFunctionDataOffset);
+ Node* bytecode_array = LoadSharedFunctionInfoBytecodeArray(shared);
+
Node* frame_size = ChangeInt32ToIntPtr(LoadObjectField(
bytecode_array, BytecodeArray::kFrameSizeOffset, MachineType::Int32()));
Node* size = WordSar(frame_size, IntPtrConstant(kPointerSizeLog2));
@@ -1130,9 +1135,7 @@ TF_BUILTIN(ObjectGetOwnPropertyDescriptor, ObjectBuiltinsAssembler) {
return_undefined(this, Label::kDeferred), if_notunique_name(this);
Node* map = LoadMap(object);
Node* instance_type = LoadMapInstanceType(map);
- GotoIf(Int32LessThanOrEqual(instance_type,
- Int32Constant(LAST_SPECIAL_RECEIVER_TYPE)),
- &call_runtime);
+ GotoIf(IsSpecialReceiverInstanceType(instance_type), &call_runtime);
{
VARIABLE(var_index, MachineType::PointerRepresentation(),
IntPtrConstant(0));
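The nested Select<String> in ObjectPrototypeToString above just picks the builtin tag in priority order; the equivalent choice in plain C++ (illustrative only) is:

#include <cstdio>
#include <string>

// Same priority order as the nested Select<String>: Array beats Function,
// which beats the generic Object tag.
std::string BuiltinTag(bool receiver_is_array, bool receiver_is_callable) {
  if (receiver_is_array) return "Array";
  if (receiver_is_callable) return "Function";
  return "Object";
}

int main() {
  std::printf("[object %s]\n", BuiltinTag(false, true).c_str());  // Function
}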
diff --git a/deps/v8/src/builtins/builtins-promise-gen.cc b/deps/v8/src/builtins/builtins-promise-gen.cc
index 032d0ef100..dd38dbc543 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.cc
+++ b/deps/v8/src/builtins/builtins-promise-gen.cc
@@ -198,6 +198,50 @@ Node* PromiseBuiltinsAssembler::CreatePromiseContext(Node* native_context,
return context;
}
+Node* PromiseBuiltinsAssembler::CreatePromiseAllResolveElementContext(
+ Node* promise_capability, Node* native_context) {
+ CSA_ASSERT(this, IsNativeContext(native_context));
+
+ // TODO(bmeurer): Manually fold this into a single allocation.
+ Node* const array_map = LoadContextElement(
+ native_context, Context::JS_ARRAY_PACKED_ELEMENTS_MAP_INDEX);
+ Node* const values_array = AllocateJSArray(PACKED_ELEMENTS, array_map,
+ IntPtrConstant(0), SmiConstant(0));
+
+ Node* const context =
+ CreatePromiseContext(native_context, kPromiseAllResolveElementLength);
+ StoreContextElementNoWriteBarrier(
+ context, kPromiseAllResolveElementRemainingSlot, SmiConstant(1));
+ StoreContextElementNoWriteBarrier(
+ context, kPromiseAllResolveElementCapabilitySlot, promise_capability);
+ StoreContextElementNoWriteBarrier(
+ context, kPromiseAllResolveElementValuesArraySlot, values_array);
+
+ return context;
+}
+
+Node* PromiseBuiltinsAssembler::CreatePromiseAllResolveElementFunction(
+ Node* context, Node* index, Node* native_context) {
+ CSA_ASSERT(this, TaggedIsSmi(index));
+ CSA_ASSERT(this, SmiGreaterThan(index, SmiConstant(0)));
+ CSA_ASSERT(this, SmiLessThanOrEqual(
+ index, SmiConstant(PropertyArray::HashField::kMax)));
+ CSA_ASSERT(this, IsNativeContext(native_context));
+
+ Node* const map = LoadContextElement(
+ native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
+ Node* const resolve_info = LoadContextElement(
+ native_context, Context::PROMISE_ALL_RESOLVE_ELEMENT_SHARED_FUN);
+ Node* const resolve =
+ AllocateFunctionWithMapAndContext(map, resolve_info, context);
+
+ STATIC_ASSERT(PropertyArray::kNoHashSentinel == 0);
+ StoreObjectFieldNoWriteBarrier(resolve, JSFunction::kPropertiesOrHashOffset,
+ index);
+
+ return resolve;
+}
+
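CreatePromiseAllResolveElementFunction stores the 1-based element index in the closure's identity-hash field (bounded by PropertyArray::HashField::kMax) instead of a dedicated context slot, so a single shared context can serve every resolve-element closure. A rough sketch of that bookkeeping, with illustrative constants rather than V8's real field layout:

#include <cassert>
#include <cstdint>

// Illustrative stand-ins: the index lives in the JSFunction's hash field,
// where 0 (kNoHashSentinel) means "no hash set yet".
constexpr uint32_t kNoHashSentinel = 0;
constexpr uint32_t kHashFieldMax = (1u << 21) - 1;  // ~ HashField::kMax

struct ResolveElementClosure {
  uint32_t hash = kNoHashSentinel;  // doubles as the 1-based element index
};

void SetElementIndex(ResolveElementClosure& fn, uint32_t index_one_based) {
  assert(index_one_based >= 1 && index_one_based <= kHashFieldMax);
  fn.hash = index_one_based;
}

uint32_t GetElementIndex(const ResolveElementClosure& fn) {
  assert(fn.hash != kNoHashSentinel);
  return fn.hash - 1;               // back to the 0-based values-array slot
}

int main() {
  ResolveElementClosure fn;
  SetElementIndex(fn, 1);           // first element of the iterable
  return GetElementIndex(fn) == 0 ? 0 : 1;
}

This is also why the loop below bails out with a RangeError once the index reaches HashField::kMax: the hash field is the only place the index is kept.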
Node* PromiseBuiltinsAssembler::CreatePromiseResolvingFunctionsContext(
Node* promise, Node* debug_event, Node* native_context) {
Node* const context =
@@ -526,6 +570,61 @@ Node* PromiseBuiltinsAssembler::InvokeThen(Node* native_context, Node* receiver,
return var_result.value();
}
+Node* PromiseBuiltinsAssembler::InvokeResolve(Node* native_context,
+ Node* constructor, Node* value,
+ Label* if_exception,
+ Variable* var_exception) {
+ CSA_ASSERT(this, IsNativeContext(native_context));
+
+ VARIABLE(var_result, MachineRepresentation::kTagged);
+ Label if_fast(this), if_slow(this, Label::kDeferred), done(this, &var_result);
+ // We can skip the "resolve" lookup on {constructor} if it's the
+ // Promise constructor and the Promise.resolve protector is intact,
+ // as that guards the lookup path for the "resolve" property on the
+ // Promise constructor.
+ BranchIfPromiseResolveLookupChainIntact(native_context, constructor, &if_fast,
+ &if_slow);
+
+ BIND(&if_fast);
+ {
+ Node* const result = CallBuiltin(Builtins::kPromiseResolve, native_context,
+ constructor, value);
+ GotoIfException(result, if_exception, var_exception);
+
+ var_result.Bind(result);
+ Goto(&done);
+ }
+
+ BIND(&if_slow);
+ {
+ Node* const resolve =
+ GetProperty(native_context, constructor, factory()->resolve_string());
+ GotoIfException(resolve, if_exception, var_exception);
+
+ Node* const result = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
+ native_context, resolve, constructor, value);
+ GotoIfException(result, if_exception, var_exception);
+
+ var_result.Bind(result);
+ Goto(&done);
+ }
+
+ BIND(&done);
+ return var_result.value();
+}
+
+void PromiseBuiltinsAssembler::BranchIfPromiseResolveLookupChainIntact(
+ Node* native_context, Node* constructor, Label* if_fast, Label* if_slow) {
+ CSA_ASSERT(this, IsNativeContext(native_context));
+
+ GotoIfForceSlowPath(if_slow);
+ Node* const promise_fun =
+ LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+ GotoIfNot(WordEqual(promise_fun, constructor), if_slow);
+ Branch(IsPromiseResolveProtectorCellInvalid(), if_slow, if_fast);
+}
+
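BranchIfPromiseResolveLookupChainIntact gates a fast path: when the constructor is the native %Promise% function and the Promise.resolve protector cell is still valid, InvokeResolve can call the PromiseResolve builtin directly and skip the generic "resolve" property lookup. A standalone sketch of that shape, with illustrative names:

#include <functional>
#include <string>
#include <unordered_map>

// Illustrative model: a protector is just a flag that is invalidated when
// user code patches Promise.resolve.
struct Protector { bool intact = true; };

struct Constructor {
  bool is_native_promise = false;
  std::unordered_map<std::string, std::function<int(int)>> properties;
};

int InvokeResolve(const Constructor& ctor, const Protector& protector,
                  int value) {
  if (ctor.is_native_promise && protector.intact) {
    // Fast path: behave like the builtin PromiseResolve, no property lookup.
    return value;
  }
  // Slow path: look up "resolve" and call whatever the user installed.
  return ctor.properties.at("resolve")(value);
}

int main() {
  Protector protector;
  Constructor promise{true, {}};
  return InvokeResolve(promise, protector, 7) == 7 ? 0 : 1;
}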
void PromiseBuiltinsAssembler::BranchIfPromiseSpeciesLookupChainIntact(
Node* native_context, Node* promise_map, Label* if_fast, Label* if_slow) {
CSA_ASSERT(this, IsNativeContext(native_context));
@@ -536,7 +635,7 @@ void PromiseBuiltinsAssembler::BranchIfPromiseSpeciesLookupChainIntact(
GotoIfForceSlowPath(if_slow);
GotoIfNot(WordEqual(LoadMapPrototype(promise_map), promise_prototype),
if_slow);
- Branch(IsSpeciesProtectorCellInvalid(), if_slow, if_fast);
+ Branch(IsPromiseSpeciesProtectorCellInvalid(), if_slow, if_fast);
}
void PromiseBuiltinsAssembler::BranchIfPromiseThenLookupChainIntact(
@@ -668,6 +767,18 @@ TF_BUILTIN(PromiseCapabilityDefaultResolve, PromiseBuiltinsAssembler) {
TF_BUILTIN(PromiseConstructorLazyDeoptContinuation, PromiseBuiltinsAssembler) {
Node* promise = Parameter(Descriptor::kPromise);
+ Node* reject = Parameter(Descriptor::kReject);
+ Node* exception = Parameter(Descriptor::kException);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ Label finally(this);
+
+ GotoIf(IsTheHole(exception), &finally);
+ CallJS(CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
+ context, reject, UndefinedConstant(), exception);
+ Goto(&finally);
+
+ BIND(&finally);
Return(promise);
}
@@ -1171,10 +1282,10 @@ TF_BUILTIN(PromiseResolve, PromiseBuiltinsAssembler) {
// intact, as that guards the lookup path for "constructor" on
// JSPromise instances which have the (initial) Promise.prototype.
Node* const promise_prototype =
- LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+ LoadContextElement(native_context, Context::PROMISE_PROTOTYPE_INDEX);
GotoIfNot(WordEqual(LoadMapPrototype(value_map), promise_prototype),
&if_slow_constructor);
- GotoIf(IsSpeciesProtectorCellInvalid(), &if_slow_constructor);
+ GotoIf(IsPromiseSpeciesProtectorCellInvalid(), &if_slow_constructor);
// If the {constructor} is the Promise function, we just immediately
// return the {value} here and don't bother wrapping it into a
@@ -1606,21 +1717,23 @@ TF_BUILTIN(ResolvePromise, PromiseBuiltinsAssembler) {
// 7. If Type(resolution) is not Object, then
GotoIf(TaggedIsSmi(resolution), &if_fulfill);
- Node* const result_map = LoadMap(resolution);
- GotoIfNot(IsJSReceiverMap(result_map), &if_fulfill);
+ Node* const resolution_map = LoadMap(resolution);
+ GotoIfNot(IsJSReceiverMap(resolution_map), &if_fulfill);
// We can skip the "then" lookup on {resolution} if its [[Prototype]]
// is the (initial) Promise.prototype and the Promise#then protector
// is intact, as that guards the lookup path for the "then" property
// on JSPromise instances which have the (initial) %PromisePrototype%.
- Label if_fast(this), if_slow(this, Label::kDeferred);
+ Label if_fast(this), if_generic(this), if_slow(this, Label::kDeferred);
Node* const native_context = LoadNativeContext(context);
- BranchIfPromiseThenLookupChainIntact(native_context, result_map, &if_fast,
- &if_slow);
+ GotoIfForceSlowPath(&if_slow);
+ GotoIf(IsPromiseThenProtectorCellInvalid(), &if_slow);
+ GotoIfNot(IsJSPromiseMap(resolution_map), &if_generic);
+ Node* const promise_prototype =
+ LoadContextElement(native_context, Context::PROMISE_PROTOTYPE_INDEX);
+ Branch(WordEqual(LoadMapPrototype(resolution_map), promise_prototype),
+ &if_fast, &if_slow);
- // Resolution is a native promise and if it's already resolved or
- // rejected, shortcircuit the resolution procedure by directly
- // reusing the value from the promise.
BIND(&if_fast);
{
Node* const then =
@@ -1629,6 +1742,21 @@ TF_BUILTIN(ResolvePromise, PromiseBuiltinsAssembler) {
Goto(&do_enqueue);
}
+ BIND(&if_generic);
+ {
+ // We can skip the lookup of "then" if the {resolution} is a (newly
+ // created) IterResultObject, as the Promise#then protector also
+ // ensures that the intrinsic %ObjectPrototype% doesn't contain any
+ // "then" property. This helps to avoid negative lookups on iterator
+ // results from async generators.
+ CSA_ASSERT(this, IsJSReceiverMap(resolution_map));
+ CSA_ASSERT(this, Word32BinaryNot(IsPromiseThenProtectorCellInvalid()));
+ Node* const iterator_result_map =
+ LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
+ Branch(WordEqual(resolution_map, iterator_result_map), &if_fulfill,
+ &if_slow);
+ }
+
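The rewritten check in ResolvePromise distinguishes three cases before the "then" lookup: a resolution whose map is the native JSPromise map with the initial prototype (fast), a plain iterator-result object under an intact Promise#then protector (fulfill directly, no lookup), and everything else (generic slow path). Sketched as a decision function with illustrative enums, not V8 types:

#include <cstdio>

enum class ResolutionKind { kNativePromise, kIterResultObject, kOther };
enum class Path { kFastThen, kFulfillDirectly, kSlowLookup };

// Mirrors the branch structure above: the Promise#then protector must be
// intact for either shortcut; otherwise every object takes the generic path.
Path ClassifyResolution(ResolutionKind kind, bool then_protector_intact,
                        bool force_slow_path) {
  if (force_slow_path || !then_protector_intact) return Path::kSlowLookup;
  switch (kind) {
    case ResolutionKind::kNativePromise:    return Path::kFastThen;
    case ResolutionKind::kIterResultObject: return Path::kFulfillDirectly;
    case ResolutionKind::kOther:            return Path::kSlowLookup;
  }
  return Path::kSlowLookup;
}

int main() {
  Path p = ClassifyResolution(ResolutionKind::kIterResultObject, true, false);
  std::printf("%d\n", static_cast<int>(p));
}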
BIND(&if_slow);
{
// 8. Let then be Get(resolution, "then").
@@ -1677,26 +1805,23 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
const IteratorRecord& iterator, Label* if_exception,
Variable* var_exception) {
IteratorBuiltinsAssembler iter_assembler(state());
- Label close_iterator(this);
Node* const instrumenting = IsDebugActive();
+ Node* const native_context = LoadNativeContext(context);
// For catch prediction, don't treat the .then calls as handling it;
// instead, recurse outwards.
SetForwardingHandlerIfTrue(
- context, instrumenting,
+ native_context, instrumenting,
LoadObjectField(capability, PromiseCapability::kRejectOffset));
- Node* const native_context = LoadNativeContext(context);
- Node* const array_map = LoadContextElement(
- native_context, Context::JS_ARRAY_PACKED_ELEMENTS_MAP_INDEX);
- Node* const values_array = AllocateJSArray(PACKED_ELEMENTS, array_map,
- IntPtrConstant(0), SmiConstant(0));
- Node* const remaining_elements = AllocateSmiCell(1);
+ Node* const resolve_element_context =
+ CreatePromiseAllResolveElementContext(capability, native_context);
- VARIABLE(var_index, MachineRepresentation::kTagged, SmiConstant(0));
-
- Label loop(this, &var_index), break_loop(this);
+ VARIABLE(var_index, MachineRepresentation::kTagged, SmiConstant(1));
+ Label loop(this, &var_index), done_loop(this),
+ too_many_elements(this, Label::kDeferred),
+ close_iterator(this, Label::kDeferred);
Goto(&loop);
BIND(&loop);
{
@@ -1706,119 +1831,147 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
Node* const fast_iterator_result_map =
LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
Node* const next = iter_assembler.IteratorStep(
- context, iterator, &break_loop, fast_iterator_result_map, if_exception,
- var_exception);
+ native_context, iterator, &done_loop, fast_iterator_result_map,
+ if_exception, var_exception);
// Let nextValue be IteratorValue(next).
// If nextValue is an abrupt completion, set iteratorRecord.[[Done]] to
// true.
// ReturnIfAbrupt(nextValue).
Node* const next_value = iter_assembler.IteratorValue(
- context, next, fast_iterator_result_map, if_exception, var_exception);
+ native_context, next, fast_iterator_result_map, if_exception,
+ var_exception);
// Let nextPromise be ? Invoke(constructor, "resolve", Ā« nextValue Ā»).
- Node* const promise_resolve =
- GetProperty(context, constructor, factory()->resolve_string());
- GotoIfException(promise_resolve, &close_iterator, var_exception);
-
- Node* const next_promise = CallJS(
- CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
- context, promise_resolve, constructor, next_value);
- GotoIfException(next_promise, &close_iterator, var_exception);
+ Node* const next_promise =
+ InvokeResolve(native_context, constructor, next_value, &close_iterator,
+ var_exception);
- // Let resolveElement be a new built-in function object as defined in
- // Promise.all Resolve Element Functions.
- Node* const resolve_context =
- CreatePromiseContext(native_context, kPromiseAllResolveElementLength);
- StoreContextElementNoWriteBarrier(
- resolve_context, kPromiseAllResolveElementIndexSlot, var_index.value());
- StoreContextElementNoWriteBarrier(
- resolve_context, kPromiseAllResolveElementRemainingElementsSlot,
- remaining_elements);
- StoreContextElementNoWriteBarrier(
- resolve_context, kPromiseAllResolveElementCapabilitySlot, capability);
- StoreContextElementNoWriteBarrier(resolve_context,
- kPromiseAllResolveElementValuesArraySlot,
- values_array);
-
- Node* const map = LoadContextElement(
- native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
- Node* const resolve_info = LoadContextElement(
- native_context, Context::PROMISE_ALL_RESOLVE_ELEMENT_SHARED_FUN);
- Node* const resolve =
- AllocateFunctionWithMapAndContext(map, resolve_info, resolve_context);
+ // Check if we reached the limit.
+ Node* const index = var_index.value();
+ GotoIf(SmiEqual(index, SmiConstant(PropertyArray::HashField::kMax)),
+ &too_many_elements);
// Set remainingElementsCount.[[Value]] to
// remainingElementsCount.[[Value]] + 1.
- {
- Label if_outofrange(this, Label::kDeferred), done(this);
- IncrementSmiCell(remaining_elements, &if_outofrange);
- Goto(&done);
-
- BIND(&if_outofrange);
- {
- // If the incremented value is out of Smi range, crash.
- Abort(AbortReason::kOffsetOutOfRange);
- }
-
- BIND(&done);
- }
+ Node* const remaining_elements_count = LoadContextElement(
+ resolve_element_context, kPromiseAllResolveElementRemainingSlot);
+ StoreContextElementNoWriteBarrier(
+ resolve_element_context, kPromiseAllResolveElementRemainingSlot,
+ SmiAdd(remaining_elements_count, SmiConstant(1)));
+
+ // Let resolveElement be CreateBuiltinFunction(steps,
+ // Ā« [[AlreadyCalled]],
+ // [[Index]],
+ // [[Values]],
+ // [[Capability]],
+ // [[RemainingElements]] Ā»).
+ // Set resolveElement.[[AlreadyCalled]] to a Record { [[Value]]: false }.
+ // Set resolveElement.[[Index]] to index.
+ // Set resolveElement.[[Values]] to values.
+ // Set resolveElement.[[Capability]] to resultCapability.
+ // Set resolveElement.[[RemainingElements]] to remainingElementsCount.
+ Node* const resolve_element_fun = CreatePromiseAllResolveElementFunction(
+ resolve_element_context, index, native_context);
// Perform ? Invoke(nextPromise, "then", Ā« resolveElement,
// resultCapability.[[Reject]] Ā»).
Node* const then =
- GetProperty(context, next_promise, factory()->then_string());
+ GetProperty(native_context, next_promise, factory()->then_string());
GotoIfException(then, &close_iterator, var_exception);
Node* const then_call = CallJS(
CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
- context, then, next_promise, resolve,
+ native_context, then, next_promise, resolve_element_fun,
LoadObjectField(capability, PromiseCapability::kRejectOffset));
GotoIfException(then_call, &close_iterator, var_exception);
// For catch prediction, mark that rejections here are semantically
// handled by the combined Promise.
- SetPromiseHandledByIfTrue(context, instrumenting, then_call, [=]() {
+ SetPromiseHandledByIfTrue(native_context, instrumenting, then_call, [=]() {
// Load promiseCapability.[[Promise]]
return LoadObjectField(capability, PromiseCapability::kPromiseOffset);
});
- // Set index to index + 1
- var_index.Bind(NumberInc(var_index.value()));
+ // Set index to index + 1.
+ var_index.Bind(SmiAdd(index, SmiConstant(1)));
Goto(&loop);
}
+ BIND(&too_many_elements);
+ {
+ // If there are too many elements (currently more than 2**21-1), raise a
+    // RangeError here (which is caught directly and turned into a rejection
+    // of the resulting promise). We could gracefully handle this case as well
+    // and support more than this number of elements by going to a separate
+    // function and passing the larger indices via a separate context, but it
+ // doesn't seem likely that we need this, and it's unclear how the rest
+ // of the system deals with 2**21 live Promises anyways.
+ Node* const result =
+ CallRuntime(Runtime::kThrowRangeError, native_context,
+ SmiConstant(MessageTemplate::kTooManyElementsInPromiseAll));
+ GotoIfException(result, &close_iterator, var_exception);
+ Unreachable();
+ }
+
BIND(&close_iterator);
{
// Exception must be bound to a JS value.
CSA_ASSERT(this, IsNotTheHole(var_exception->value()));
- iter_assembler.IteratorCloseOnException(context, iterator, if_exception,
- var_exception);
+ iter_assembler.IteratorCloseOnException(native_context, iterator,
+ if_exception, var_exception);
}
- BIND(&break_loop);
+ BIND(&done_loop);
{
- Label resolve_promise(this), return_promise(this);
+ Label resolve_promise(this, Label::kDeferred), return_promise(this);
// Set iteratorRecord.[[Done]] to true.
// Set remainingElementsCount.[[Value]] to
// remainingElementsCount.[[Value]] - 1.
- Node* const remaining = DecrementSmiCell(remaining_elements);
- Branch(SmiEqual(remaining, SmiConstant(0)), &resolve_promise,
- &return_promise);
+ Node* remaining_elements_count = LoadContextElement(
+ resolve_element_context, kPromiseAllResolveElementRemainingSlot);
+ remaining_elements_count = SmiSub(remaining_elements_count, SmiConstant(1));
+ StoreContextElementNoWriteBarrier(resolve_element_context,
+ kPromiseAllResolveElementRemainingSlot,
+ remaining_elements_count);
+ GotoIf(SmiEqual(remaining_elements_count, SmiConstant(0)),
+ &resolve_promise);
+
+ // Pre-allocate the backing store for the {values_array} to the desired
+ // capacity here. We may already have elements here in case of some
+ // fancy Thenable that calls the resolve callback immediately, so we need
+ // to handle that correctly here.
+ Node* const values_array = LoadContextElement(
+ resolve_element_context, kPromiseAllResolveElementValuesArraySlot);
+ Node* const old_elements = LoadElements(values_array);
+ Node* const old_capacity = LoadFixedArrayBaseLength(old_elements);
+ Node* const new_capacity = var_index.value();
+ GotoIf(SmiGreaterThanOrEqual(old_capacity, new_capacity), &return_promise);
+ Node* const new_elements =
+ AllocateFixedArray(PACKED_ELEMENTS, new_capacity, SMI_PARAMETERS,
+ AllocationFlag::kAllowLargeObjectAllocation);
+ CopyFixedArrayElements(PACKED_ELEMENTS, old_elements, PACKED_ELEMENTS,
+ new_elements, SmiConstant(0), old_capacity,
+ new_capacity, UPDATE_WRITE_BARRIER, SMI_PARAMETERS);
+ StoreObjectField(values_array, JSArray::kElementsOffset, new_elements);
+ Goto(&return_promise);
// If remainingElementsCount.[[Value]] is 0, then
// Let valuesArray be CreateArrayFromList(values).
// Perform ? Call(resultCapability.[[Resolve]], undefined,
// Ā« valuesArray Ā»).
BIND(&resolve_promise);
-
- Node* const resolve =
- LoadObjectField(capability, PromiseCapability::kResolveOffset);
- Node* const resolve_call = CallJS(
- CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
- context, resolve, UndefinedConstant(), values_array);
- GotoIfException(resolve_call, if_exception, var_exception);
- Goto(&return_promise);
+ {
+ Node* const resolve =
+ LoadObjectField(capability, PromiseCapability::kResolveOffset);
+ Node* const values_array = LoadContextElement(
+ resolve_element_context, kPromiseAllResolveElementValuesArraySlot);
+ Node* const resolve_call = CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNullOrUndefined),
+ native_context, resolve, UndefinedConstant(), values_array);
+ GotoIfException(resolve_call, if_exception, var_exception);
+ Goto(&return_promise);
+ }
// Return resultCapability.[[Promise]].
BIND(&return_promise);
@@ -1829,31 +1982,6 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
return promise;
}
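At loop exit the values array's backing store is grown once to the final capacity, copying over anything a synchronously-resolving thenable may already have written. A plain C++ analogue of that grow-and-copy step (hypothetical types; std::vector stands in for the FixedArray backing store):

#include <cstddef>
#include <vector>

// values: the elements written so far; new_capacity: index reached by the
// iteration loop. Grow only when the old backing store is too small,
// mirroring the SmiGreaterThanOrEqual(old_capacity, new_capacity) guard.
void EnsureCapacity(std::vector<int>& values, std::size_t new_capacity) {
  if (values.capacity() >= new_capacity) return;   // nothing to do
  std::vector<int> grown;
  grown.reserve(new_capacity);                     // ~ AllocateFixedArray
  grown.assign(values.begin(), values.end());      // ~ CopyFixedArrayElements
  values.swap(grown);                              // ~ store new elements
}

int main() {
  std::vector<int> values = {1, 2};
  EnsureCapacity(values, 16);
  return values.size() == 2 && values.capacity() >= 16 ? 0 : 1;
}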
-Node* PromiseBuiltinsAssembler::IncrementSmiCell(Node* cell,
- Label* if_overflow) {
- CSA_SLOW_ASSERT(this, HasInstanceType(cell, CELL_TYPE));
- Node* value = LoadCellValue(cell);
- CSA_SLOW_ASSERT(this, TaggedIsSmi(value));
-
- if (if_overflow != nullptr) {
- GotoIf(SmiEqual(value, SmiConstant(Smi::kMaxValue)), if_overflow);
- }
-
- Node* result = SmiAdd(value, SmiConstant(1));
- StoreCellValue(cell, result, SKIP_WRITE_BARRIER);
- return result;
-}
-
-Node* PromiseBuiltinsAssembler::DecrementSmiCell(Node* cell) {
- CSA_SLOW_ASSERT(this, HasInstanceType(cell, CELL_TYPE));
- Node* value = LoadCellValue(cell);
- CSA_SLOW_ASSERT(this, TaggedIsSmi(value));
-
- Node* result = SmiSub(value, SmiConstant(1));
- StoreCellValue(cell, result, SKIP_WRITE_BARRIER);
- return result;
-}
-
// ES#sec-promise.all
// Promise.all ( iterable )
TF_BUILTIN(PromiseAll, PromiseBuiltinsAssembler) {
@@ -1910,60 +2038,96 @@ TF_BUILTIN(PromiseAll, PromiseBuiltinsAssembler) {
TF_BUILTIN(PromiseAllResolveElementClosure, PromiseBuiltinsAssembler) {
Node* const value = Parameter(Descriptor::kValue);
Node* const context = Parameter(Descriptor::kContext);
+ Node* const function = LoadFromFrame(StandardFrameConstants::kFunctionOffset);
+ Label already_called(this, Label::kDeferred), resolve_promise(this);
+
+  // We use the {function}'s context as the marker to remember whether this
+  // resolve element closure was already called. It points to the resolve
+  // element context (which is a FunctionContext) until it is called the
+ // first time, in which case we make it point to the native context here
+ // to mark this resolve element closure as done.
+ GotoIf(IsNativeContext(context), &already_called);
CSA_ASSERT(this, SmiEqual(LoadFixedArrayBaseLength(context),
SmiConstant(kPromiseAllResolveElementLength)));
+ Node* const native_context = LoadNativeContext(context);
+ StoreObjectField(function, JSFunction::kContextOffset, native_context);
- Node* const index =
- LoadContextElement(context, kPromiseAllResolveElementIndexSlot);
+ // Determine the index from the {function}.
+ Label unreachable(this, Label::kDeferred);
+ STATIC_ASSERT(PropertyArray::kNoHashSentinel == 0);
+ Node* const identity_hash =
+ LoadJSReceiverIdentityHash(function, &unreachable);
+ CSA_ASSERT(this, IntPtrGreaterThan(identity_hash, IntPtrConstant(0)));
+ Node* const index = IntPtrSub(identity_hash, IntPtrConstant(1));
+
+ // Check if we need to grow the [[ValuesArray]] to store {value} at {index}.
Node* const values_array =
LoadContextElement(context, kPromiseAllResolveElementValuesArraySlot);
+ Node* const elements = LoadElements(values_array);
+ Node* const values_length =
+ LoadAndUntagObjectField(values_array, JSArray::kLengthOffset);
+ Label if_inbounds(this), if_outofbounds(this), done(this);
+ Branch(IntPtrLessThan(index, values_length), &if_inbounds, &if_outofbounds);
- Label already_called(this, Label::kDeferred), resolve_promise(this);
- GotoIf(SmiLessThan(index, SmiConstant(Smi::kZero)), &already_called);
- StoreContextElementNoWriteBarrier(context, kPromiseAllResolveElementIndexSlot,
- SmiConstant(-1));
+ BIND(&if_outofbounds);
+ {
+ // Check if we need to grow the backing store.
+ Node* const new_length = IntPtrAdd(index, IntPtrConstant(1));
+ Node* const elements_length =
+ LoadAndUntagObjectField(elements, FixedArray::kLengthOffset);
+ Label if_grow(this, Label::kDeferred), if_nogrow(this);
+ Branch(IntPtrLessThan(index, elements_length), &if_nogrow, &if_grow);
+
+ BIND(&if_grow);
+ {
+ // We need to grow the backing store to fit the {index} as well.
+ Node* const new_elements_length =
+ IntPtrMin(CalculateNewElementsCapacity(new_length),
+ IntPtrConstant(PropertyArray::HashField::kMax + 1));
+ CSA_ASSERT(this, IntPtrLessThan(index, new_elements_length));
+ CSA_ASSERT(this, IntPtrLessThan(elements_length, new_elements_length));
+ Node* const new_elements = AllocateFixedArray(
+ PACKED_ELEMENTS, new_elements_length, INTPTR_PARAMETERS,
+ AllocationFlag::kAllowLargeObjectAllocation);
+ CopyFixedArrayElements(PACKED_ELEMENTS, elements, PACKED_ELEMENTS,
+ new_elements, elements_length,
+ new_elements_length);
+ StoreFixedArrayElement(new_elements, index, value);
+
+ // Update backing store and "length" on {values_array}.
+ StoreObjectField(values_array, JSArray::kElementsOffset, new_elements);
+ StoreObjectFieldNoWriteBarrier(values_array, JSArray::kLengthOffset,
+ SmiTag(new_length));
+ Goto(&done);
+ }
+
+ BIND(&if_nogrow);
+ {
+ // The {index} is within bounds of the {elements} backing store, so
+ // just store the {value} and update the "length" of the {values_array}.
+ StoreObjectFieldNoWriteBarrier(values_array, JSArray::kLengthOffset,
+ SmiTag(new_length));
+ StoreFixedArrayElement(elements, index, value);
+ Goto(&done);
+ }
+ }
- // Set element in FixedArray
- Label runtime_set_element(this), did_set_element(this);
- GotoIfNot(TaggedIsPositiveSmi(index), &runtime_set_element);
+ BIND(&if_inbounds);
{
- VARIABLE(var_elements, MachineRepresentation::kTagged,
- LoadElements(values_array));
- PossiblyGrowElementsCapacity(SMI_PARAMETERS, PACKED_ELEMENTS, values_array,
- index, &var_elements, SmiConstant(1),
- &runtime_set_element);
- StoreFixedArrayElement(var_elements.value(), index, value,
- UPDATE_WRITE_BARRIER, 0, SMI_PARAMETERS);
-
- // Update array length
- Label did_set_length(this);
- Node* const length = LoadJSArrayLength(values_array);
- GotoIfNot(TaggedIsPositiveSmi(length), &did_set_length);
- Node* const new_length = SmiAdd(index, SmiConstant(1));
- GotoIfNot(SmiLessThan(length, new_length), &did_set_length);
- StoreObjectFieldNoWriteBarrier(values_array, JSArray::kLengthOffset,
- new_length);
- // Assert that valuesArray.[[Length]] is less than or equal to the
- // elements backing-store length.e
- CSA_SLOW_ASSERT(
- this, SmiAboveOrEqual(LoadFixedArrayBaseLength(var_elements.value()),
- new_length));
- Goto(&did_set_length);
- BIND(&did_set_length);
+ // The {index} is in bounds of the {values_array},
+ // just store the {value} and continue.
+ StoreFixedArrayElement(elements, index, value);
+ Goto(&done);
}
- Goto(&did_set_element);
- BIND(&runtime_set_element);
- // New-space filled up or index too large, set element via runtime
- CallRuntime(Runtime::kCreateDataProperty, context, values_array, index,
- value);
- Goto(&did_set_element);
- BIND(&did_set_element);
-
- Node* const remaining_elements = LoadContextElement(
- context, kPromiseAllResolveElementRemainingElementsSlot);
- Node* const result = DecrementSmiCell(remaining_elements);
- GotoIf(SmiEqual(result, SmiConstant(0)), &resolve_promise);
+
+ BIND(&done);
+ Node* remaining_elements_count =
+ LoadContextElement(context, kPromiseAllResolveElementRemainingSlot);
+ remaining_elements_count = SmiSub(remaining_elements_count, SmiConstant(1));
+ StoreContextElement(context, kPromiseAllResolveElementRemainingSlot,
+ remaining_elements_count);
+ GotoIf(SmiEqual(remaining_elements_count, SmiConstant(0)), &resolve_promise);
Return(UndefinedConstant());
BIND(&resolve_promise);
@@ -1977,6 +2141,9 @@ TF_BUILTIN(PromiseAllResolveElementClosure, PromiseBuiltinsAssembler) {
BIND(&already_called);
Return(UndefinedConstant());
+
+ BIND(&unreachable);
+ Unreachable();
}
// ES#sec-promise.race
@@ -2043,15 +2210,9 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) {
&reject_promise, &var_exception);
// Let nextPromise be ? Invoke(constructor, "resolve", Ā« nextValue Ā»).
- Node* const promise_resolve =
- GetProperty(context, receiver, factory()->resolve_string());
- GotoIfException(promise_resolve, &close_iterator, &var_exception);
-
Node* const next_promise =
- CallJS(CodeFactory::Call(isolate(),
- ConvertReceiverMode::kNotNullOrUndefined),
- context, promise_resolve, receiver, next_value);
- GotoIfException(next_promise, &close_iterator, &var_exception);
+ InvokeResolve(native_context, receiver, next_value, &close_iterator,
+ &var_exception);
// Perform ? Invoke(nextPromise, "then", Ā« resolveElement,
// resultCapability.[[Reject]] Ā»).
diff --git a/deps/v8/src/builtins/builtins-promise-gen.h b/deps/v8/src/builtins/builtins-promise-gen.h
index 2130101e84..f21d86a141 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.h
+++ b/deps/v8/src/builtins/builtins-promise-gen.h
@@ -30,11 +30,8 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
protected:
enum PromiseAllResolveElementContextSlots {
- // Index into the values array, or -1 if the callback was already called
- kPromiseAllResolveElementIndexSlot = Context::MIN_CONTEXT_SLOTS,
-
- // Remaining elements count (mutable HeapNumber)
- kPromiseAllResolveElementRemainingElementsSlot,
+ // Remaining elements count
+ kPromiseAllResolveElementRemainingSlot = Context::MIN_CONTEXT_SLOTS,
// Promise capability from Promise.all
kPromiseAllResolveElementCapabilitySlot,
@@ -105,6 +102,18 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
Node* PromiseHasHandler(Node* promise);
+ // Creates the context used by all Promise.all resolve element closures,
+ // together with the values array. Since all closures for a single Promise.all
+ // call use the same context, we need to store the indices for the individual
+ // closures somewhere else (we put them into the identity hash field of the
+ // closures), and we also need to have a separate marker for when the closure
+ // was called already (we slap the native context onto the closure in that
+  // case to mark it as done).
+ Node* CreatePromiseAllResolveElementContext(Node* promise_capability,
+ Node* native_context);
+ Node* CreatePromiseAllResolveElementFunction(Node* context, Node* index,
+ Node* native_context);
+
Node* CreatePromiseResolvingFunctionsContext(Node* promise, Node* debug_event,
Node* native_context);
@@ -126,6 +135,14 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
Node* TriggerPromiseReactions(Node* context, Node* promise, Node* result,
PromiseReaction::Type type);
+ // We can skip the "resolve" lookup on {constructor} if it's the (initial)
+ // Promise constructor and the Promise.resolve() protector is intact, as
+ // that guards the lookup path for the "resolve" property on the %Promise%
+ // intrinsic object.
+ void BranchIfPromiseResolveLookupChainIntact(Node* native_context,
+ Node* constructor,
+ Label* if_fast, Label* if_slow);
+
// We can shortcut the SpeciesConstructor on {promise_map} if it's
// [[Prototype]] is the (initial) Promise.prototype and the @@species
// protector is intact, as that guards the lookup path for the "constructor"
@@ -142,6 +159,8 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
Node* receiver_map, Label* if_fast,
Label* if_slow);
+ Node* InvokeResolve(Node* native_context, Node* constructor, Node* value,
+ Label* if_exception, Variable* var_exception);
template <typename... TArgs>
Node* InvokeThen(Node* native_context, Node* receiver, TArgs... args);
@@ -160,9 +179,6 @@ class PromiseBuiltinsAssembler : public CodeStubAssembler {
const IteratorRecord& record, Label* if_exception,
Variable* var_exception);
- Node* IncrementSmiCell(Node* cell, Label* if_overflow = nullptr);
- Node* DecrementSmiCell(Node* cell);
-
void SetForwardingHandlerIfTrue(Node* context, Node* condition,
const NodeGenerator& object);
inline void SetForwardingHandlerIfTrue(Node* context, Node* condition,
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.cc b/deps/v8/src/builtins/builtins-proxy-gen.cc
index fb35f48a15..a4c208ac4e 100644
--- a/deps/v8/src/builtins/builtins-proxy-gen.cc
+++ b/deps/v8/src/builtins/builtins-proxy-gen.cc
@@ -13,12 +13,6 @@
namespace v8 {
namespace internal {
-// ES6 section 26.2.1.1 Proxy ( target, handler ) for the [[Call]] case.
-TF_BUILTIN(ProxyConstructor, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- ThrowTypeError(context, MessageTemplate::kConstructorNotFunction, "Proxy");
-}
-
void ProxiesCodeStubAssembler::GotoIfRevokedProxy(Node* object,
Label* if_proxy_revoked) {
Label proxy_not_revoked(this);
@@ -127,40 +121,6 @@ Node* ProxiesCodeStubAssembler::AllocateJSArrayForCodeStubArguments(
return array;
}
-// ES6 section 26.2.1.1 Proxy ( target, handler ) for the [[Construct]] case.
-TF_BUILTIN(ProxyConstructor_ConstructStub, ProxiesCodeStubAssembler) {
- int const kTargetArg = 0;
- int const kHandlerArg = 1;
-
- Node* argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
- CodeStubArguments args(this, argc);
-
- Node* target = args.GetOptionalArgumentValue(kTargetArg);
- Node* handler = args.GetOptionalArgumentValue(kHandlerArg);
- Node* context = Parameter(BuiltinDescriptor::kContext);
-
- Label throw_proxy_non_object(this, Label::kDeferred),
- throw_proxy_handler_or_target_revoked(this, Label::kDeferred),
- return_create_proxy(this);
-
- GotoIf(TaggedIsSmi(target), &throw_proxy_non_object);
- GotoIfNot(IsJSReceiver(target), &throw_proxy_non_object);
- GotoIfRevokedProxy(target, &throw_proxy_handler_or_target_revoked);
-
- GotoIf(TaggedIsSmi(handler), &throw_proxy_non_object);
- GotoIfNot(IsJSReceiver(handler), &throw_proxy_non_object);
- GotoIfRevokedProxy(handler, &throw_proxy_handler_or_target_revoked);
-
- args.PopAndReturn(AllocateProxy(target, handler, context));
-
- BIND(&throw_proxy_non_object);
- ThrowTypeError(context, MessageTemplate::kProxyNonObject);
-
- BIND(&throw_proxy_handler_or_target_revoked);
- ThrowTypeError(context, MessageTemplate::kProxyHandlerOrTargetRevoked);
-}
-
Node* ProxiesCodeStubAssembler::CreateProxyRevokeFunctionContext(
Node* proxy, Node* native_context) {
Node* const context = Allocate(FixedArray::SizeFor(kProxyContextLength));
@@ -185,6 +145,65 @@ Node* ProxiesCodeStubAssembler::AllocateProxyRevokeFunction(Node* proxy,
proxy_context);
}
+// ES #sec-proxy-constructor
+TF_BUILTIN(ProxyConstructor, ProxiesCodeStubAssembler) {
+ Node* context = Parameter(Descriptor::kContext);
+
+ // 1. If NewTarget is undefined, throw a TypeError exception.
+ Node* new_target = Parameter(Descriptor::kNewTarget);
+ Label throwtypeerror(this, Label::kDeferred), createproxy(this);
+ Branch(IsUndefined(new_target), &throwtypeerror, &createproxy);
+
+ BIND(&throwtypeerror);
+ {
+ ThrowTypeError(context, MessageTemplate::kConstructorNotFunction, "Proxy");
+ }
+
+ // 2. Return ? ProxyCreate(target, handler).
+ BIND(&createproxy);
+ {
+ // https://tc39.github.io/ecma262/#sec-proxycreate
+ Node* target = Parameter(Descriptor::kTarget);
+ Node* handler = Parameter(Descriptor::kHandler);
+
+ // 1. If Type(target) is not Object, throw a TypeError exception.
+ // 2. If target is a Proxy exotic object and target.[[ProxyHandler]] is
+ // null, throw a TypeError exception.
+ // 3. If Type(handler) is not Object, throw a TypeError exception.
+ // 4. If handler is a Proxy exotic object and handler.[[ProxyHandler]]
+ // is null, throw a TypeError exception.
+ Label throw_proxy_non_object(this, Label::kDeferred),
+ throw_proxy_handler_or_target_revoked(this, Label::kDeferred),
+ return_create_proxy(this);
+
+ GotoIf(TaggedIsSmi(target), &throw_proxy_non_object);
+ GotoIfNot(IsJSReceiver(target), &throw_proxy_non_object);
+ GotoIfRevokedProxy(target, &throw_proxy_handler_or_target_revoked);
+
+ GotoIf(TaggedIsSmi(handler), &throw_proxy_non_object);
+ GotoIfNot(IsJSReceiver(handler), &throw_proxy_non_object);
+ GotoIfRevokedProxy(handler, &throw_proxy_handler_or_target_revoked);
+
+ // 5. Let P be a newly created object.
+ // 6. Set P's essential internal methods (except for [[Call]] and
+ // [[Construct]]) to the definitions specified in 9.5.
+ // 7. If IsCallable(target) is true, then
+ // a. Set P.[[Call]] as specified in 9.5.12.
+ // b. If IsConstructor(target) is true, then
+ // 1. Set P.[[Construct]] as specified in 9.5.13.
+ // 8. Set P.[[ProxyTarget]] to target.
+ // 9. Set P.[[ProxyHandler]] to handler.
+ // 10. Return P.
+ Return(AllocateProxy(target, handler, context));
+
+ BIND(&throw_proxy_non_object);
+ ThrowTypeError(context, MessageTemplate::kProxyNonObject);
+
+ BIND(&throw_proxy_handler_or_target_revoked);
+ ThrowTypeError(context, MessageTemplate::kProxyHandlerOrTargetRevoked);
+ }
+}
+
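The new ProxyConstructor folds the former [[Construct]] stub into one builtin: throw if new.target is undefined (a plain call), otherwise run ProxyCreate's target/handler checks. A compact sketch of that validation order, using an illustrative object model rather than V8's:

#include <stdexcept>

// Toy object model: enough to express "not an object" and "revoked proxy".
struct JSValue {
  bool is_object = false;
  bool is_revoked_proxy = false;
};

struct Proxy { const JSValue* target; const JSValue* handler; };

Proxy ProxyConstructor(bool has_new_target, const JSValue& target,
                       const JSValue& handler) {
  // 1. Without new.target this is a plain call: TypeError.
  if (!has_new_target) throw std::runtime_error("kConstructorNotFunction");
  // ProxyCreate: both target and handler must be non-revoked objects.
  for (const JSValue* v : {&target, &handler}) {
    if (!v->is_object) throw std::runtime_error("kProxyNonObject");
    if (v->is_revoked_proxy)
      throw std::runtime_error("kProxyHandlerOrTargetRevoked");
  }
  return Proxy{&target, &handler};
}

int main() {
  JSValue target{true, false}, handler{true, false};
  Proxy p = ProxyConstructor(true, target, handler);
  return p.target == &target ? 0 : 1;
}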
TF_BUILTIN(ProxyRevocable, ProxiesCodeStubAssembler) {
Node* const target = Parameter(Descriptor::kTarget);
Node* const handler = Parameter(Descriptor::kHandler);
@@ -439,8 +458,7 @@ TF_BUILTIN(ProxyHasProperty, ProxiesCodeStubAssembler) {
BIND(&trap_undefined);
{
// 7.a. Return ? target.[[HasProperty]](P).
- TailCallStub(Builtins::CallableFor(isolate(), Builtins::kHasProperty),
- context, name, target);
+ TailCallBuiltin(Builtins::kHasProperty, context, name, target);
}
BIND(&return_false);
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc
index 45329eed70..2cc354cb94 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.cc
+++ b/deps/v8/src/builtins/builtins-regexp-gen.cc
@@ -11,7 +11,8 @@
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
#include "src/counters.h"
-#include "src/factory-inl.h"
+#include "src/heap/factory-inl.h"
+#include "src/objects/js-regexp-string-iterator.h"
#include "src/objects/js-regexp.h"
#include "src/objects/regexp-match-info.h"
#include "src/regexp/regexp-macro-assembler.h"
@@ -20,13 +21,15 @@ namespace v8 {
namespace internal {
using compiler::Node;
+template <class T>
+using TNode = compiler::TNode<T>;
// -----------------------------------------------------------------------------
// ES6 section 21.2 RegExp Objects
Node* RegExpBuiltinsAssembler::AllocateRegExpResult(Node* context, Node* length,
Node* index, Node* input) {
- CSA_ASSERT(this, IsFixedArray(context));
+ CSA_ASSERT(this, IsContext(context));
CSA_ASSERT(this, TaggedIsSmi(index));
CSA_ASSERT(this, TaggedIsSmi(length));
CSA_ASSERT(this, IsString(input));
@@ -88,6 +91,28 @@ Node* RegExpBuiltinsAssembler::AllocateRegExpResult(Node* context, Node* length,
return result;
}
+TNode<Object> RegExpBuiltinsAssembler::RegExpCreate(
+ TNode<Context> context, TNode<Context> native_context,
+ TNode<Object> maybe_string, TNode<String> flags) {
+ TNode<JSFunction> regexp_function =
+ CAST(LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX));
+ TNode<Map> initial_map = CAST(LoadObjectField(
+ regexp_function, JSFunction::kPrototypeOrInitialMapOffset));
+ return RegExpCreate(context, initial_map, maybe_string, flags);
+}
+
+TNode<Object> RegExpBuiltinsAssembler::RegExpCreate(TNode<Context> context,
+ TNode<Map> initial_map,
+ TNode<Object> maybe_string,
+ TNode<String> flags) {
+ TNode<String> pattern = Select<String>(
+ IsUndefined(maybe_string), [=] { return EmptyStringConstant(); },
+ [=] { return ToString_Inline(context, maybe_string); });
+ TNode<Object> regexp = CAST(AllocateJSObjectFromMap(initial_map));
+ return CallRuntime(Runtime::kRegExpInitializeAndCompile, context, regexp,
+ pattern, flags);
+}
+
Node* RegExpBuiltinsAssembler::FastLoadLastIndex(Node* regexp) {
// Load the in-object field.
static const int field_offset =
@@ -142,8 +167,8 @@ Node* RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
Label named_captures(this), out(this);
- TNode<IntPtrT> num_indices = SmiUntag(LoadFixedArrayElement(
- match_info, RegExpMatchInfo::kNumberOfCapturesIndex));
+ TNode<IntPtrT> num_indices = SmiUntag(CAST(LoadFixedArrayElement(
+ match_info, RegExpMatchInfo::kNumberOfCapturesIndex)));
Node* const num_results = SmiTag(WordShr(num_indices, 1));
Node* const start =
LoadFixedArrayElement(match_info, RegExpMatchInfo::kFirstCaptureIndex);
@@ -1185,16 +1210,14 @@ Node* RegExpBuiltinsAssembler::RegExpInitialize(Node* const context,
CSA_ASSERT(this, IsJSRegExp(regexp));
// Normalize pattern.
- Node* const pattern = Select<Object>(
+ TNode<Object> const pattern = Select<Object>(
IsUndefined(maybe_pattern), [=] { return EmptyStringConstant(); },
- [=] { return ToString_Inline(context, maybe_pattern); },
- MachineRepresentation::kTagged);
+ [=] { return ToString_Inline(context, maybe_pattern); });
// Normalize flags.
- Node* const flags = Select<Object>(
+ TNode<Object> const flags = Select<Object>(
IsUndefined(maybe_flags), [=] { return EmptyStringConstant(); },
- [=] { return ToString_Inline(context, maybe_flags); },
- MachineRepresentation::kTagged);
+ [=] { return ToString_Inline(context, maybe_flags); });
// Initialize.
@@ -1843,30 +1866,9 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
Branch(IsNull(result), &if_didnotmatch, &load_match);
BIND(&load_match);
- {
- Label fast_result(this), slow_result(this);
- BranchIfFastRegExpResult(context, result, &fast_result, &slow_result);
-
- BIND(&fast_result);
- {
- Node* const result_fixed_array = LoadElements(result);
- Node* const match = LoadFixedArrayElement(result_fixed_array, 0);
-
- // The match is guaranteed to be a string on the fast path.
- CSA_ASSERT(this, IsString(match));
-
- var_match.Bind(match);
- Goto(&if_didmatch);
- }
-
- BIND(&slow_result);
- {
- // TODO(ishell): Use GetElement stub once it's available.
- Node* const match = GetProperty(context, result, smi_zero);
- var_match.Bind(ToString_Inline(context, match));
- Goto(&if_didmatch);
- }
- }
+ Node* const match = GetProperty(context, result, smi_zero);
+ var_match.Bind(ToString_Inline(context, match));
+ Goto(&if_didmatch);
}
BIND(&if_didnotmatch);
@@ -1951,6 +1953,163 @@ TF_BUILTIN(RegExpPrototypeMatch, RegExpBuiltinsAssembler) {
RegExpPrototypeMatchBody(context, receiver, string, false);
}
+TNode<Object> RegExpBuiltinsAssembler::MatchAllIterator(
+ TNode<Context> context, TNode<Context> native_context,
+ TNode<Object> maybe_regexp, TNode<Object> maybe_string,
+ char const* method_name) {
+ Label create_iterator(this), if_regexp(this), if_not_regexp(this),
+ throw_type_error(this, Label::kDeferred);
+
+ // 1. Let S be ? ToString(O).
+ TNode<String> string = ToString_Inline(context, maybe_string);
+ TVARIABLE(Object, var_matcher);
+ TVARIABLE(Int32T, var_global);
+ TVARIABLE(Int32T, var_unicode);
+
+ // 2. If ? IsRegExp(R) is true, then
+ Branch(IsRegExp(context, maybe_regexp), &if_regexp, &if_not_regexp);
+ BIND(&if_regexp);
+ {
+ // a. Let C be ? SpeciesConstructor(R, %RegExp%).
+ TNode<Object> regexp_fun =
+ LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
+ TNode<Object> species_constructor =
+ SpeciesConstructor(native_context, maybe_regexp, regexp_fun);
+
+ // b. Let flags be ? ToString(? Get(R, "flags")).
+ // TODO(pwong): Add fast path to avoid property lookup.
+ TNode<Object> flags = GetProperty(context, maybe_regexp,
+ isolate()->factory()->flags_string());
+ TNode<Object> flags_string = ToString_Inline(context, flags);
+
+ // c. Let matcher be ? Construct(C, Ā« R, flags Ā»).
+ var_matcher =
+ CAST(ConstructJS(CodeFactory::Construct(isolate()), context,
+ species_constructor, maybe_regexp, flags_string));
+
+ // d. Let global be ? ToBoolean(? Get(matcher, "global")).
+ // TODO(pwong): Add fast path for loading flags.
+ var_global = UncheckedCast<Int32T>(
+ SlowFlagGetter(context, var_matcher.value(), JSRegExp::kGlobal));
+
+    // e. Let fullUnicode be ? ToBoolean(? Get(matcher, "unicode")).
+ // TODO(pwong): Add fast path for loading flags.
+ var_unicode = UncheckedCast<Int32T>(
+ SlowFlagGetter(context, var_matcher.value(), JSRegExp::kUnicode));
+
+ // f. Let lastIndex be ? ToLength(? Get(R, "lastIndex")).
+ // TODO(pwong): Add fast path for loading last index.
+ TNode<Number> last_index = UncheckedCast<Number>(
+ ToLength_Inline(context, SlowLoadLastIndex(context, maybe_regexp)));
+
+ // g. Perform ? Set(matcher, "lastIndex", lastIndex, true).
+ // TODO(pwong): Add fast path for storing last index.
+ SlowStoreLastIndex(context, var_matcher.value(), last_index);
+
+ Goto(&create_iterator);
+ }
+ // 3. Else,
+ BIND(&if_not_regexp);
+ {
+ // a. Let flags be "g".
+ // b. Let matcher be ? RegExpCreate(R, flags).
+ var_matcher = RegExpCreate(context, native_context, maybe_regexp,
+ StringConstant("g"));
+
+ // c. If ? IsRegExp(matcher) is not true, throw a TypeError exception.
+ GotoIfNot(IsRegExp(context, var_matcher.value()), &throw_type_error);
+
+ // d. Let global be true.
+ var_global = Int32Constant(1);
+
+ // e. Let fullUnicode be false.
+ var_unicode = Int32Constant(0);
+
+ // f. If ? Get(matcher, "lastIndex") is not 0, throw a TypeError exception.
+ TNode<Object> last_index =
+ CAST(LoadLastIndex(context, var_matcher.value(), false));
+ Branch(SmiEqual(SmiConstant(0), last_index), &create_iterator,
+ &throw_type_error);
+ }
+ BIND(&throw_type_error);
+ {
+ ThrowTypeError(context, MessageTemplate::kIncompatibleMethodReceiver,
+ StringConstant(method_name), maybe_regexp);
+ }
+ // 4. Return ! CreateRegExpStringIterator(matcher, S, global, fullUnicode).
+ // CreateRegExpStringIterator ( R, S, global, fullUnicode )
+ BIND(&create_iterator);
+ {
+ TNode<Map> map = CAST(LoadContextElement(
+ native_context,
+ Context::INITIAL_REGEXP_STRING_ITERATOR_PROTOTYPE_MAP_INDEX));
+
+ // 4. Let iterator be ObjectCreate(%RegExpStringIteratorPrototype%, Ā«
+ // [[IteratingRegExp]], [[IteratedString]], [[Global]], [[Unicode]],
+ // [[Done]] Ā»).
+ TNode<Object> iterator = CAST(Allocate(JSRegExpStringIterator::kSize));
+ StoreMapNoWriteBarrier(iterator, map);
+ StoreObjectFieldRoot(iterator,
+ JSRegExpStringIterator::kPropertiesOrHashOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldRoot(iterator, JSRegExpStringIterator::kElementsOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+
+ // 5. Set iterator.[[IteratingRegExp]] to R.
+ StoreObjectFieldNoWriteBarrier(
+ iterator, JSRegExpStringIterator::kIteratingRegExpOffset,
+ var_matcher.value());
+
+ // 6. Set iterator.[[IteratedString]] to S.
+ StoreObjectFieldNoWriteBarrier(
+ iterator, JSRegExpStringIterator::kIteratedStringOffset, string);
+
+#ifdef DEBUG
+ // Verify global and unicode can be bitwise shifted without masking.
+ TNode<Int32T> zero = Int32Constant(0);
+ TNode<Int32T> one = Int32Constant(1);
+ CSA_ASSERT(this, Word32Or(Word32Equal(var_global.value(), zero),
+ Word32Equal(var_global.value(), one)));
+ CSA_ASSERT(this, Word32Or(Word32Equal(var_unicode.value(), zero),
+ Word32Equal(var_unicode.value(), one)));
+#endif // DEBUG
+
+ // 7. Set iterator.[[Global]] to global.
+ // 8. Set iterator.[[Unicode]] to fullUnicode.
+ // 9. Set iterator.[[Done]] to false.
+ TNode<Word32T> global_flag = Word32Shl(
+ var_global.value(), Int32Constant(JSRegExpStringIterator::kGlobalBit));
+ TNode<Word32T> unicode_flag =
+ Word32Shl(var_unicode.value(),
+ Int32Constant(JSRegExpStringIterator::kUnicodeBit));
+ TNode<Word32T> iterator_flags = Word32Or(global_flag, unicode_flag);
+ StoreObjectFieldNoWriteBarrier(iterator,
+ JSRegExpStringIterator::kFlagsOffset,
+ SmiFromInt32(Signed(iterator_flags)));
+
+ return iterator;
+ }
+}
+
+// https://tc39.github.io/proposal-string-matchall/
+// RegExp.prototype [ @@matchAll ] ( string )
+TF_BUILTIN(RegExpPrototypeMatchAll, RegExpBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Context> native_context = LoadNativeContext(context);
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ TNode<Object> string = CAST(Parameter(Descriptor::kString));
+
+ // 1. Let R be the this value.
+ // 2. If Type(R) is not Object, throw a TypeError exception.
+ ThrowIfNotJSReceiver(context, receiver,
+ MessageTemplate::kIncompatibleMethodReceiver,
+ "RegExp.prototype.@@matchAll");
+
+ // 3. Return ? MatchAllIterator(R, string).
+ Return(MatchAllIterator(context, native_context, receiver, string,
+ "RegExp.prototype.@@matchAll"));
+}
+
// Helper that skips a few initial checks and assumes...
// 1) receiver is a "fast" RegExp
// 2) pattern is a string
@@ -2888,5 +3047,174 @@ TF_BUILTIN(RegExpInternalMatch, RegExpBuiltinsAssembler) {
}
}
+class RegExpStringIteratorAssembler : public RegExpBuiltinsAssembler {
+ public:
+ explicit RegExpStringIteratorAssembler(compiler::CodeAssemblerState* state)
+ : RegExpBuiltinsAssembler(state) {}
+
+ protected:
+ TNode<Smi> LoadFlags(TNode<HeapObject> iterator) {
+ return LoadObjectField<Smi>(iterator, JSRegExpStringIterator::kFlagsOffset);
+ }
+
+ TNode<BoolT> HasDoneFlag(TNode<Smi> flags) {
+ return UncheckedCast<BoolT>(
+ IsSetSmi(flags, 1 << JSRegExpStringIterator::kDoneBit));
+ }
+
+ TNode<BoolT> HasGlobalFlag(TNode<Smi> flags) {
+ return UncheckedCast<BoolT>(
+ IsSetSmi(flags, 1 << JSRegExpStringIterator::kGlobalBit));
+ }
+
+ TNode<BoolT> HasUnicodeFlag(TNode<Smi> flags) {
+ return UncheckedCast<BoolT>(
+ IsSetSmi(flags, 1 << JSRegExpStringIterator::kUnicodeBit));
+ }
+
+ void SetDoneFlag(TNode<HeapObject> iterator, TNode<Smi> flags) {
+ TNode<Smi> new_flags =
+ SmiOr(flags, SmiConstant(1 << JSRegExpStringIterator::kDoneBit));
+ StoreObjectFieldNoWriteBarrier(
+ iterator, JSRegExpStringIterator::kFlagsOffset, new_flags);
+ }
+};
+
+// https://tc39.github.io/proposal-string-matchall/
+// %RegExpStringIteratorPrototype%.next ( )
+TF_BUILTIN(RegExpStringIteratorPrototypeNext, RegExpStringIteratorAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> maybe_receiver = CAST(Parameter(Descriptor::kReceiver));
+
+ Label if_match(this), if_no_match(this, Label::kDeferred),
+ return_empty_done_result(this, Label::kDeferred),
+ throw_bad_receiver(this, Label::kDeferred);
+
+ // 1. Let O be the this value.
+ // 2. If Type(O) is not Object, throw a TypeError exception.
+ GotoIf(TaggedIsSmi(maybe_receiver), &throw_bad_receiver);
+ GotoIfNot(IsJSReceiver(maybe_receiver), &throw_bad_receiver);
+ TNode<HeapObject> receiver = CAST(maybe_receiver);
+
+ // 3. If O does not have all of the internal slots of a RegExp String Iterator
+ // Object Instance (see 5.3), throw a TypeError exception.
+ GotoIfNot(InstanceTypeEqual(LoadInstanceType(receiver),
+ JS_REGEXP_STRING_ITERATOR_TYPE),
+ &throw_bad_receiver);
+
+ // 4. If O.[[Done]] is true, then
+ // a. Return ! CreateIterResultObject(undefined, true).
+ TNode<Smi> flags = LoadFlags(receiver);
+ GotoIf(HasDoneFlag(flags), &return_empty_done_result);
+
+ // 5. Let R be O.[[IteratingRegExp]].
+ TNode<Object> iterating_regexp =
+ LoadObjectField(receiver, JSRegExpStringIterator::kIteratingRegExpOffset);
+
+ // 6. Let S be O.[[IteratedString]].
+ TNode<String> iterating_string = CAST(
+ LoadObjectField(receiver, JSRegExpStringIterator::kIteratedStringOffset));
+
+ // 7. Let global be O.[[Global]].
+ // See if_match.
+
+ // 8. Let fullUnicode be O.[[Unicode]].
+ // See if_global.
+
+ // 9. Let match be ? RegExpExec(R, S).
+ TVARIABLE(Object, var_match);
+ {
+ Label if_fast(this), if_slow(this), next(this);
+ BranchIfFastRegExp(context, iterating_regexp, &if_fast, &if_slow);
+ BIND(&if_fast);
+ {
+ var_match = CAST(RegExpPrototypeExecBody(context, iterating_regexp,
+ iterating_string, true));
+ Goto(&next);
+ }
+ BIND(&if_slow);
+ {
+ var_match = CAST(RegExpExec(context, iterating_regexp, iterating_string));
+ Goto(&next);
+ }
+ BIND(&next);
+ }
+
+ // 10. If match is null, then
+ Branch(IsNull(var_match.value()), &if_no_match, &if_match);
+ BIND(&if_no_match);
+ {
+ // a. Set O.[[Done]] to true.
+ SetDoneFlag(receiver, flags);
+
+ // b. Return ! CreateIterResultObject(undefined, true).
+ Goto(&return_empty_done_result);
+ }
+ // 11. Else,
+ BIND(&if_match);
+ {
+ Label if_global(this), if_not_global(this, Label::kDeferred);
+
+ // a. If global is true,
+ Branch(HasGlobalFlag(flags), &if_global, &if_not_global);
+ BIND(&if_global);
+ {
+ // i. Let matchStr be ? ToString(? Get(match, "0")).
+ // TODO(pwong): Add fast path for fast regexp results. See
+ // BranchIfFastRegExpResult().
+ TNode<Object> match_str = ToString_Inline(
+ context, GetProperty(context, var_match.value(),
+ isolate()->factory()->zero_string()));
+
+ // ii. If matchStr is the empty string,
+ {
+ Label next(this);
+ GotoIfNot(IsEmptyString(match_str), &next);
+
+ // 1. Let thisIndex be ? ToLength(? Get(R, "lastIndex")).
+ // TODO(pwong): Add fast path for loading last index.
+ TNode<Object> last_index =
+ CAST(SlowLoadLastIndex(context, iterating_regexp));
+ TNode<Number> this_index = ToLength_Inline(context, last_index);
+
+ // 2. Let nextIndex be ! AdvanceStringIndex(S, thisIndex, fullUnicode).
+ TNode<Object> next_index = CAST(AdvanceStringIndex(
+ iterating_string, this_index, HasUnicodeFlag(flags), false));
+
+ // 3. Perform ? Set(R, "lastIndex", nextIndex, true).
+ // TODO(pwong): Add fast path for storing last index.
+ SlowStoreLastIndex(context, iterating_regexp, next_index);
+
+ Goto(&next);
+ BIND(&next);
+ }
+
+ // iii. Return ! CreateIterResultObject(match, false).
+ Return(AllocateJSIteratorResult(context, var_match.value(),
+ FalseConstant()));
+ }
+ // b. Else,
+ BIND(&if_not_global);
+ {
+ // i. Set O.[[Done]] to true.
+ SetDoneFlag(receiver, flags);
+
+ // ii. Return ! CreateIterResultObject(match, false).
+ Return(AllocateJSIteratorResult(context, var_match.value(),
+ FalseConstant()));
+ }
+ }
+ BIND(&return_empty_done_result);
+ Return(
+ AllocateJSIteratorResult(context, UndefinedConstant(), TrueConstant()));
+
+ BIND(&throw_bad_receiver);
+ {
+ ThrowTypeError(context, MessageTemplate::kIncompatibleMethodReceiver,
+ StringConstant("%RegExpStringIterator%.prototype.next"),
+ receiver);
+ }
+}
+
} // namespace internal
} // namespace v8
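For context, the user-visible behavior that MatchAllIterator and %RegExpStringIterator%.prototype.next implement above can be sketched in TypeScript as a generator. This is an illustrative sketch of the proposal-string-matchall semantics at this version, not the V8 code path; matchAllSketch and advanceStringIndex are hypothetical names, and the sketch uses the RegExp constructor directly where the builtin goes through the species constructor.

  // Hypothetical sketch of the matchAll iteration protocol (assumed to mirror
  // the 2018 proposal semantics implemented above; not part of V8).
  function advanceStringIndex(s: string, index: number, unicode: boolean): number {
    // Spec AdvanceStringIndex: step over a surrogate pair when fullUnicode.
    if (!unicode || index + 1 >= s.length) return index + 1;
    const first = s.charCodeAt(index);
    if (first < 0xd800 || first > 0xdbff) return index + 1;
    const second = s.charCodeAt(index + 1);
    if (second < 0xdc00 || second > 0xdfff) return index + 1;
    return index + 2;
  }

  function* matchAllSketch(regexp: RegExp, str: string) {
    // MatchAllIterator: clone the regexp and copy lastIndex, so iteration
    // never mutates the original regexp.
    const matcher = new RegExp(regexp, regexp.flags);
    matcher.lastIndex = regexp.lastIndex;
    const global = matcher.global;
    const fullUnicode = matcher.unicode;
    // %RegExpStringIterator%.prototype.next, expressed as a generator loop.
    while (true) {
      const match = matcher.exec(str);
      if (match === null) return;  // [[Done]] becomes true
      if (global && match[0] === "") {
        // Empty match: advance lastIndex so a global iteration terminates.
        matcher.lastIndex = advanceStringIndex(str, matcher.lastIndex, fullUnicode);
      }
      yield match;
      if (!global) return;  // a non-global regexp yields a single result
    }
  }

  // Example: logs "one" then "two".
  for (const m of matchAllSketch(/\w+/g, "one two")) console.log(m[0]);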
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.h b/deps/v8/src/builtins/builtins-regexp-gen.h
index b57b90acf9..2146da5c0e 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.h
+++ b/deps/v8/src/builtins/builtins-regexp-gen.h
@@ -19,6 +19,19 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
Node* const map, Label* const if_isunmodified,
Label* const if_ismodified);
+ // Create and initialize a RegExp object.
+ TNode<Object> RegExpCreate(TNode<Context> context,
+ TNode<Context> native_context,
+ TNode<Object> regexp_string, TNode<String> flags);
+
+ TNode<Object> RegExpCreate(TNode<Context> context, TNode<Map> initial_map,
+ TNode<Object> regexp_string, TNode<String> flags);
+
+ TNode<Object> MatchAllIterator(TNode<Context> context,
+ TNode<Context> native_context,
+ TNode<Object> regexp, TNode<Object> string,
+ char const* method_name);
+
protected:
// Allocate a RegExpResult with the given length (the number of captures,
// including the match itself), index (the index where the match starts),
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
index 8edb3574cd..a847a5d892 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
@@ -10,9 +10,9 @@
#include "src/code-factory.h"
#include "src/conversions-inl.h"
#include "src/counters.h"
-#include "src/factory.h"
#include "src/futex-emulation.h"
#include "src/globals.h"
+#include "src/heap/factory.h"
#include "src/objects-inl.h"
namespace v8 {
@@ -34,7 +34,7 @@ BUILTIN(AtomicsIsLockFree) {
}
// ES #sec-validatesharedintegertypedarray
-MUST_USE_RESULT MaybeHandle<JSTypedArray> ValidateSharedIntegerTypedArray(
+V8_WARN_UNUSED_RESULT MaybeHandle<JSTypedArray> ValidateSharedIntegerTypedArray(
Isolate* isolate, Handle<Object> object, bool only_int32 = false) {
if (object->IsJSTypedArray()) {
Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(object);
@@ -60,7 +60,7 @@ MUST_USE_RESULT MaybeHandle<JSTypedArray> ValidateSharedIntegerTypedArray(
// ES #sec-validateatomicaccess
// ValidateAtomicAccess( typedArray, requestIndex )
-MUST_USE_RESULT Maybe<size_t> ValidateAtomicAccess(
+V8_WARN_UNUSED_RESULT Maybe<size_t> ValidateAtomicAccess(
Isolate* isolate, Handle<JSTypedArray> typed_array,
Handle<Object> request_index) {
Handle<Object> access_index_obj;
diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc
index 5cc4621b84..bc9578514a 100644
--- a/deps/v8/src/builtins/builtins-string-gen.cc
+++ b/deps/v8/src/builtins/builtins-string-gen.cc
@@ -8,7 +8,7 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
-#include "src/factory-inl.h"
+#include "src/heap/factory-inl.h"
#include "src/objects.h"
namespace v8 {
@@ -525,7 +525,7 @@ TF_BUILTIN(StringCharAt, StringBuiltinsAssembler) {
TNode<Int32T> code = StringCharCodeAt(receiver, position);
// And return the single character string with only that {code}
- TNode<String> result = StringFromCharCode(code);
+ TNode<String> result = StringFromSingleCharCode(code);
Return(result);
}
@@ -586,7 +586,7 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
Node* code32 = TruncateTaggedToWord32(context, code);
TNode<Int32T> code16 =
Signed(Word32And(code32, Int32Constant(String::kMaxUtf16CodeUnit)));
- Node* result = StringFromCharCode(code16);
+ Node* result = StringFromSingleCharCode(code16);
arguments.PopAndReturn(result);
}
@@ -682,7 +682,7 @@ TF_BUILTIN(StringPrototypeCharAt, StringBuiltinsAssembler) {
[this](TNode<String> string, TNode<IntPtrT> length,
TNode<IntPtrT> index) {
TNode<Int32T> code = StringCharCodeAt(string, index);
- return StringFromCharCode(code);
+ return StringFromSingleCharCode(code);
});
}
@@ -1024,7 +1024,7 @@ void StringBuiltinsAssembler::RequireObjectCoercible(Node* const context,
void StringBuiltinsAssembler::MaybeCallFunctionAtSymbol(
Node* const context, Node* const object, Node* const maybe_string,
Handle<Symbol> symbol, const NodeFunction0& regexp_call,
- const NodeFunction1& generic_call, CodeStubArguments* args) {
+ const NodeFunction1& generic_call) {
Label out(this);
// Smis definitely don't have an attached symbol.
@@ -1069,12 +1069,7 @@ void StringBuiltinsAssembler::MaybeCallFunctionAtSymbol(
BIND(&stub_call);
// TODO(jgruber): Add a no-JS scope once it exists.
- Node* const result = regexp_call();
- if (args == nullptr) {
- Return(result);
- } else {
- args->PopAndReturn(result);
- }
+ regexp_call();
BIND(&slow_lookup);
}
@@ -1094,12 +1089,7 @@ void StringBuiltinsAssembler::MaybeCallFunctionAtSymbol(
GotoIf(IsNull(maybe_func), &out);
// Attempt to call the function.
- Node* const result = generic_call(maybe_func);
- if (args == nullptr) {
- Return(result);
- } else {
- args->PopAndReturn(result);
- }
+ generic_call(maybe_func);
BIND(&out);
}
@@ -1294,12 +1284,12 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
MaybeCallFunctionAtSymbol(
context, search, receiver, isolate()->factory()->replace_symbol(),
[=]() {
- return CallBuiltin(Builtins::kRegExpReplace, context, search, receiver,
- replace);
+ Return(CallBuiltin(Builtins::kRegExpReplace, context, search, receiver,
+ replace));
},
[=](Node* fn) {
Callable call_callable = CodeFactory::Call(isolate());
- return CallJS(call_callable, context, fn, search, receiver, replace);
+ Return(CallJS(call_callable, context, fn, search, receiver, replace));
});
// Convert {receiver} and {search} to strings.
@@ -1439,8 +1429,9 @@ class StringMatchSearchAssembler : public StringBuiltinsAssembler {
protected:
enum Variant { kMatch, kSearch };
- void Generate(Variant variant, const char* method_name, Node* const receiver,
- Node* maybe_regexp, Node* const context) {
+ void Generate(Variant variant, const char* method_name,
+ TNode<Object> receiver, TNode<Object> maybe_regexp,
+ TNode<Context> context) {
Label call_regexp_match_search(this);
Builtins::Name builtin;
@@ -1457,33 +1448,24 @@ class StringMatchSearchAssembler : public StringBuiltinsAssembler {
MaybeCallFunctionAtSymbol(
context, maybe_regexp, receiver, symbol,
- [=] { return CallBuiltin(builtin, context, maybe_regexp, receiver); },
+ [=] { Return(CallBuiltin(builtin, context, maybe_regexp, receiver)); },
[=](Node* fn) {
Callable call_callable = CodeFactory::Call(isolate());
- return CallJS(call_callable, context, fn, maybe_regexp, receiver);
+ Return(CallJS(call_callable, context, fn, maybe_regexp, receiver));
});
// maybe_regexp is not a RegExp nor has [@@match / @@search] property.
{
RegExpBuiltinsAssembler regexp_asm(state());
- Node* const receiver_string = ToString_Inline(context, receiver);
- Node* const pattern = Select(
- IsUndefined(maybe_regexp), [=] { return EmptyStringConstant(); },
- [=] { return ToString_Inline(context, maybe_regexp); },
- MachineRepresentation::kTagged);
-
- // Create RegExp
- // TODO(pwong): This could be factored out as a helper (RegExpCreate) that
- // also does the "is fast" checks.
- Node* const native_context = LoadNativeContext(context);
- Node* const regexp_function =
- LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
- Node* const initial_map = LoadObjectField(
- regexp_function, JSFunction::kPrototypeOrInitialMapOffset);
- Node* const regexp = CallRuntime(
- Runtime::kRegExpInitializeAndCompile, context,
- AllocateJSObjectFromMap(initial_map), pattern, EmptyStringConstant());
+ TNode<String> receiver_string = ToString_Inline(context, receiver);
+ TNode<Context> native_context = LoadNativeContext(context);
+ TNode<HeapObject> regexp_function = CAST(
+ LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX));
+ TNode<Map> initial_map = CAST(LoadObjectField(
+ regexp_function, JSFunction::kPrototypeOrInitialMapOffset));
+ TNode<Object> regexp = regexp_asm.RegExpCreate(
+ context, initial_map, maybe_regexp, EmptyStringConstant());
Label fast_path(this), slow_path(this);
regexp_asm.BranchIfFastRegExp(context, regexp, initial_map, &fast_path,
@@ -1494,7 +1476,7 @@ class StringMatchSearchAssembler : public StringBuiltinsAssembler {
BIND(&slow_path);
{
- Node* const maybe_func = GetProperty(context, regexp, symbol);
+ TNode<Object> maybe_func = GetProperty(context, regexp, symbol);
Callable call_callable = CodeFactory::Call(isolate());
Return(CallJS(call_callable, context, maybe_func, regexp,
receiver_string));
@@ -1505,13 +1487,52 @@ class StringMatchSearchAssembler : public StringBuiltinsAssembler {
// ES6 #sec-string.prototype.match
TF_BUILTIN(StringPrototypeMatch, StringMatchSearchAssembler) {
- Node* const receiver = Parameter(Descriptor::kReceiver);
- Node* const maybe_regexp = Parameter(Descriptor::kRegexp);
- Node* const context = Parameter(Descriptor::kContext);
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ TNode<Object> maybe_regexp = CAST(Parameter(Descriptor::kRegexp));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Generate(kMatch, "String.prototype.match", receiver, maybe_regexp, context);
}
+// ES #sec-string.prototype.matchAll
+TF_BUILTIN(StringPrototypeMatchAll, StringBuiltinsAssembler) {
+ char const* method_name = "String.prototype.matchAll";
+
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<Object> maybe_regexp = CAST(Parameter(Descriptor::kRegexp));
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ TNode<Context> native_context = LoadNativeContext(context);
+
+ // 1. Let O be ? RequireObjectCoercible(this value).
+ RequireObjectCoercible(context, receiver, method_name);
+
+ // 2. If regexp is neither undefined nor null, then
+ Label return_match_all_iterator(this);
+ GotoIf(IsNullOrUndefined(maybe_regexp), &return_match_all_iterator);
+ {
+ // a. Let matcher be ? GetMethod(regexp, @@matchAll).
+ // b. If matcher is not undefined, then
+ // i. Return ? Call(matcher, regexp, « O »).
+ auto if_regexp_call = [&] { Goto(&return_match_all_iterator); };
+ auto if_generic_call = [=](Node* fn) {
+ Callable call_callable = CodeFactory::Call(isolate());
+ Return(CallJS(call_callable, context, fn, maybe_regexp, receiver));
+ };
+ MaybeCallFunctionAtSymbol(context, maybe_regexp, receiver,
+ isolate()->factory()->match_all_symbol(),
+ if_regexp_call, if_generic_call);
+ Goto(&return_match_all_iterator);
+ }
+ BIND(&return_match_all_iterator);
+ {
+ // 3. Return ? MatchAllIterator(regexp, O).
+ RegExpBuiltinsAssembler regexp_asm(state());
+ TNode<Object> iterator = regexp_asm.MatchAllIterator(
+ context, native_context, maybe_regexp, receiver, method_name);
+ Return(iterator);
+ }
+}
+
class StringPadAssembler : public StringBuiltinsAssembler {
public:
explicit StringPadAssembler(compiler::CodeAssemblerState* state)
@@ -1640,9 +1661,9 @@ TF_BUILTIN(StringPrototypePadStart, StringPadAssembler) {
// ES6 #sec-string.prototype.search
TF_BUILTIN(StringPrototypeSearch, StringMatchSearchAssembler) {
- Node* const receiver = Parameter(Descriptor::kReceiver);
- Node* const maybe_regexp = Parameter(Descriptor::kRegexp);
- Node* const context = Parameter(Descriptor::kContext);
+ TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
+ TNode<Object> maybe_regexp = CAST(Parameter(Descriptor::kRegexp));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Generate(kSearch, "String.prototype.search", receiver, maybe_regexp, context);
}
@@ -1719,23 +1740,22 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
MaybeCallFunctionAtSymbol(
context, separator, receiver, isolate()->factory()->split_symbol(),
- [=]() {
- return CallBuiltin(Builtins::kRegExpSplit, context, separator, receiver,
- limit);
+ [&]() {
+ args.PopAndReturn(CallBuiltin(Builtins::kRegExpSplit, context,
+ separator, receiver, limit));
},
- [=](Node* fn) {
+ [&](Node* fn) {
Callable call_callable = CodeFactory::Call(isolate());
- return CallJS(call_callable, context, fn, separator, receiver, limit);
- },
- &args);
+ args.PopAndReturn(
+ CallJS(call_callable, context, fn, separator, receiver, limit));
+ });
// String and integer conversions.
Node* const subject_string = ToString_Inline(context, receiver);
- Node* const limit_number =
- Select(IsUndefined(limit), [=]() { return NumberConstant(kMaxUInt32); },
- [=]() { return ToUint32(context, limit); },
- MachineRepresentation::kTagged);
+ TNode<Number> const limit_number = Select<Number>(
+ IsUndefined(limit), [=] { return NumberConstant(kMaxUInt32); },
+ [=] { return ToUint32(context, limit); });
Node* const separator_string = ToString_Inline(context, separator);
// Shortcut for {limit} == 0.
@@ -1923,7 +1943,7 @@ TNode<Smi> StringBuiltinsAssembler::ToSmiBetweenZeroAnd(
{
TNode<Smi> const zero = SmiConstant(0);
var_result =
- SelectTaggedConstant(SmiLessThan(value_smi, zero), zero, limit);
+ SelectConstant<Smi>(SmiLessThan(value_smi, zero), zero, limit);
Goto(&out);
}
}
@@ -1936,8 +1956,8 @@ TNode<Smi> StringBuiltinsAssembler::ToSmiBetweenZeroAnd(
TNode<Float64T> const float_zero = Float64Constant(0.);
TNode<Smi> const smi_zero = SmiConstant(0);
TNode<Float64T> const value_float = LoadHeapNumberValue(value_int_hn);
- var_result = SelectTaggedConstant(Float64LessThan(value_float, float_zero),
- smi_zero, limit);
+ var_result = SelectConstant<Smi>(Float64LessThan(value_float, float_zero),
+ smi_zero, limit);
Goto(&out);
}
@@ -2305,7 +2325,7 @@ TF_BUILTIN(StringIteratorPrototypeNext, StringBuiltinsAssembler) {
{
UnicodeEncoding encoding = UnicodeEncoding::UTF16;
TNode<Int32T> ch = LoadSurrogatePairAt(string, length, position, encoding);
- TNode<String> value = StringFromCodePoint(ch, encoding);
+ TNode<String> value = StringFromSingleCodePoint(ch, encoding);
var_value.Bind(value);
TNode<IntPtrT> length = LoadStringLengthAsWord(value);
StoreObjectFieldNoWriteBarrier(iterator, JSStringIterator::kNextIndexOffset,
diff --git a/deps/v8/src/builtins/builtins-string-gen.h b/deps/v8/src/builtins/builtins-string-gen.h
index 1bd5429fdb..4147b3fc0c 100644
--- a/deps/v8/src/builtins/builtins-string-gen.h
+++ b/deps/v8/src/builtins/builtins-string-gen.h
@@ -95,14 +95,13 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
//
// Contains fast paths for Smi and RegExp objects.
// Important: {regexp_call} may not contain any code that can call into JS.
- typedef std::function<Node*()> NodeFunction0;
- typedef std::function<Node*(Node* fn)> NodeFunction1;
+ typedef std::function<void()> NodeFunction0;
+ typedef std::function<void(Node* fn)> NodeFunction1;
void MaybeCallFunctionAtSymbol(Node* const context, Node* const object,
Node* const maybe_string,
Handle<Symbol> symbol,
const NodeFunction0& regexp_call,
- const NodeFunction1& generic_call,
- CodeStubArguments* args = nullptr);
+ const NodeFunction1& generic_call);
};
class StringIncludesIndexOfAssembler : public StringBuiltinsAssembler {
diff --git a/deps/v8/src/builtins/builtins-string.cc b/deps/v8/src/builtins/builtins-string.cc
index d2e447538d..854bb5e58a 100644
--- a/deps/v8/src/builtins/builtins-string.cc
+++ b/deps/v8/src/builtins/builtins-string.cc
@@ -330,7 +330,7 @@ inline bool ToUpperOverflows(uc32 character) {
}
template <class Converter>
-MUST_USE_RESULT static Object* ConvertCaseHelper(
+V8_WARN_UNUSED_RESULT static Object* ConvertCaseHelper(
Isolate* isolate, String* string, SeqString* result, int result_length,
unibrow::Mapping<Converter, 128>* mapping) {
DisallowHeapAllocation no_gc;
@@ -427,7 +427,7 @@ MUST_USE_RESULT static Object* ConvertCaseHelper(
}
template <class Converter>
-MUST_USE_RESULT static Object* ConvertCase(
+V8_WARN_UNUSED_RESULT static Object* ConvertCase(
Handle<String> s, Isolate* isolate,
unibrow::Mapping<Converter, 128>* mapping) {
s = String::Flatten(s);
diff --git a/deps/v8/src/builtins/builtins-symbol.cc b/deps/v8/src/builtins/builtins-symbol.cc
index 77bb09d0f7..1343a293bd 100644
--- a/deps/v8/src/builtins/builtins-symbol.cc
+++ b/deps/v8/src/builtins/builtins-symbol.cc
@@ -11,27 +11,25 @@ namespace v8 {
namespace internal {
// -----------------------------------------------------------------------------
-// ES6 section 19.4 Symbol Objects
+// ES #sec-symbol-objects
-// ES6 section 19.4.1.1 Symbol ( [ description ] ) for the [[Call]] case.
+// ES #sec-symbol-constructor
BUILTIN(SymbolConstructor) {
HandleScope scope(isolate);
- Handle<Symbol> result = isolate->factory()->NewSymbol();
- Handle<Object> description = args.atOrUndefined(isolate, 1);
- if (!description->IsUndefined(isolate)) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, description,
- Object::ToString(isolate, description));
- result->set_name(*description);
+ if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
+ Handle<Symbol> result = isolate->factory()->NewSymbol();
+ Handle<Object> description = args.atOrUndefined(isolate, 1);
+ if (!description->IsUndefined(isolate)) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, description, Object::ToString(isolate, description));
+ result->set_name(*description);
+ }
+ return *result;
+ } else { // [[Construct]]
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kNotConstructor,
+ isolate->factory()->Symbol_string()));
}
- return *result;
-}
-
-// ES6 section 19.4.1.1 Symbol ( [ description ] ) for the [[Construct]] case.
-BUILTIN(SymbolConstructor_ConstructStub) {
- HandleScope scope(isolate);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kNotConstructor,
- isolate->factory()->Symbol_string()));
}
// ES6 section 19.4.2.1 Symbol.for.
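The rewritten SymbolConstructor above folds the former construct stub into a single builtin that branches on new.target: a plain call produces a symbol, while construction throws. A minimal behavioral sketch in TypeScript (illustrative only; the `as any` cast exists solely to express the invalid `new` call):

  const s = Symbol("desc");                 // [[Call]]: new symbol with description "desc"
  console.log(typeof s, s.toString());      // "symbol" "Symbol(desc)"

  try {
    new (Symbol as any)("desc");            // [[Construct]]: Symbol is not a constructor
  } catch (e) {
    console.log(e instanceof TypeError);    // true
  }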
diff --git a/deps/v8/src/builtins/builtins-typedarray-gen.cc b/deps/v8/src/builtins/builtins-typedarray-gen.cc
index 2a4f23b003..e5ac615cca 100644
--- a/deps/v8/src/builtins/builtins-typedarray-gen.cc
+++ b/deps/v8/src/builtins/builtins-typedarray-gen.cc
@@ -36,9 +36,7 @@ TNode<Map> TypedArrayBuiltinsAssembler::LoadMapForType(
DispatchTypedArrayByElementsKind(
elements_kind,
[&](ElementsKind kind, int size, int typed_array_fun_index) {
- ExternalArrayType type =
- isolate()->factory()->GetArrayTypeFromElementsKind(kind);
- Handle<Map> map(isolate()->heap()->MapForFixedTypedArray(type));
+ Handle<Map> map(isolate()->heap()->MapForFixedTypedArray(kind));
var_typed_map = HeapConstant(map);
});
@@ -630,40 +628,33 @@ void TypedArrayBuiltinsAssembler::ConstructByIterable(
element_size);
}
-TF_BUILTIN(TypedArrayConstructor, TypedArrayBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
-
- // If NewTarget is undefined, throw a TypeError exception.
- Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
- MachineType::TaggedPointer());
- Node* shared = LoadObjectField(target, JSFunction::kSharedFunctionInfoOffset);
- Node* name = LoadObjectField(shared, SharedFunctionInfo::kNameOffset);
- ThrowTypeError(context, MessageTemplate::kConstructorNotFunction, name);
+TF_BUILTIN(TypedArrayBaseConstructor, TypedArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ ThrowTypeError(context, MessageTemplate::kConstructAbstractClass,
+ "TypedArray");
}
-TF_BUILTIN(TypedArrayConstructor_ConstructStub, TypedArrayBuiltinsAssembler) {
- Label if_arg1isbuffer(this), if_arg1istypedarray(this),
- if_arg1isreceiver(this), if_arg1isnumber(this), done(this);
+// ES #sec-typedarray-constructors
+TF_BUILTIN(CreateTypedArray, TypedArrayBuiltinsAssembler) {
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+ TNode<JSFunction> target = CAST(Parameter(Descriptor::kTarget));
+ TNode<JSReceiver> new_target = CAST(Parameter(Descriptor::kNewTarget));
+ TNode<Object> arg1 = CAST(Parameter(Descriptor::kArg1));
+ TNode<Object> arg2 = CAST(Parameter(Descriptor::kArg2));
+ TNode<Object> arg3 = CAST(Parameter(Descriptor::kArg3));
- TNode<Object> new_target = CAST(Parameter(BuiltinDescriptor::kNewTarget));
- CSA_ASSERT(this, IsNotUndefined(new_target));
+ CSA_ASSERT(this, IsConstructor(target));
+ CSA_ASSERT(this, IsJSReceiver(new_target));
- Node* argc =
- ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
- CodeStubArguments args(this, argc);
- TNode<Object> arg1 = args.GetOptionalArgumentValue(0);
- TNode<Object> arg2 = args.GetOptionalArgumentValue(1);
- TNode<Object> arg3 = args.GetOptionalArgumentValue(2);
- TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
+ Label if_arg1isbuffer(this), if_arg1istypedarray(this),
+ if_arg1isreceiver(this), if_arg1isnumber(this), return_result(this);
- Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
- MachineType::TaggedPointer());
ConstructorBuiltinsAssembler constructor_assembler(this->state());
- TNode<JSTypedArray> holder = CAST(
+ TNode<JSTypedArray> result = CAST(
constructor_assembler.EmitFastNewObject(context, target, new_target));
TNode<Smi> element_size =
- SmiTag(GetTypedArrayElementSize(LoadElementsKind(holder)));
+ SmiTag(GetTypedArrayElementSize(LoadElementsKind(result)));
GotoIf(TaggedIsSmi(arg1), &if_arg1isnumber);
GotoIf(IsJSArrayBuffer(arg1), &if_arg1isbuffer);
@@ -671,15 +662,23 @@ TF_BUILTIN(TypedArrayConstructor_ConstructStub, TypedArrayBuiltinsAssembler) {
GotoIf(IsJSReceiver(arg1), &if_arg1isreceiver);
Goto(&if_arg1isnumber);
+ // https://tc39.github.io/ecma262/#sec-typedarray-buffer-byteoffset-length
BIND(&if_arg1isbuffer);
- ConstructByArrayBuffer(context, holder, CAST(arg1), arg2, arg3, element_size);
- Goto(&done);
+ {
+ ConstructByArrayBuffer(context, result, CAST(arg1), arg2, arg3,
+ element_size);
+ Goto(&return_result);
+ }
+ // https://tc39.github.io/ecma262/#sec-typedarray-typedarray
BIND(&if_arg1istypedarray);
- TNode<JSTypedArray> typed_array = CAST(arg1);
- ConstructByTypedArray(context, holder, typed_array, element_size);
- Goto(&done);
+ {
+ TNode<JSTypedArray> typed_array = CAST(arg1);
+ ConstructByTypedArray(context, result, typed_array, element_size);
+ Goto(&return_result);
+ }
+ // https://tc39.github.io/ecma262/#sec-typedarray-object
BIND(&if_arg1isreceiver);
{
Label if_iteratorundefined(this), if_iteratornotcallable(this);
@@ -690,8 +689,8 @@ TF_BUILTIN(TypedArrayConstructor_ConstructStub, TypedArrayBuiltinsAssembler) {
GotoIf(TaggedIsSmi(iteratorFn), &if_iteratornotcallable);
GotoIfNot(IsCallable(iteratorFn), &if_iteratornotcallable);
- ConstructByIterable(context, holder, CAST(arg1), iteratorFn, element_size);
- Goto(&done);
+ ConstructByIterable(context, result, CAST(arg1), iteratorFn, element_size);
+ Goto(&return_result);
BIND(&if_iteratorundefined);
{
@@ -699,22 +698,61 @@ TF_BUILTIN(TypedArrayConstructor_ConstructStub, TypedArrayBuiltinsAssembler) {
TNode<Object> initial_length =
GetProperty(context, arg1, LengthStringConstant());
- ConstructByArrayLike(context, holder, array_like, initial_length,
+ ConstructByArrayLike(context, result, array_like, initial_length,
element_size);
- Goto(&done);
+ Goto(&return_result);
}
BIND(&if_iteratornotcallable);
{ ThrowTypeError(context, MessageTemplate::kIteratorSymbolNonCallable); }
}
- // First arg was a number or fell through and will be treated as a number.
+ // The first argument was a number or fell through and is treated as
+ // a number. https://tc39.github.io/ecma262/#sec-typedarray-length
BIND(&if_arg1isnumber);
- ConstructByLength(context, holder, arg1, element_size);
- Goto(&done);
+ {
+ ConstructByLength(context, result, arg1, element_size);
+ Goto(&return_result);
+ }
- BIND(&done);
- args.PopAndReturn(holder);
+ BIND(&return_result);
+ Return(result);
+}
+
+TF_BUILTIN(TypedArrayConstructorLazyDeoptContinuation,
+ TypedArrayBuiltinsAssembler) {
+ Node* result = Parameter(Descriptor::kResult);
+ Return(result);
+}
+
+// ES #sec-typedarray-constructors
+TF_BUILTIN(TypedArrayConstructor, TypedArrayBuiltinsAssembler) {
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+ Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
+ MachineType::TaggedPointer());
+ Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+ Node* arg1 = args.GetOptionalArgumentValue(0);
+ Node* arg2 = args.GetOptionalArgumentValue(1);
+ Node* arg3 = args.GetOptionalArgumentValue(2);
+
+ // If NewTarget is undefined, throw a TypeError exception.
+ // All the TypedArray constructors have this as the first step:
+ // https://tc39.github.io/ecma262/#sec-typedarray-constructors
+ Label throwtypeerror(this, Label::kDeferred);
+ GotoIf(IsUndefined(new_target), &throwtypeerror);
+
+ Node* result = CallBuiltin(Builtins::kCreateTypedArray, context, target,
+ new_target, arg1, arg2, arg3);
+ args.PopAndReturn(result);
+
+ BIND(&throwtypeerror);
+ {
+ Node* name = CallRuntime(Runtime::kGetFunctionName, context, target);
+ ThrowTypeError(context, MessageTemplate::kConstructorNotFunction, name);
+ }
}
void TypedArrayBuiltinsAssembler::GenerateTypedArrayPrototypeGetter(
@@ -829,11 +867,10 @@ TNode<Object> TypedArrayBuiltinsAssembler::TypedArraySpeciesConstructor(
var_constructor = default_constructor;
Node* map = LoadMap(exemplar);
GotoIfNot(IsPrototypeTypedArrayPrototype(context, map), &slow);
- Branch(IsSpeciesProtectorCellInvalid(), &slow, &done);
+ Branch(IsTypedArraySpeciesProtectorCellInvalid(), &slow, &done);
BIND(&slow);
- var_constructor =
- CAST(SpeciesConstructor(context, exemplar, default_constructor));
+ var_constructor = SpeciesConstructor(context, exemplar, default_constructor);
Goto(&done);
BIND(&done);
@@ -1260,8 +1297,7 @@ TF_BUILTIN(TypedArrayPrototypeSlice, TypedArrayBuiltinsAssembler) {
[=] {
return SmiTag(ConvertToRelativeIndex(
context, end, SmiUntag(source_length)));
- },
- MachineRepresentation::kTagged);
+ });
// Create a result array by invoking TypedArraySpeciesCreate.
TNode<Smi> count = SmiMax(SmiSub(end_index, start_index), SmiConstant(0));
@@ -1460,15 +1496,11 @@ TF_BUILTIN(TypedArrayPrototypeToStringTag, TypedArrayBuiltinsAssembler) {
void TypedArrayBuiltinsAssembler::GenerateTypedArrayPrototypeIterationMethod(
Node* context, Node* receiver, const char* method_name,
- IterationKind iteration_kind) {
+ IterationKind kind) {
Label throw_bad_receiver(this, Label::kDeferred);
GotoIf(TaggedIsSmi(receiver), &throw_bad_receiver);
-
- Node* map = LoadMap(receiver);
- Node* instance_type = LoadMapInstanceType(map);
- GotoIfNot(InstanceTypeEqual(instance_type, JS_TYPED_ARRAY_TYPE),
- &throw_bad_receiver);
+ GotoIfNot(IsJSTypedArray(receiver), &throw_bad_receiver);
// Check if the {receiver}'s JSArrayBuffer was neutered.
Node* receiver_buffer =
@@ -1476,8 +1508,7 @@ void TypedArrayBuiltinsAssembler::GenerateTypedArrayPrototypeIterationMethod(
Label if_receiverisneutered(this, Label::kDeferred);
GotoIf(IsDetachedBuffer(receiver_buffer), &if_receiverisneutered);
- Return(CreateArrayIterator(receiver, map, instance_type, context,
- iteration_kind));
+ Return(CreateArrayIterator(context, receiver, kind));
BIND(&throw_bad_receiver);
ThrowTypeError(context, MessageTemplate::kNotTypedArray, method_name);
@@ -1486,7 +1517,7 @@ void TypedArrayBuiltinsAssembler::GenerateTypedArrayPrototypeIterationMethod(
ThrowTypeError(context, MessageTemplate::kDetachedOperation, method_name);
}
-// ES6 #sec-%typedarray%.prototype.values
+// ES #sec-%typedarray%.prototype.values
TF_BUILTIN(TypedArrayPrototypeValues, TypedArrayBuiltinsAssembler) {
Node* context = Parameter(Descriptor::kContext);
Node* receiver = Parameter(Descriptor::kReceiver);
@@ -1495,7 +1526,7 @@ TF_BUILTIN(TypedArrayPrototypeValues, TypedArrayBuiltinsAssembler) {
IterationKind::kValues);
}
-// ES6 #sec-%typedarray%.prototype.entries
+// ES #sec-%typedarray%.prototype.entries
TF_BUILTIN(TypedArrayPrototypeEntries, TypedArrayBuiltinsAssembler) {
Node* context = Parameter(Descriptor::kContext);
Node* receiver = Parameter(Descriptor::kReceiver);
@@ -1504,7 +1535,7 @@ TF_BUILTIN(TypedArrayPrototypeEntries, TypedArrayBuiltinsAssembler) {
IterationKind::kEntries);
}
-// ES6 #sec-%typedarray%.prototype.keys
+// ES #sec-%typedarray%.prototype.keys
TF_BUILTIN(TypedArrayPrototypeKeys, TypedArrayBuiltinsAssembler) {
Node* context = Parameter(Descriptor::kContext);
Node* receiver = Parameter(Descriptor::kReceiver);
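The TypedArrayConstructor trampoline introduced above checks new.target and only then calls the shared CreateTypedArray builtin, so every concrete typed-array constructor rejects plain calls before doing any allocation work. A short behavioral sketch (illustrative only; the `as any` cast is just to express the invalid call in TypeScript):

  const ta = new Uint8Array(4);             // new.target set: CreateTypedArray path
  console.log(ta.length);                   // 4

  try {
    (Uint8Array as any)(4);                 // no new.target: TypeError from the builtin
  } catch (e) {
    console.log(e instanceof TypeError);    // true
  }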
diff --git a/deps/v8/src/builtins/builtins-utils.h b/deps/v8/src/builtins/builtins-utils.h
index 3493e776b6..35a77c7518 100644
--- a/deps/v8/src/builtins/builtins-utils.h
+++ b/deps/v8/src/builtins/builtins-utils.h
@@ -8,7 +8,7 @@
#include "src/arguments.h"
#include "src/base/logging.h"
#include "src/builtins/builtins.h"
-#include "src/factory.h"
+#include "src/heap/factory.h"
#include "src/isolate.h"
namespace v8 {
@@ -79,8 +79,8 @@ class BuiltinArguments : public Arguments {
// TODO(cbruni): add global flag to check whether any tracing events have been
// enabled.
#define BUILTIN(name) \
- MUST_USE_RESULT static Object* Builtin_Impl_##name(BuiltinArguments args, \
- Isolate* isolate); \
+ V8_WARN_UNUSED_RESULT static Object* Builtin_Impl_##name( \
+ BuiltinArguments args, Isolate* isolate); \
\
V8_NOINLINE static Object* Builtin_Impl_Stats_##name( \
int args_length, Object** args_object, Isolate* isolate) { \
@@ -92,7 +92,7 @@ class BuiltinArguments : public Arguments {
return Builtin_Impl_##name(args, isolate); \
} \
\
- MUST_USE_RESULT Object* Builtin_##name( \
+ V8_WARN_UNUSED_RESULT Object* Builtin_##name( \
int args_length, Object** args_object, Isolate* isolate) { \
DCHECK(isolate->context() == nullptr || isolate->context()->IsContext()); \
if (V8_UNLIKELY(FLAG_runtime_stats)) { \
@@ -102,8 +102,8 @@ class BuiltinArguments : public Arguments {
return Builtin_Impl_##name(args, isolate); \
} \
\
- MUST_USE_RESULT static Object* Builtin_Impl_##name(BuiltinArguments args, \
- Isolate* isolate)
+ V8_WARN_UNUSED_RESULT static Object* Builtin_Impl_##name( \
+ BuiltinArguments args, Isolate* isolate)
// ----------------------------------------------------------------------------
diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc
index ad1763a292..c348248fff 100644
--- a/deps/v8/src/builtins/builtins.cc
+++ b/deps/v8/src/builtins/builtins.cc
@@ -195,16 +195,15 @@ Address Builtins::CppEntryOf(int index) {
}
// static
-bool Builtins::IsBuiltin(Code* code) {
+bool Builtins::IsBuiltin(const Code* code) {
return Builtins::IsBuiltinId(code->builtin_index());
}
// static
-bool Builtins::IsOffHeapBuiltin(Code* code) {
+bool Builtins::IsEmbeddedBuiltin(const Code* code) {
#ifdef V8_EMBEDDED_BUILTINS
- return FLAG_stress_off_heap_code &&
- Builtins::IsBuiltinId(code->builtin_index()) &&
- Builtins::IsOffHeapSafe(code->builtin_index());
+ return Builtins::IsBuiltinId(code->builtin_index()) &&
+ Builtins::IsIsolateIndependent(code->builtin_index());
#else
return false;
#endif
@@ -213,6 +212,12 @@ bool Builtins::IsOffHeapBuiltin(Code* code) {
// static
bool Builtins::IsLazy(int index) {
DCHECK(IsBuiltinId(index));
+
+#ifdef V8_EMBEDDED_BUILTINS
+ // We don't want to lazy-deserialize off-heap builtins.
+ if (Builtins::IsIsolateIndependent(index)) return false;
+#endif
+
// There are a couple of reasons that builtins can require eager-loading,
// i.e. deserialization at isolate creation instead of on-demand. For
// instance:
@@ -225,50 +230,41 @@ bool Builtins::IsLazy(int index) {
// TODO(wasm): Remove wasm builtins once immovability is no longer required.
switch (index) {
case kAbort: // Required by wasm.
- case kArrayFindLoopEagerDeoptContinuation: // https://crbug.com/v8/6786.
- case kArrayFindLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
- // https://crbug.com/v8/6786.
- case kArrayFindLoopAfterCallbackLazyDeoptContinuation:
- // https://crbug.com/v8/6786.
+ case kArrayEveryLoopEagerDeoptContinuation:
+ case kArrayEveryLoopLazyDeoptContinuation:
+ case kArrayFilterLoopEagerDeoptContinuation:
+ case kArrayFilterLoopLazyDeoptContinuation:
+ case kArrayFindIndexLoopAfterCallbackLazyDeoptContinuation:
case kArrayFindIndexLoopEagerDeoptContinuation:
- // https://crbug.com/v8/6786.
case kArrayFindIndexLoopLazyDeoptContinuation:
- // https://crbug.com/v8/6786.
- case kArrayFindIndexLoopAfterCallbackLazyDeoptContinuation:
- case kArrayForEachLoopEagerDeoptContinuation: // https://crbug.com/v8/6786.
- case kArrayForEachLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
- case kArrayMapLoopEagerDeoptContinuation: // https://crbug.com/v8/6786.
- case kArrayMapLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
- case kArrayEveryLoopEagerDeoptContinuation: // https://crbug.com/v8/6786.
- case kArrayEveryLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
- case kArrayFilterLoopEagerDeoptContinuation: // https://crbug.com/v8/6786.
- case kArrayFilterLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
+ case kArrayFindLoopAfterCallbackLazyDeoptContinuation:
+ case kArrayFindLoopEagerDeoptContinuation:
+ case kArrayFindLoopLazyDeoptContinuation:
+ case kArrayForEachLoopEagerDeoptContinuation:
+ case kArrayForEachLoopLazyDeoptContinuation:
+ case kArrayMapLoopEagerDeoptContinuation:
+ case kArrayMapLoopLazyDeoptContinuation:
+ case kArrayReduceLoopEagerDeoptContinuation:
+ case kArrayReduceLoopLazyDeoptContinuation:
case kArrayReducePreLoopEagerDeoptContinuation:
- case kArrayReduceLoopEagerDeoptContinuation: // https://crbug.com/v8/6786.
- case kArrayReduceLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
- case kArrayReduceRightPreLoopEagerDeoptContinuation:
case kArrayReduceRightLoopEagerDeoptContinuation:
case kArrayReduceRightLoopLazyDeoptContinuation:
- case kArraySomeLoopEagerDeoptContinuation: // https://crbug.com/v8/6786.
- case kArraySomeLoopLazyDeoptContinuation: // https://crbug.com/v8/6786.
+ case kArrayReduceRightPreLoopEagerDeoptContinuation:
+ case kArraySomeLoopEagerDeoptContinuation:
+ case kArraySomeLoopLazyDeoptContinuation:
case kAsyncGeneratorAwaitCaught: // https://crbug.com/v8/6786.
case kAsyncGeneratorAwaitUncaught: // https://crbug.com/v8/6786.
- case kCheckOptimizationMarker:
case kCompileLazy:
+ case kDebugBreakTrampoline:
case kDeserializeLazy:
case kFunctionPrototypeHasInstance: // https://crbug.com/v8/6786.
case kHandleApiCall:
case kIllegal:
+ case kInstantiateAsmJs:
case kInterpreterEnterBytecodeAdvance:
case kInterpreterEnterBytecodeDispatch:
case kInterpreterEntryTrampoline:
- case kObjectConstructor_ConstructStub: // https://crbug.com/v8/6787.
- case kPromiseConstructorLazyDeoptContinuation: // crbug/v8/6786.
- case kProxyConstructor_ConstructStub: // https://crbug.com/v8/6787.
- case kNumberConstructor_ConstructStub: // https://crbug.com/v8/6787.
- case kStringConstructor_ConstructStub: // https://crbug.com/v8/6787.
- case kTypedArrayConstructor_ConstructStub: // https://crbug.com/v8/6787.
- case kProxyConstructor: // https://crbug.com/v8/6787.
+ case kPromiseConstructorLazyDeoptContinuation:
case kRecordWrite: // https://crbug.com/chromium/765301.
case kThrowWasmTrapDivByZero: // Required by wasm.
case kThrowWasmTrapDivUnrepresentable: // Required by wasm.
@@ -278,7 +274,9 @@ bool Builtins::IsLazy(int index) {
case kThrowWasmTrapMemOutOfBounds: // Required by wasm.
case kThrowWasmTrapRemByZero: // Required by wasm.
case kThrowWasmTrapUnreachable: // Required by wasm.
+ case kToBooleanLazyDeoptContinuation:
case kToNumber: // Required by wasm.
+ case kTypedArrayConstructorLazyDeoptContinuation:
case kWasmCompileLazy: // Required by wasm.
case kWasmStackGuard: // Required by wasm.
return false;
@@ -295,711 +293,24 @@ bool Builtins::IsIsolateIndependent(int index) {
switch (index) {
#ifdef DEBUG
case kAbortJS:
- case kAllocateHeapNumber:
- case kArrayEveryLoopEagerDeoptContinuation:
- case kArrayFilterLoopEagerDeoptContinuation:
- case kArrayFindIndexLoopEagerDeoptContinuation:
- case kArrayFindIndexLoopLazyDeoptContinuation:
- case kArrayFindLoopEagerDeoptContinuation:
- case kArrayFindLoopLazyDeoptContinuation:
- case kArrayForEachLoopEagerDeoptContinuation:
- case kArrayForEachLoopLazyDeoptContinuation:
- case kArrayMapLoopEagerDeoptContinuation:
- case kArrayReduceLoopEagerDeoptContinuation:
- case kArrayReduceLoopLazyDeoptContinuation:
- case kArrayReducePreLoopEagerDeoptContinuation:
- case kArrayReduceRightLoopEagerDeoptContinuation:
- case kArrayReduceRightLoopLazyDeoptContinuation:
- case kArrayReduceRightPreLoopEagerDeoptContinuation:
- case kArraySomeLoopEagerDeoptContinuation:
- case kBitwiseNot:
- case kBooleanPrototypeToString:
- case kBooleanPrototypeValueOf:
case kContinueToCodeStubBuiltin:
case kContinueToCodeStubBuiltinWithResult:
case kContinueToJavaScriptBuiltin:
case kContinueToJavaScriptBuiltinWithResult:
- case kDatePrototypeGetDate:
- case kDatePrototypeGetDay:
- case kDatePrototypeGetFullYear:
- case kDatePrototypeGetHours:
- case kDatePrototypeGetMilliseconds:
- case kDatePrototypeGetMinutes:
- case kDatePrototypeGetMonth:
- case kDatePrototypeGetSeconds:
- case kDatePrototypeGetTime:
- case kDatePrototypeGetTimezoneOffset:
- case kDatePrototypeGetUTCDate:
- case kDatePrototypeGetUTCDay:
- case kDatePrototypeGetUTCFullYear:
- case kDatePrototypeGetUTCHours:
- case kDatePrototypeGetUTCMilliseconds:
- case kDatePrototypeGetUTCMinutes:
- case kDatePrototypeGetUTCMonth:
- case kDatePrototypeGetUTCSeconds:
- case kDatePrototypeToPrimitive:
- case kDatePrototypeValueOf:
- case kDecrement:
- case kDivide:
- case kGlobalIsFinite:
- case kGlobalIsNaN:
- case kIncrement:
case kKeyedLoadIC_Slow:
- case kKeyedLoadICTrampoline:
case kKeyedStoreIC_Slow:
- case kKeyedStoreICTrampoline:
- case kLoadField:
- case kLoadGlobalICInsideTypeofTrampoline:
case kLoadGlobalIC_Slow:
- case kLoadGlobalICTrampoline:
case kLoadIC_Slow:
- case kLoadICTrampoline:
- case kMapPrototypeEntries:
- case kMapPrototypeGet:
- case kMapPrototypeGetSize:
- case kMapPrototypeHas:
- case kMapPrototypeKeys:
- case kMapPrototypeValues:
- case kMathAcos:
- case kMathAcosh:
- case kMathAsin:
- case kMathAsinh:
- case kMathAtan:
- case kMathAtan2:
- case kMathAtanh:
- case kMathCbrt:
- case kMathCeil:
- case kMathCos:
- case kMathCosh:
- case kMathExp:
- case kMathExpm1:
- case kMathFloor:
- case kMathFround:
- case kMathLog:
- case kMathLog10:
- case kMathLog1p:
- case kMathLog2:
- case kMathMax:
- case kMathMin:
- case kMathRound:
- case kMathSign:
- case kMathSin:
- case kMathSinh:
- case kMathSqrt:
- case kMathTan:
- case kMathTanh:
- case kMathTrunc:
- case kModulus:
- case kMultiply:
- case kNonPrimitiveToPrimitive_Default:
- case kNonPrimitiveToPrimitive_Number:
- case kNonPrimitiveToPrimitive_String:
- case kNumberIsFinite:
- case kNumberIsInteger:
- case kNumberIsNaN:
- case kNumberIsSafeInteger:
- case kNumberPrototypeValueOf:
- case kObjectPrototypeToLocaleString:
- case kObjectPrototypeValueOf:
- case kPromiseCapabilityDefaultReject:
- case kPromiseCapabilityDefaultResolve:
- case kPromiseConstructorLazyDeoptContinuation:
- case kPromiseInternalReject:
- case kPromiseInternalResolve:
- case kPromiseResolveTrampoline:
- case kPromiseThrowerFinally:
- case kPromiseValueThunkFinally:
- case kProxyConstructor:
- case kReflectHas:
- case kRegExpPrototypeDotAllGetter:
- case kRegExpPrototypeGlobalGetter:
- case kRegExpPrototypeIgnoreCaseGetter:
- case kRegExpPrototypeMultilineGetter:
- case kRegExpPrototypeSourceGetter:
- case kRegExpPrototypeStickyGetter:
- case kRegExpPrototypeUnicodeGetter:
- case kReturnReceiver:
- case kSetPrototypeEntries:
- case kSetPrototypeGetSize:
- case kSetPrototypeValues:
case kStoreGlobalIC_Slow:
- case kStoreGlobalICTrampoline:
- case kStoreICTrampoline:
- case kStringPrototypeBig:
- case kStringPrototypeBlink:
- case kStringPrototypeBold:
- case kStringPrototypeConcat:
- case kStringPrototypeFixed:
- case kStringPrototypeItalics:
- case kStringPrototypeIterator:
- case kStringPrototypeSmall:
- case kStringPrototypeStrike:
- case kStringPrototypeSub:
- case kStringPrototypeSup:
-#ifdef V8_INTL_SUPPORT
- case kStringPrototypeToLowerCaseIntl:
-#endif
- case kSubtract:
- case kSymbolPrototypeToPrimitive:
- case kSymbolPrototypeToString:
- case kSymbolPrototypeValueOf:
- case kThrowWasmTrapDivByZero:
- case kThrowWasmTrapDivUnrepresentable:
- case kThrowWasmTrapFloatUnrepresentable:
- case kThrowWasmTrapFuncInvalid:
- case kThrowWasmTrapFuncSigMismatch:
- case kThrowWasmTrapMemOutOfBounds:
- case kThrowWasmTrapRemByZero:
- case kThrowWasmTrapUnreachable:
- case kToInteger:
- case kTypedArrayConstructor:
case kWasmStackGuard:
- case kWeakMapGet:
- case kWeakMapHas:
- case kWeakMapPrototypeDelete:
- case kWeakMapPrototypeSet:
- case kWeakSetHas:
- case kWeakSetPrototypeAdd:
- case kWeakSetPrototypeDelete:
-#else
- case kAbortJS:
- case kAdd:
- case kAllocateHeapNumber:
- case kArrayEvery:
- case kArrayEveryLoopContinuation:
- case kArrayEveryLoopEagerDeoptContinuation:
- case kArrayEveryLoopLazyDeoptContinuation:
- case kArrayFilterLoopEagerDeoptContinuation:
- case kArrayFilterLoopLazyDeoptContinuation:
- case kArrayFindIndexLoopAfterCallbackLazyDeoptContinuation:
- case kArrayFindIndexLoopContinuation:
- case kArrayFindIndexLoopEagerDeoptContinuation:
- case kArrayFindIndexLoopLazyDeoptContinuation:
- case kArrayFindLoopAfterCallbackLazyDeoptContinuation:
- case kArrayFindLoopContinuation:
- case kArrayFindLoopEagerDeoptContinuation:
- case kArrayFindLoopLazyDeoptContinuation:
- case kArrayForEach:
- case kArrayForEachLoopContinuation:
- case kArrayForEachLoopEagerDeoptContinuation:
- case kArrayForEachLoopLazyDeoptContinuation:
- case kArrayFrom:
- case kArrayIncludes:
- case kArrayIndexOf:
- case kArrayIsArray:
- case kArrayMapLoopContinuation:
- case kArrayMapLoopEagerDeoptContinuation:
- case kArrayMapLoopLazyDeoptContinuation:
- case kArrayOf:
- case kArrayPrototypeEntries:
- case kArrayPrototypeFind:
- case kArrayPrototypeFindIndex:
- case kArrayPrototypeKeys:
- case kArrayPrototypeSlice:
- case kArrayPrototypeValues:
- case kArrayReduce:
- case kArrayReduceLoopContinuation:
- case kArrayReduceLoopEagerDeoptContinuation:
- case kArrayReduceLoopLazyDeoptContinuation:
- case kArrayReducePreLoopEagerDeoptContinuation:
- case kArrayReduceRight:
- case kArrayReduceRightLoopContinuation:
- case kArrayReduceRightLoopEagerDeoptContinuation:
- case kArrayReduceRightLoopLazyDeoptContinuation:
- case kArrayReduceRightPreLoopEagerDeoptContinuation:
- case kArraySome:
- case kArraySomeLoopContinuation:
- case kArraySomeLoopEagerDeoptContinuation:
- case kArraySomeLoopLazyDeoptContinuation:
- case kAsyncFromSyncIteratorPrototypeNext:
- case kAsyncFromSyncIteratorPrototypeReturn:
- case kAsyncFromSyncIteratorPrototypeThrow:
- case kAsyncFunctionAwaitFulfill:
- case kAsyncFunctionAwaitReject:
- case kAsyncFunctionPromiseCreate:
- case kAsyncFunctionPromiseRelease:
- case kAsyncGeneratorAwaitFulfill:
- case kAsyncGeneratorAwaitReject:
- case kAsyncGeneratorResumeNext:
- case kAsyncGeneratorReturnClosedFulfill:
- case kAsyncGeneratorReturnClosedReject:
- case kAsyncGeneratorReturnFulfill:
- case kAsyncGeneratorYieldFulfill:
- case kAsyncIteratorValueUnwrap:
- case kBitwiseNot:
- case kBooleanPrototypeToString:
- case kBooleanPrototypeValueOf:
- case kCallProxy:
- case kConstructFunction:
- case kConstructProxy:
- case kContinueToCodeStubBuiltin:
- case kContinueToCodeStubBuiltinWithResult:
- case kContinueToJavaScriptBuiltin:
- case kContinueToJavaScriptBuiltinWithResult:
- case kCreateGeneratorObject:
- case kCreateIterResultObject:
- case kCreateRegExpLiteral:
- case kDatePrototypeGetDate:
- case kDatePrototypeGetDay:
- case kDatePrototypeGetFullYear:
- case kDatePrototypeGetHours:
- case kDatePrototypeGetMilliseconds:
- case kDatePrototypeGetMinutes:
- case kDatePrototypeGetMonth:
- case kDatePrototypeGetSeconds:
- case kDatePrototypeGetTime:
- case kDatePrototypeGetTimezoneOffset:
- case kDatePrototypeGetUTCDate:
- case kDatePrototypeGetUTCDay:
- case kDatePrototypeGetUTCFullYear:
- case kDatePrototypeGetUTCHours:
- case kDatePrototypeGetUTCMilliseconds:
- case kDatePrototypeGetUTCMinutes:
- case kDatePrototypeGetUTCMonth:
- case kDatePrototypeGetUTCSeconds:
- case kDatePrototypeToPrimitive:
- case kDatePrototypeValueOf:
- case kDecrement:
- case kDeleteProperty:
- case kDivide:
- case kEqual:
- case kFastConsoleAssert:
- case kFastNewClosure:
- case kFastNewFunctionContextEval:
- case kFastNewFunctionContextFunction:
- case kFastNewObject:
- case kFindOrderedHashMapEntry:
- case kForInEnumerate:
- case kForInFilter:
- case kFunctionPrototypeHasInstance:
- case kGeneratorPrototypeNext:
- case kGeneratorPrototypeReturn:
- case kGeneratorPrototypeThrow:
- case kGetSuperConstructor:
- case kGlobalIsFinite:
- case kGlobalIsNaN:
- case kGreaterThan:
- case kGreaterThanOrEqual:
- case kHasProperty:
- case kIncrement:
- case kInstanceOf:
- case kKeyedLoadIC_Megamorphic:
- case kKeyedLoadIC_PolymorphicName:
- case kKeyedLoadIC_Slow:
- case kKeyedLoadICTrampoline:
- case kKeyedStoreIC_Slow:
- case kKeyedStoreICTrampoline:
- case kLessThan:
- case kLessThanOrEqual:
- case kLoadField:
- case kLoadGlobalIC:
- case kLoadGlobalICInsideTypeof:
- case kLoadGlobalICInsideTypeofTrampoline:
- case kLoadGlobalIC_Slow:
- case kLoadGlobalICTrampoline:
- case kLoadIC:
- case kLoadIC_FunctionPrototype:
- case kLoadIC_Noninlined:
- case kLoadIC_Slow:
- case kLoadIC_StringLength:
- case kLoadIC_StringWrapperLength:
- case kLoadICTrampoline:
- case kLoadIC_Uninitialized:
- case kMapPrototypeEntries:
- case kMapPrototypeForEach:
- case kMapPrototypeGet:
- case kMapPrototypeGetSize:
- case kMapPrototypeHas:
- case kMapPrototypeKeys:
- case kMapPrototypeValues:
- case kMathAcos:
- case kMathAcosh:
- case kMathAsin:
- case kMathAsinh:
- case kMathAtan:
- case kMathAtan2:
- case kMathAtanh:
- case kMathCbrt:
- case kMathCeil:
- case kMathCos:
- case kMathCosh:
- case kMathExp:
- case kMathExpm1:
- case kMathFloor:
- case kMathFround:
- case kMathLog:
- case kMathLog10:
- case kMathLog1p:
- case kMathLog2:
- case kMathMax:
- case kMathMin:
- case kMathRound:
- case kMathSign:
- case kMathSin:
- case kMathSinh:
- case kMathSqrt:
- case kMathTan:
- case kMathTanh:
- case kMathTrunc:
- case kModulus:
- case kMultiply:
- case kNegate:
- case kNewArgumentsElements:
- case kNonNumberToNumber:
- case kNonNumberToNumeric:
- case kNonPrimitiveToPrimitive_Default:
- case kNonPrimitiveToPrimitive_Number:
- case kNonPrimitiveToPrimitive_String:
- case kNumberConstructor:
- case kNumberIsFinite:
- case kNumberIsInteger:
- case kNumberIsNaN:
- case kNumberIsSafeInteger:
- case kNumberParseFloat:
- case kNumberPrototypeValueOf:
- case kNumberToString:
- case kObjectConstructor:
- case kObjectConstructor_ConstructStub:
- case kObjectCreate:
- case kObjectIs:
- case kObjectKeys:
- case kObjectPrototypeHasOwnProperty:
- case kObjectPrototypeIsPrototypeOf:
- case kObjectPrototypeToLocaleString:
- case kObjectPrototypeToString:
- case kObjectPrototypeValueOf:
- case kOrderedHashTableHealIndex:
- case kOrdinaryHasInstance:
- case kOrdinaryToPrimitive_Number:
- case kOrdinaryToPrimitive_String:
- case kPromiseAll:
- case kPromiseCapabilityDefaultReject:
- case kPromiseCapabilityDefaultResolve:
- case kPromiseCatchFinally:
- case kPromiseConstructor:
- case kPromiseConstructorLazyDeoptContinuation:
- case kPromiseFulfillReactionJob:
- case kPromiseInternalConstructor:
- case kPromiseInternalReject:
- case kPromiseInternalResolve:
- case kPromisePrototypeCatch:
- case kPromisePrototypeFinally:
- case kPromiseRace:
- case kPromiseReject:
- case kPromiseRejectReactionJob:
- case kPromiseResolve:
- case kPromiseResolveThenableJob:
- case kPromiseResolveTrampoline:
- case kPromiseThenFinally:
- case kPromiseThrowerFinally:
- case kPromiseValueThunkFinally:
- case kProxyConstructor:
- case kProxyGetProperty:
- case kProxyHasProperty:
- case kProxySetProperty:
- case kRecordWrite:
- case kReflectHas:
- case kRegExpConstructor:
- case kRegExpPrototypeCompile:
- case kRegExpPrototypeDotAllGetter:
- case kRegExpPrototypeFlagsGetter:
- case kRegExpPrototypeGlobalGetter:
- case kRegExpPrototypeIgnoreCaseGetter:
- case kRegExpPrototypeMultilineGetter:
- case kRegExpPrototypeReplace:
- case kRegExpPrototypeSearch:
- case kRegExpPrototypeSourceGetter:
- case kRegExpPrototypeSplit:
- case kRegExpPrototypeStickyGetter:
- case kRegExpPrototypeUnicodeGetter:
- case kResolvePromise:
- case kReturnReceiver:
- case kRunMicrotasks:
- case kSameValue:
- case kSetPrototypeEntries:
- case kSetPrototypeForEach:
- case kSetPrototypeGetSize:
- case kSetPrototypeHas:
- case kSetPrototypeValues:
- case kStoreGlobalIC_Slow:
- case kStoreGlobalICTrampoline:
- case kStoreICTrampoline:
- case kStrictEqual:
- case kStringCodePointAtUTF16:
- case kStringCodePointAtUTF32:
- case kStringConstructor:
- case kStringEqual:
- case kStringGreaterThan:
- case kStringGreaterThanOrEqual:
- case kStringIndexOf:
- case kStringLessThan:
- case kStringLessThanOrEqual:
- case kStringPrototypeAnchor:
- case kStringPrototypeBig:
- case kStringPrototypeBlink:
- case kStringPrototypeBold:
- case kStringPrototypeCharCodeAt:
- case kStringPrototypeCodePointAt:
- case kStringPrototypeConcat:
- case kStringPrototypeFixed:
- case kStringPrototypeFontcolor:
- case kStringPrototypeFontsize:
- case kStringPrototypeIncludes:
- case kStringPrototypeIndexOf:
- case kStringPrototypeItalics:
- case kStringPrototypeIterator:
- case kStringPrototypeLink:
- case kStringPrototypeMatch:
- case kStringPrototypePadEnd:
- case kStringPrototypePadStart:
- case kStringPrototypeRepeat:
- case kStringPrototypeReplace:
- case kStringPrototypeSearch:
- case kStringPrototypeSmall:
- case kStringPrototypeStrike:
- case kStringPrototypeSub:
- case kStringPrototypeSup:
-#ifdef V8_INTL_SUPPORT
- case kStringPrototypeToLowerCaseIntl:
- case kStringToLowerCaseIntl:
-#endif
- case kStringPrototypeToString:
- case kStringPrototypeValueOf:
- case kStringRepeat:
- case kStringToNumber:
- case kSubtract:
- case kSymbolPrototypeToPrimitive:
- case kSymbolPrototypeToString:
- case kSymbolPrototypeValueOf:
- case kThrowWasmTrapDivByZero:
- case kThrowWasmTrapDivUnrepresentable:
- case kThrowWasmTrapFloatUnrepresentable:
- case kThrowWasmTrapFuncInvalid:
- case kThrowWasmTrapFuncSigMismatch:
- case kThrowWasmTrapMemOutOfBounds:
- case kThrowWasmTrapRemByZero:
case kThrowWasmTrapUnreachable:
- case kToBoolean:
- case kToBooleanLazyDeoptContinuation:
- case kToInteger:
- case kToInteger_TruncateMinusZero:
- case kToName:
- case kToNumber:
- case kToNumeric:
- case kToString:
- case kTypedArrayConstructor:
- case kTypedArrayConstructor_ConstructStub:
- case kTypedArrayPrototypeByteLength:
- case kTypedArrayPrototypeByteOffset:
- case kTypedArrayPrototypeEntries:
- case kTypedArrayPrototypeEvery:
- case kTypedArrayPrototypeFind:
- case kTypedArrayPrototypeFindIndex:
- case kTypedArrayPrototypeForEach:
- case kTypedArrayPrototypeKeys:
- case kTypedArrayPrototypeLength:
- case kTypedArrayPrototypeReduce:
- case kTypedArrayPrototypeReduceRight:
- case kTypedArrayPrototypeSet:
- case kTypedArrayPrototypeSlice:
- case kTypedArrayPrototypeSome:
- case kTypedArrayPrototypeSubArray:
- case kTypedArrayPrototypeToStringTag:
- case kTypedArrayPrototypeValues:
- case kTypeof:
- case kWasmStackGuard:
- case kWeakMapGet:
- case kWeakMapHas:
- case kWeakMapLookupHashIndex:
- case kWeakMapPrototypeDelete:
- case kWeakMapPrototypeSet:
- case kWeakSetHas:
- case kWeakSetPrototypeAdd:
- case kWeakSetPrototypeDelete:
-#endif
- return true;
- default:
- return false;
- }
- UNREACHABLE();
-}
-
-// static
-bool Builtins::IsOffHeapSafe(int index) {
-#ifndef V8_EMBEDDED_BUILTINS
- return false;
-#else
- DCHECK(IsBuiltinId(index));
- if (IsTooShortForOffHeapTrampoline(index)) return false;
- switch (index) {
-#ifdef DEBUG
- case kAbortJS:
- case kAllocateHeapNumber:
- case kArrayEveryLoopEagerDeoptContinuation:
- case kArrayFilterLoopEagerDeoptContinuation:
- case kArrayFindIndexLoopEagerDeoptContinuation:
- case kArrayFindIndexLoopLazyDeoptContinuation:
- case kArrayFindLoopEagerDeoptContinuation:
- case kArrayFindLoopLazyDeoptContinuation:
- case kArrayForEachLoopEagerDeoptContinuation:
- case kArrayForEachLoopLazyDeoptContinuation:
- case kArrayMapLoopEagerDeoptContinuation:
- case kArrayReduceLoopEagerDeoptContinuation:
- case kArrayReduceLoopLazyDeoptContinuation:
- case kArrayReducePreLoopEagerDeoptContinuation:
- case kArrayReduceRightLoopEagerDeoptContinuation:
- case kArrayReduceRightLoopLazyDeoptContinuation:
- case kArrayReduceRightPreLoopEagerDeoptContinuation:
- case kArraySomeLoopEagerDeoptContinuation:
- case kBitwiseNot:
- case kBooleanPrototypeToString:
- case kBooleanPrototypeValueOf:
- case kContinueToCodeStubBuiltin:
- case kContinueToCodeStubBuiltinWithResult:
- case kContinueToJavaScriptBuiltin:
- case kContinueToJavaScriptBuiltinWithResult:
- case kDatePrototypeGetDate:
- case kDatePrototypeGetDay:
- case kDatePrototypeGetFullYear:
- case kDatePrototypeGetHours:
- case kDatePrototypeGetMilliseconds:
- case kDatePrototypeGetMinutes:
- case kDatePrototypeGetMonth:
- case kDatePrototypeGetSeconds:
- case kDatePrototypeGetTime:
- case kDatePrototypeGetTimezoneOffset:
- case kDatePrototypeGetUTCDate:
- case kDatePrototypeGetUTCDay:
- case kDatePrototypeGetUTCFullYear:
- case kDatePrototypeGetUTCHours:
- case kDatePrototypeGetUTCMilliseconds:
- case kDatePrototypeGetUTCMinutes:
- case kDatePrototypeGetUTCMonth:
- case kDatePrototypeGetUTCSeconds:
- case kDatePrototypeToPrimitive:
- case kDatePrototypeValueOf:
- case kDecrement:
- case kDivide:
- case kGlobalIsFinite:
- case kGlobalIsNaN:
- case kIncrement:
- case kKeyedLoadIC_Slow:
- case kKeyedLoadICTrampoline:
- case kKeyedStoreIC_Slow:
- case kKeyedStoreICTrampoline:
- case kLoadField:
- case kLoadGlobalICInsideTypeofTrampoline:
- case kLoadGlobalIC_Slow:
- case kLoadGlobalICTrampoline:
- case kLoadIC_Slow:
- case kLoadICTrampoline:
- case kMapPrototypeEntries:
- case kMapPrototypeGet:
- case kMapPrototypeGetSize:
- case kMapPrototypeHas:
- case kMapPrototypeKeys:
- case kMapPrototypeValues:
- case kMathAcos:
- case kMathAcosh:
- case kMathAsin:
- case kMathAsinh:
- case kMathAtan:
- case kMathAtan2:
- case kMathAtanh:
- case kMathCbrt:
- case kMathCeil:
- case kMathCos:
- case kMathCosh:
- case kMathExp:
- case kMathExpm1:
- case kMathFloor:
- case kMathFround:
- case kMathLog:
- case kMathLog10:
- case kMathLog1p:
- case kMathLog2:
- case kMathMax:
- case kMathMin:
- case kMathRound:
- case kMathSign:
- case kMathSin:
- case kMathSinh:
- case kMathSqrt:
- case kMathTan:
- case kMathTanh:
- case kMathTrunc:
- case kModulus:
- case kMultiply:
- case kNonPrimitiveToPrimitive_Default:
- case kNonPrimitiveToPrimitive_Number:
- case kNonPrimitiveToPrimitive_String:
- case kNumberIsFinite:
- case kNumberIsInteger:
- case kNumberIsNaN:
- case kNumberIsSafeInteger:
- case kNumberPrototypeValueOf:
- case kObjectPrototypeToLocaleString:
- case kObjectPrototypeValueOf:
- case kPromiseCapabilityDefaultReject:
- case kPromiseCapabilityDefaultResolve:
- case kPromiseConstructorLazyDeoptContinuation:
- case kPromiseInternalReject:
- case kPromiseInternalResolve:
- case kPromiseResolveTrampoline:
- case kPromiseThrowerFinally:
- case kPromiseValueThunkFinally:
- case kProxyConstructor:
- case kReflectHas:
- case kRegExpPrototypeDotAllGetter:
- case kRegExpPrototypeGlobalGetter:
- case kRegExpPrototypeIgnoreCaseGetter:
- case kRegExpPrototypeMultilineGetter:
- case kRegExpPrototypeSourceGetter:
- case kRegExpPrototypeStickyGetter:
- case kRegExpPrototypeUnicodeGetter:
- case kReturnReceiver:
- case kSetPrototypeEntries:
- case kSetPrototypeGetSize:
- case kSetPrototypeValues:
- case kStoreGlobalIC_Slow:
- case kStoreGlobalICTrampoline:
- case kStoreICTrampoline:
- case kStringPrototypeBig:
- case kStringPrototypeBlink:
- case kStringPrototypeBold:
- case kStringPrototypeConcat:
- case kStringPrototypeFixed:
- case kStringPrototypeItalics:
- case kStringPrototypeIterator:
- case kStringPrototypeSmall:
- case kStringPrototypeStrike:
- case kStringPrototypeSub:
- case kStringPrototypeSup:
-#ifdef V8_INTL_SUPPORT
- case kStringPrototypeToLowerCaseIntl:
-#endif
- case kSubtract:
- case kSymbolPrototypeToPrimitive:
- case kSymbolPrototypeToString:
- case kSymbolPrototypeValueOf:
+ case kThrowWasmTrapMemOutOfBounds:
case kThrowWasmTrapDivByZero:
case kThrowWasmTrapDivUnrepresentable:
+ case kThrowWasmTrapRemByZero:
case kThrowWasmTrapFloatUnrepresentable:
case kThrowWasmTrapFuncInvalid:
case kThrowWasmTrapFuncSigMismatch:
- case kThrowWasmTrapMemOutOfBounds:
- case kThrowWasmTrapRemByZero:
- case kThrowWasmTrapUnreachable:
- case kToInteger:
- case kTypedArrayConstructor:
- case kWasmStackGuard:
- case kWeakMapGet:
- case kWeakMapHas:
- case kWeakMapPrototypeDelete:
- case kWeakMapPrototypeSet:
- case kWeakSetHas:
- case kWeakSetPrototypeAdd:
- case kWeakSetPrototypeDelete:
#else
case kAbortJS:
case kAdd:
@@ -1068,9 +379,6 @@ bool Builtins::IsOffHeapSafe(int index) {
case kBitwiseNot:
case kBooleanPrototypeToString:
case kBooleanPrototypeValueOf:
- case kCallProxy:
- case kConstructFunction:
- case kConstructProxy:
case kContinueToCodeStubBuiltin:
case kContinueToCodeStubBuiltinWithResult:
case kContinueToJavaScriptBuiltin:
@@ -1140,6 +448,8 @@ bool Builtins::IsOffHeapSafe(int index) {
case kLoadIC_FunctionPrototype:
case kLoadIC_Noninlined:
case kLoadIC_Slow:
+ case kLoadIC_StringLength:
+ case kLoadIC_StringWrapperLength:
case kLoadICTrampoline:
case kLoadIC_Uninitialized:
case kMapPrototypeEntries:
@@ -1149,36 +459,15 @@ bool Builtins::IsOffHeapSafe(int index) {
case kMapPrototypeHas:
case kMapPrototypeKeys:
case kMapPrototypeValues:
- case kMathAcos:
- case kMathAcosh:
- case kMathAsin:
- case kMathAsinh:
- case kMathAtan:
- case kMathAtan2:
- case kMathAtanh:
- case kMathCbrt:
case kMathCeil:
- case kMathCos:
- case kMathCosh:
- case kMathExp:
- case kMathExpm1:
case kMathFloor:
case kMathFround:
- case kMathLog:
- case kMathLog10:
- case kMathLog1p:
- case kMathLog2:
case kMathMax:
case kMathMin:
case kMathRound:
case kMathSign:
- case kMathSin:
- case kMathSinh:
case kMathSqrt:
- case kMathTan:
- case kMathTanh:
case kMathTrunc:
- case kModulus:
case kMultiply:
case kNegate:
case kNewArgumentsElements:
@@ -1187,7 +476,6 @@ bool Builtins::IsOffHeapSafe(int index) {
case kNonPrimitiveToPrimitive_Default:
case kNonPrimitiveToPrimitive_Number:
case kNonPrimitiveToPrimitive_String:
- case kNumberConstructor:
case kNumberIsFinite:
case kNumberIsInteger:
case kNumberIsNaN:
@@ -1196,7 +484,6 @@ bool Builtins::IsOffHeapSafe(int index) {
case kNumberPrototypeValueOf:
case kNumberToString:
case kObjectConstructor:
- case kObjectConstructor_ConstructStub:
case kObjectCreate:
case kObjectIs:
case kObjectKeys:
@@ -1209,7 +496,6 @@ bool Builtins::IsOffHeapSafe(int index) {
case kOrdinaryHasInstance:
case kOrdinaryToPrimitive_Number:
case kOrdinaryToPrimitive_String:
- case kPromiseAll:
case kPromiseCapabilityDefaultReject:
case kPromiseCapabilityDefaultResolve:
case kPromiseCatchFinally:
@@ -1230,11 +516,9 @@ bool Builtins::IsOffHeapSafe(int index) {
case kPromiseThenFinally:
case kPromiseThrowerFinally:
case kPromiseValueThunkFinally:
- case kProxyConstructor:
case kProxyGetProperty:
case kProxyHasProperty:
case kProxySetProperty:
- case kRecordWrite:
case kReflectHas:
case kRegExpConstructor:
case kRegExpPrototypeCompile:
@@ -1264,7 +548,6 @@ bool Builtins::IsOffHeapSafe(int index) {
case kStrictEqual:
case kStringCodePointAtUTF16:
case kStringCodePointAtUTF32:
- case kStringConstructor:
case kStringEqual:
case kStringGreaterThan:
case kStringGreaterThanOrEqual:
@@ -1325,7 +608,6 @@ bool Builtins::IsOffHeapSafe(int index) {
case kToNumeric:
case kToString:
case kTypedArrayConstructor:
- case kTypedArrayConstructor_ConstructStub:
case kTypedArrayPrototypeByteLength:
case kTypedArrayPrototypeByteOffset:
case kTypedArrayPrototypeEntries:
@@ -1359,19 +641,33 @@ bool Builtins::IsOffHeapSafe(int index) {
return false;
}
UNREACHABLE();
-#endif // V8_EMBEDDED_BUILTINS
}
+#ifdef V8_EMBEDDED_BUILTINS
// static
-bool Builtins::IsTooShortForOffHeapTrampoline(int index) {
- switch (index) {
- case kLoadIC_StringLength:
- case kLoadIC_StringWrapperLength:
- return true;
- default:
- return false;
+Handle<Code> Builtins::GenerateOffHeapTrampolineFor(Isolate* isolate,
+ Address off_heap_entry) {
+ DCHECK(isolate->serializer_enabled());
+ DCHECK_NOT_NULL(isolate->embedded_blob());
+ DCHECK_NE(0, isolate->embedded_blob_size());
+
+ constexpr size_t buffer_size = 256; // Enough to fit the single jmp.
+ byte buffer[buffer_size]; // NOLINT(runtime/arrays)
+
+ // Generate replacement code that simply tail-calls the off-heap code.
+ MacroAssembler masm(isolate, buffer, buffer_size, CodeObjectRequired::kYes);
+ DCHECK(!masm.has_frame());
+ {
+ FrameScope scope(&masm, StackFrame::NONE);
+ masm.JumpToInstructionStream(off_heap_entry);
}
+
+ CodeDesc desc;
+ masm.GetCode(isolate, &desc);
+
+ return isolate->factory()->NewCode(desc, Code::BUILTIN, masm.CodeObject());
}
+#endif // V8_EMBEDDED_BUILTINS
// static
Builtins::Kind Builtins::KindOf(int index) {
diff --git a/deps/v8/src/builtins/builtins.h b/deps/v8/src/builtins/builtins.h
index bf96469d19..7745420366 100644
--- a/deps/v8/src/builtins/builtins.h
+++ b/deps/v8/src/builtins/builtins.h
@@ -72,7 +72,7 @@ class Builtins {
Handle<Code> NewFunctionContext(ScopeType scope_type);
Handle<Code> JSConstructStubGeneric();
- // Used by BuiltinDeserializer.
+ // Used by BuiltinDeserializer and CreateOffHeapTrampolines in isolate.cc.
void set_builtin(int index, HeapObject* builtin);
Code* builtin(int index) {
@@ -111,10 +111,10 @@ class Builtins {
// True, iff the given code object is a builtin. Note that this does not
// necessarily mean that its kind is Code::BUILTIN.
- static bool IsBuiltin(Code* code);
+ static bool IsBuiltin(const Code* code);
- // True, iff the given code object is a builtin with off-heap code.
- static bool IsOffHeapBuiltin(Code* code);
+ // True, iff the given code object is a builtin with off-heap embedded code.
+ static bool IsEmbeddedBuiltin(const Code* code);
// Returns true iff the given builtin can be lazy-loaded from the snapshot.
// This is true in general for most builtins with the exception of a few
@@ -125,17 +125,6 @@ class Builtins {
// TODO(jgruber,v8:6666): Remove once all builtins have been migrated.
static bool IsIsolateIndependent(int index);
- // This is the condition we currently use to determine whether a builtin is
- // copied off-heap when --stress-off-heap-code is passed. Such builtins do not
- // need to be isolate-independent, e.g. they can contain external references
- // that point to one specific isolate. A further restrictions is that there
- // must be enough space for the trampoline.
- static bool IsOffHeapSafe(int index);
-
- // The off-heap trampoline is short but requires a certain minimal instruction
- // size. This function states whether a given builtin is too short.
- static bool IsTooShortForOffHeapTrampoline(int index);
-
bool is_initialized() const { return initialized_; }
// Used by SetupIsolateDelegate and Deserializer.
@@ -144,7 +133,7 @@ class Builtins {
initialized_ = true;
}
- MUST_USE_RESULT static MaybeHandle<Object> InvokeApiFunction(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> InvokeApiFunction(
Isolate* isolate, bool is_construct, Handle<HeapObject> function,
Handle<Object> receiver, int argc, Handle<Object> args[],
Handle<HeapObject> new_target);
@@ -160,6 +149,14 @@ class Builtins {
private:
Builtins();
+#ifdef V8_EMBEDDED_BUILTINS
+ // Creates a trampoline code object that jumps to the given off-heap entry.
+ // The result should not be used directly, but only from the related Factory
+ // function.
+ static Handle<Code> GenerateOffHeapTrampolineFor(Isolate* isolate,
+ Address off_heap_entry);
+#endif
+
static void Generate_CallFunction(MacroAssembler* masm,
ConvertReceiverMode mode);
@@ -198,6 +195,7 @@ class Builtins {
Object* builtins_[builtin_count];
bool initialized_;
+ friend class Factory; // For GenerateOffHeapTrampolineFor.
friend class Isolate;
friend class SetupIsolateDelegate;
diff --git a/deps/v8/src/builtins/constants-table-builder.cc b/deps/v8/src/builtins/constants-table-builder.cc
index a4117bd5a2..c044a348da 100644
--- a/deps/v8/src/builtins/constants-table-builder.cc
+++ b/deps/v8/src/builtins/constants-table-builder.cc
@@ -32,6 +32,8 @@ uint32_t BuiltinsConstantsTableBuilder::AddObject(Handle<Object> object) {
// Not yet finalized.
DCHECK_EQ(isolate_->heap()->empty_fixed_array(),
isolate_->heap()->builtins_constants_table());
+
+ DCHECK(isolate_->serializer_enabled());
#endif
uint32_t* maybe_key = map_.Find(object);
@@ -49,6 +51,7 @@ void BuiltinsConstantsTableBuilder::Finalize() {
DCHECK_EQ(isolate_->heap()->empty_fixed_array(),
isolate_->heap()->builtins_constants_table());
+ DCHECK(isolate_->serializer_enabled());
DCHECK_LT(0, map_.size());
Handle<FixedArray> table =
diff --git a/deps/v8/src/builtins/constants-table-builder.h b/deps/v8/src/builtins/constants-table-builder.h
index d251d5849b..aefad8c3bb 100644
--- a/deps/v8/src/builtins/constants-table-builder.h
+++ b/deps/v8/src/builtins/constants-table-builder.h
@@ -24,8 +24,8 @@ class BuiltinsConstantsTableBuilder final {
public:
explicit BuiltinsConstantsTableBuilder(Isolate* isolate);
- // Returns the index within the builtins constants list for the given object,
- // possibly adding the object to the cache. Objects are deduplicated.
+ // Returns the index within the builtins constants table for the given
+ // object, possibly adding the object to the table. Objects are deduplicated.
uint32_t AddObject(Handle<Object> object);
// Should be called after all affected code (e.g. builtins and bytecode
diff --git a/deps/v8/src/builtins/growable-fixed-array-gen.cc b/deps/v8/src/builtins/growable-fixed-array-gen.cc
index 3a155e26f9..eae9ff5594 100644
--- a/deps/v8/src/builtins/growable-fixed-array-gen.cc
+++ b/deps/v8/src/builtins/growable-fixed-array-gen.cc
@@ -90,8 +90,8 @@ TNode<FixedArray> GrowableFixedArray::ResizeFixedArray(
CodeStubAssembler::ExtractFixedArrayFlags flags;
flags |= CodeStubAssembler::ExtractFixedArrayFlag::kFixedArrays;
- TNode<FixedArray> to_array = CAST(ExtractFixedArray(
- from_array, nullptr, element_count, new_capacity, flags));
+ TNode<FixedArray> to_array = ExtractFixedArray(
+ from_array, nullptr, element_count, new_capacity, flags);
return to_array;
}
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index 5bc083f531..2afcebab80 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -217,7 +217,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -----------------------------------
__ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ test(FieldOperand(ebx, SharedFunctionInfo::kCompilerHintsOffset),
+ __ test(FieldOperand(ebx, SharedFunctionInfo::kFlagsOffset),
Immediate(SharedFunctionInfo::IsDerivedConstructorBit::kMask));
__ j(not_zero, &not_create_implicit_receiver);
@@ -338,7 +338,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ bind(&other_result);
__ mov(ebx, Operand(ebp, ConstructFrameConstants::kConstructorOffset));
__ mov(ebx, FieldOperand(ebx, JSFunction::kSharedFunctionInfoOffset));
- __ test(FieldOperand(ebx, SharedFunctionInfo::kCompilerHintsOffset),
+ __ test(FieldOperand(ebx, SharedFunctionInfo::kFlagsOffset),
Immediate(SharedFunctionInfo::IsClassConstructorBit::kMask));
if (restrict_constructor_return) {
@@ -382,9 +382,6 @@ void Builtins::Generate_JSConstructStubGenericUnrestrictedReturn(
MacroAssembler* masm) {
return Generate_JSConstructStubGeneric(masm, false);
}
-void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSBuiltinsConstructStubHelper(masm);
-}
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSBuiltinsConstructStubHelper(masm);
}
@@ -497,6 +494,19 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, true);
}
+static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
+ Register sfi_data,
+ Register scratch1) {
+ Label done;
+
+ __ CmpObjectType(sfi_data, INTERPRETER_DATA_TYPE, scratch1);
+ __ j(not_equal, &done, Label::kNear);
+ __ mov(sfi_data,
+ FieldOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
+
+ __ bind(&done);
+}
+
// static
void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -571,6 +581,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kFunctionDataOffset));
+ __ Push(eax);
+ GetSharedFunctionInfoBytecode(masm, ecx, eax);
+ __ Pop(eax);
__ CmpObjectType(ecx, BYTECODE_ARRAY_TYPE, ecx);
__ Assert(equal, AbortReason::kMissingBytecodeArray);
}
@@ -673,7 +686,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// -----------------------------------
DCHECK(!AreAliased(feedback_vector, eax, edx, edi, scratch));
- Label optimized_code_slot_is_cell, fallthrough;
+ Label optimized_code_slot_is_weak_ref, fallthrough;
Register closure = edi;
Register optimized_code_entry = scratch;
@@ -682,9 +695,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
FieldOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
- // optimisation marker. Otherwise, interpret is as a weak cell to a code
+ // optimisation marker. Otherwise, interpret it as a weak reference to a code
// object.
- __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
+ __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);
{
// Optimized code slot is an optimization marker.
@@ -719,12 +732,11 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
{
- // Optimized code slot is a WeakCell.
- __ bind(&optimized_code_slot_is_cell);
+ // Optimized code slot is a weak reference.
+ __ bind(&optimized_code_slot_is_weak_ref);
+
+ __ LoadWeakValue(optimized_code_entry, &fallthrough);
- __ mov(optimized_code_entry,
- FieldOperand(optimized_code_entry, WeakCell::kValueOffset));
- __ JumpIfSmi(optimized_code_entry, &fallthrough);
__ push(eax);
__ push(edx);
@@ -858,10 +870,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
- Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
+ Label maybe_load_debug_bytecode_array, bytecode_array_loaded,
+ apply_instrumentation;
__ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(kInterpreterBytecodeArrayRegister,
FieldOperand(eax, SharedFunctionInfo::kFunctionDataOffset));
+ __ Push(eax);
+ GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, eax);
+ __ Pop(eax);
__ JumpIfNotSmi(FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset),
&maybe_load_debug_bytecode_array);
__ bind(&bytecode_array_loaded);
@@ -978,15 +994,29 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// kInterpreterBytecodeArrayRegister is already loaded with
// SharedFunctionInfo::kFunctionDataOffset.
__ bind(&maybe_load_debug_bytecode_array);
- __ push(ebx); // feedback_vector == ebx, so save it.
- __ mov(ecx, FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset));
- __ mov(ebx, FieldOperand(ecx, DebugInfo::kFlagsOffset));
- __ SmiUntag(ebx);
- __ test(ebx, Immediate(DebugInfo::kHasBreakInfo));
+ __ mov(eax, FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset));
+ __ mov(ecx, FieldOperand(eax, DebugInfo::kDebugBytecodeArrayOffset));
+ __ JumpIfRoot(ecx, Heap::kUndefinedValueRootIndex, &bytecode_array_loaded);
+
+ __ mov(kInterpreterBytecodeArrayRegister, ecx);
+ __ mov(ecx, FieldOperand(eax, DebugInfo::kFlagsOffset));
+ __ SmiUntag(ecx);
+ __ and_(ecx, Immediate(DebugInfo::kDebugExecutionMode));
+ STATIC_ASSERT(static_cast<int>(DebugInfo::kDebugExecutionMode) ==
+ static_cast<int>(DebugInfo::kSideEffects));
+ ExternalReference debug_execution_mode =
+ ExternalReference::debug_execution_mode_address(masm->isolate());
+ __ cmp(ecx, Operand::StaticVariable(debug_execution_mode));
+ __ j(equal, &bytecode_array_loaded);
+
+ __ pop(ecx); // get JSFunction from stack
+ __ push(ecx);
+ __ push(ebx); // preserve feedback_vector and bytecode array register
+ __ push(kInterpreterBytecodeArrayRegister);
+ __ push(ecx); // pass function as argument
+ __ CallRuntime(Runtime::kDebugApplyInstrumentation);
+ __ pop(kInterpreterBytecodeArrayRegister);
__ pop(ebx);
- __ j(zero, &bytecode_array_loaded);
- __ mov(kInterpreterBytecodeArrayRegister,
- FieldOperand(ecx, DebugInfo::kDebugBytecodeArrayOffset));
__ jmp(&bytecode_array_loaded);
}
@@ -1013,6 +1043,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
MacroAssembler* masm, ConvertReceiverMode receiver_mode,
InterpreterPushArgsMode mode) {
+ DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- ebx : the address of the first argument to be pushed. Subsequent
@@ -1055,11 +1086,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// Call the target.
__ Push(edx); // Re-push return address.
- if (mode == InterpreterPushArgsMode::kJSFunction) {
- __ Jump(
- masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny),
- RelocInfo::CODE_TARGET);
- } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
RelocInfo::CODE_TARGET);
} else {
@@ -1203,15 +1230,12 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
__ AssertUndefinedOrAllocationSite(ebx);
}
- if (mode == InterpreterPushArgsMode::kJSFunction) {
- // Tail call to the function-specific construct stub (still in the caller
+ if (mode == InterpreterPushArgsMode::kArrayFunction) {
+ // Tail call to the array construct stub (still in the caller
// context at this point).
__ AssertFunction(edi);
-
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kConstructStubOffset));
- __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
- __ jmp(ecx);
+ ArrayConstructorStub array_constructor_stub(masm->isolate());
+ __ Jump(array_constructor_stub.GetCode(), RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Call the constructor with unmodified eax, edi, edx values.
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
@@ -1238,10 +1262,28 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.
+ Label builtin_trampoline, trampoline_loaded;
Smi* interpreter_entry_return_pc_offset(
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
+
+ // If the SFI function_data is an InterpreterData, get the trampoline stored
+ // in it, otherwise get the trampoline from the builtins list.
+ __ mov(ebx, Operand(ebp, StandardFrameConstants::kFunctionOffset));
+ __ mov(ebx, FieldOperand(ebx, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kFunctionDataOffset));
+ __ Push(eax);
+ __ CmpObjectType(ebx, INTERPRETER_DATA_TYPE, eax);
+ __ j(not_equal, &builtin_trampoline, Label::kNear);
+
+ __ mov(ebx, FieldOperand(ebx, InterpreterData::kInterpreterTrampolineOffset));
+ __ jmp(&trampoline_loaded, Label::kNear);
+
+ __ bind(&builtin_trampoline);
__ Move(ebx, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
+
+ __ bind(&trampoline_loaded);
+ __ Pop(eax);
__ add(ebx, Immediate(interpreter_entry_return_pc_offset->value() +
Code::kHeaderSize - kHeapObjectTag));
__ push(ebx);
@@ -1313,42 +1355,9 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
-void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : argument count (preserved for callee)
- // -- rdx : new target (preserved for callee)
- // -- rdi : target function (preserved for callee)
- // -----------------------------------
- Register closure = edi;
-
- // Get the feedback vector.
- Register feedback_vector = ebx;
- __ mov(feedback_vector,
- FieldOperand(closure, JSFunction::kFeedbackCellOffset));
- __ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
-
- // The feedback vector must be defined.
- if (FLAG_debug_code) {
- __ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
- __ Assert(not_equal, AbortReason::kExpectedFeedbackVector);
- }
-
- // Is there an optimization marker or optimized code in the feedback vector?
- MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, ecx);
-
- // Otherwise, tail call the SFI code.
- static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kCodeOffset));
- __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
- __ jmp(ecx);
-}
-
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
- // Set the code slot inside the JSFunction to the trampoline to the
- // interpreter entry.
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kCodeOffset));
+ // Set the code slot inside the JSFunction to CompileLazy.
+ __ Move(ecx, BUILTIN_CODE(masm->isolate(), CompileLazy));
__ mov(FieldOperand(edi, JSFunction::kCodeOffset), ecx);
__ RecordWriteField(edi, JSFunction::kCodeOffset, ecx, ebx, kDontSaveFPRegs,
OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
@@ -1356,6 +1365,77 @@ void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
Generate_CompileLazy(masm);
}
+static void GetSharedFunctionInfoCode(MacroAssembler* masm, Register sfi_data,
+ Register scratch1) {
+ // Figure out the SFI's code object.
+ Label done;
+ Label check_is_bytecode_array;
+ Label check_is_code;
+ Label check_is_fixed_array;
+ Label check_is_pre_parsed_scope_data;
+ Label check_is_function_template_info;
+ Label check_is_interpreter_data;
+
+ Register data_type = scratch1;
+
+ // IsSmi: Is builtin
+ __ JumpIfNotSmi(sfi_data, &check_is_bytecode_array);
+ __ mov(scratch1,
+ Immediate(ExternalReference::builtins_address(masm->isolate())));
+ // Avoid untagging the Smi unnecessarily.
+ STATIC_ASSERT(times_2 == times_pointer_size - kSmiTagSize);
+ __ mov(sfi_data, Operand(scratch1, sfi_data, times_2, 0));
+ __ jmp(&done);
+
+ // Get map for subsequent checks.
+ __ bind(&check_is_bytecode_array);
+ __ mov(data_type, FieldOperand(sfi_data, HeapObject::kMapOffset));
+ __ mov(data_type, FieldOperand(data_type, Map::kInstanceTypeOffset));
+
+ // IsBytecodeArray: Interpret bytecode
+ __ cmpw(data_type, Immediate(BYTECODE_ARRAY_TYPE));
+ __ j(not_equal, &check_is_code);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
+ __ jmp(&done);
+
+ // IsCode: Run code
+ __ bind(&check_is_code);
+ __ cmpw(data_type, Immediate(CODE_TYPE));
+ __ j(equal, &done);
+
+ // IsFixedArray: Instantiate using AsmWasmData,
+ __ bind(&check_is_fixed_array);
+ __ cmpw(data_type, Immediate(FIXED_ARRAY_TYPE));
+ __ j(not_equal, &check_is_pre_parsed_scope_data);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InstantiateAsmJs));
+ __ jmp(&done);
+
+ // IsPreParsedScopeData: Compile lazy
+ __ bind(&check_is_pre_parsed_scope_data);
+ __ cmpw(data_type, Immediate(TUPLE2_TYPE));
+ __ j(not_equal, &check_is_function_template_info);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), CompileLazy));
+ __ jmp(&done);
+
+ // IsFunctionTemplateInfo: API call
+ __ bind(&check_is_function_template_info);
+ __ cmpw(data_type, Immediate(FUNCTION_TEMPLATE_INFO_TYPE));
+ __ j(not_equal, &check_is_interpreter_data);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), HandleApiCall));
+ __ jmp(&done);
+
+ // IsInterpreterData: Interpret bytecode
+ __ bind(&check_is_interpreter_data);
+ if (FLAG_debug_code) {
+ __ cmpw(data_type, Immediate(INTERPRETER_DATA_TYPE));
+ __ Check(equal, AbortReason::kInvalidSharedFunctionInfoData);
+ }
+ __ mov(sfi_data,
+ FieldOperand(sfi_data, InterpreterData::kInterpreterTrampolineOffset));
+
+ __ bind(&done);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argument count (preserved for callee)
@@ -1378,12 +1458,13 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, ecx);
- // We found no optimized code.
+ // We found no optimized code. Infer the code object needed for the SFI.
Register entry = ecx;
__ mov(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(entry, FieldOperand(entry, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoCode(masm, entry, ebx);
- // If SFI points to anything other than CompileLazy, install that.
- __ mov(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
+ // If code entry points to anything other than CompileLazy, install that.
__ Move(ebx, masm->CodeObject());
__ cmp(entry, ebx);
__ j(equal, &gotta_call_runtime);
@@ -1451,26 +1532,13 @@ void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
{
// If we've reached this spot, the target builtin has been deserialized and
- // we simply need to copy it over. First to the shared function info.
+ // we simply need to copy it over to the target function.
Register target_builtin = scratch1;
- Register shared = scratch0;
- __ mov(shared, FieldOperand(target, JSFunction::kSharedFunctionInfoOffset));
-
- __ mov(FieldOperand(shared, SharedFunctionInfo::kCodeOffset),
- target_builtin);
+ __ mov(FieldOperand(target, JSFunction::kCodeOffset), target_builtin);
__ push(eax); // Write barrier clobbers these below.
__ push(target_builtin);
- __ RecordWriteField(shared, SharedFunctionInfo::kCodeOffset, target_builtin,
- eax, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ pop(target_builtin); // eax is popped later, shared is now available.
-
- // And second to the target function.
-
- __ mov(FieldOperand(target, JSFunction::kCodeOffset), target_builtin);
- __ push(target_builtin); // Write barrier clobbers these below.
__ RecordWriteField(target, JSFunction::kCodeOffset, target_builtin, eax,
kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ pop(target_builtin);
@@ -1845,9 +1913,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// -----------------------------------
Label generic_array_code;
- // Get the InternalArray function.
- __ LoadGlobalFunction(Context::INTERNAL_ARRAY_FUNCTION_INDEX, edi);
-
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray function should be a map.
__ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
@@ -1863,6 +1928,7 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
// tail call a stub
+ __ mov(ebx, masm->isolate()->factory()->undefined_value());
InternalArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -1870,15 +1936,12 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
+ // -- edi : array function
// -- esp[0] : return address
// -- esp[4] : last argument
// -----------------------------------
Label generic_array_code;
- // Get the Array function.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, edi);
- __ mov(edx, edi);
-
if (FLAG_debug_code) {
// Initial map for the builtin Array function should be a map.
__ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
@@ -1889,9 +1952,17 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
__ Assert(equal, AbortReason::kUnexpectedInitialMapForArrayFunction);
}
- // Run the native code for the Array function called as a normal function.
- // tail call a stub
+ // ebx is the AllocationSite - here undefined.
__ mov(ebx, masm->isolate()->factory()->undefined_value());
+ // If edx (new target) is undefined, then this is the 'Call' case, so move
+  // edi (the constructor) to edx.
+ Label call;
+ __ cmp(edx, ebx);
+ __ j(not_equal, &call);
+ __ mov(edx, edi);
+
+ // Run the native code for the Array function called as a normal function.
+ __ bind(&call);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -2115,7 +2186,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Check that the function is not a "classConstructor".
Label class_constructor;
__ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ test(FieldOperand(edx, SharedFunctionInfo::kCompilerHintsOffset),
+ __ test(FieldOperand(edx, SharedFunctionInfo::kFlagsOffset),
Immediate(SharedFunctionInfo::IsClassConstructorBit::kMask));
__ j(not_zero, &class_constructor);
@@ -2125,7 +2196,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
- __ test(FieldOperand(edx, SharedFunctionInfo::kCompilerHintsOffset),
+ __ test(FieldOperand(edx, SharedFunctionInfo::kFlagsOffset),
Immediate(SharedFunctionInfo::IsNativeBit::kMask |
SharedFunctionInfo::IsStrictBit::kMask));
__ j(not_zero, &done_convert);
@@ -2368,18 +2439,27 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// -- edx : the new target (checked to be a constructor)
// -- edi : the constructor to call (checked to be a JSFunction)
// -----------------------------------
+ __ AssertConstructor(edi);
__ AssertFunction(edi);
// Calling convention for function specific ConstructStubs require
// ebx to contain either an AllocationSite or undefined.
__ LoadRoot(ebx, Heap::kUndefinedValueRootIndex);
- // Tail call to the function-specific construct stub (still in the caller
- // context at this point).
+ Label call_generic_stub;
+
+ // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
__ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kConstructStubOffset));
- __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
- __ jmp(ecx);
+ __ test(FieldOperand(ecx, SharedFunctionInfo::kFlagsOffset),
+ Immediate(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
+ __ j(zero, &call_generic_stub, Label::kNear);
+
+ __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&call_generic_stub);
+ __ Jump(masm->isolate()->builtins()->JSConstructStubGeneric(),
+ RelocInfo::CODE_TARGET);
}
// static
@@ -2389,6 +2469,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// -- edx : the new target (checked to be a constructor)
// -- edi : the constructor to call (checked to be a JSBoundFunction)
// -----------------------------------
+ __ AssertConstructor(edi);
__ AssertBoundFunction(edi);
// Push the [[BoundArguments]] onto the stack.
@@ -2421,16 +2502,17 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
Label non_constructor, non_proxy;
__ JumpIfSmi(edi, &non_constructor, Label::kNear);
- // Dispatch based on instance type.
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(equal, BUILTIN_CODE(masm->isolate(), ConstructFunction),
- RelocInfo::CODE_TARGET);
-
// Check if target has a [[Construct]] internal method.
+ __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
__ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
Immediate(Map::IsConstructorBit::kMask));
__ j(zero, &non_constructor, Label::kNear);
+ // Dispatch based on instance type.
+ __ CmpInstanceType(ecx, JS_FUNCTION_TYPE);
+ __ j(equal, BUILTIN_CODE(masm->isolate(), ConstructFunction),
+ RelocInfo::CODE_TARGET);
+
// Only dispatch to bound functions after checking whether they are
// constructors.
__ CmpInstanceType(ecx, JS_BOUND_FUNCTION_TYPE);
@@ -2680,10 +2762,12 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ auto wasm_instance_reg = esi; // TODO(titzer): put in a common place.
+
// Save all parameter registers (see wasm-linkage.cc). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
- constexpr Register gp_regs[]{eax, ebx, ecx, edx, esi};
+ constexpr Register gp_regs[]{eax, ebx, ecx, edx};
constexpr XMMRegister xmm_regs[]{xmm1, xmm2, xmm3, xmm4, xmm5, xmm6};
for (auto reg : gp_regs) {
@@ -2694,12 +2778,16 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ movdqu(Operand(esp, 16 * i), xmm_regs[i]);
}
- // Initialize rsi register with kZero, CEntryStub will use it to set the
- // current context on the isolate.
+ // Pass the WASM instance as an explicit argument to WasmCompileLazy.
+ __ Push(wasm_instance_reg);
+ // Initialize the JavaScript context with 0. CEntryStub will use it to
+ // set the current context on the isolate.
__ Move(esi, Smi::kZero);
__ CallRuntime(Runtime::kWasmCompileLazy);
- // Store returned instruction start in edi.
- __ lea(edi, FieldOperand(eax, Code::kHeaderSize));
+ // The entrypoint address is the first return value.
+ __ mov(edi, kReturnRegister0);
+ // The WASM instance is the second return value.
+ __ mov(wasm_instance_reg, kReturnRegister1);
// Restore registers.
for (int i = arraysize(xmm_regs) - 1; i >= 0; --i) {
@@ -2710,7 +2798,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ Pop(gp_regs[i]);
}
}
- // Now jump to the instructions of the returned code object.
+ // Finally, jump to the entrypoint.
__ jmp(edi);
}
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index 0d42834612..34faac0969 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -82,19 +82,6 @@ void Builtins::Generate_AdaptorWithBuiltinExitFrame(MacroAssembler* masm) {
AdaptorWithExitFrameType(masm, BUILTIN_EXIT);
}
-// Load the built-in InternalArray function from the current context.
-static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
- Register result) {
- // Load the InternalArray function from the native context.
- __ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, result);
-}
-
-// Load the built-in Array function from the current context.
-static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
- // Load the Array function from the native context.
- __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, result);
-}
-
void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
@@ -103,9 +90,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// -----------------------------------
Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
- // Get the InternalArray function.
- GenerateLoadInternalArrayFunction(masm, a1);
-
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray functions should be maps.
__ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
@@ -120,6 +104,7 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
// Tail call a stub.
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
InternalArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -127,29 +112,33 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
+ // -- a1 : array function
// -- ra : return address
// -- sp[...]: constructor arguments
// -----------------------------------
Label generic_array_code;
- // Get the Array function.
- GenerateLoadArrayFunction(masm, a1);
-
if (FLAG_debug_code) {
// Initial map for the builtin Array functions should be maps.
__ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
__ SmiTst(a2, t0);
__ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction1, t0,
Operand(zero_reg));
- __ GetObjectType(a2, a3, t0);
+ __ GetObjectType(a2, t1, t0);
__ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction2, t0,
Operand(MAP_TYPE));
}
- // Run the native code for the Array function called as a normal function.
- // Tail call a stub.
- __ mov(a3, a1);
+ // a2 is the AllocationSite - here undefined.
__ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ // If a3 (new target) is undefined, then this is the 'Call' case, so move
+ // a1 (the constructor) to a3.
+ Label call;
+ __ Branch(&call, ne, a3, Operand(a2));
+ __ mov(a3, a1);
+
+ // Run the native code for the Array function called as a normal function.
+ __ bind(&call);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -280,7 +269,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -----------------------------------
__ lw(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(t2, FieldMemOperand(t2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ lw(t2, FieldMemOperand(t2, SharedFunctionInfo::kFlagsOffset));
__ And(t2, t2, Operand(SharedFunctionInfo::IsDerivedConstructorBit::kMask));
__ Branch(&not_create_implicit_receiver, ne, t2, Operand(zero_reg));
@@ -401,7 +390,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ bind(&other_result);
__ lw(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
__ lw(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(t2, FieldMemOperand(t2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ lw(t2, FieldMemOperand(t2, SharedFunctionInfo::kFlagsOffset));
__ And(t2, t2, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
if (restrict_constructor_return) {
@@ -444,9 +433,6 @@ void Builtins::Generate_JSConstructStubGenericUnrestrictedReturn(
MacroAssembler* masm) {
Generate_JSConstructStubGeneric(masm, false);
}
-void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSBuiltinsConstructStubHelper(masm);
-}
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSBuiltinsConstructStubHelper(masm);
}
@@ -562,6 +548,19 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, true);
}
+static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
+ Register sfi_data,
+ Register scratch1) {
+ Label done;
+
+ __ GetObjectType(sfi_data, scratch1, scratch1);
+ __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE));
+ __ lw(sfi_data,
+ FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
+
+ __ bind(&done);
+}
+
// static
void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -637,6 +636,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
__ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoBytecode(masm, a3, a0);
__ GetObjectType(a3, a3, a3);
__ Assert(eq, AbortReason::kMissingBytecodeArray, a3,
Operand(BYTECODE_ARRAY_TYPE));
@@ -737,7 +737,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
DCHECK(
!AreAliased(feedback_vector, a0, a1, a3, scratch1, scratch2, scratch3));
- Label optimized_code_slot_is_cell, fallthrough;
+ Label optimized_code_slot_is_weak_ref, fallthrough;
Register closure = a1;
Register optimized_code_entry = scratch1;
@@ -746,9 +746,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
- // optimisation marker. Otherwise, interpret is as a weak cell to a code
+  // optimisation marker. Otherwise, interpret it as a weak reference to a code
// object.
- __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
+ __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);
{
// Optimized code slot is a Smi optimization marker.
@@ -782,12 +782,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
{
- // Optimized code slot is a WeakCell.
- __ bind(&optimized_code_slot_is_cell);
+ // Optimized code slot is a weak reference.
+ __ bind(&optimized_code_slot_is_weak_ref);
- __ lw(optimized_code_entry,
- FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
- __ JumpIfSmi(optimized_code_entry, &fallthrough);
+ __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &fallthrough);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
@@ -919,6 +917,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ lw(a0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ lw(kInterpreterBytecodeArrayRegister,
FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, t0);
__ lw(t0, FieldMemOperand(a0, SharedFunctionInfo::kDebugInfoOffset));
__ JumpIfNotSmi(t0, &maybe_load_debug_bytecode_array);
__ bind(&bytecode_array_loaded);
@@ -1041,12 +1040,30 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// kInterpreterBytecodeArrayRegister is already loaded with
// SharedFunctionInfo::kFunctionDataOffset.
__ bind(&maybe_load_debug_bytecode_array);
+ __ lw(t1, FieldMemOperand(t0, DebugInfo::kDebugBytecodeArrayOffset));
+ __ JumpIfRoot(t1, Heap::kUndefinedValueRootIndex, &bytecode_array_loaded);
+
+ __ mov(kInterpreterBytecodeArrayRegister, t1);
__ lw(t1, FieldMemOperand(t0, DebugInfo::kFlagsOffset));
__ SmiUntag(t1);
- __ And(t1, t1, Operand(DebugInfo::kHasBreakInfo));
- __ Branch(&bytecode_array_loaded, eq, t1, Operand(zero_reg));
- __ lw(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(t0, DebugInfo::kDebugBytecodeArrayOffset));
+ __ And(t1, t1, Operand(DebugInfo::kDebugExecutionMode));
+
+ ExternalReference debug_execution_mode =
+ ExternalReference::debug_execution_mode_address(masm->isolate());
+ __ li(t0, Operand(debug_execution_mode));
+ __ lb(t0, MemOperand(t0));
+ STATIC_ASSERT(static_cast<int>(DebugInfo::kDebugExecutionMode) ==
+ static_cast<int>(DebugInfo::kSideEffects));
+ __ Branch(&bytecode_array_loaded, eq, t0, Operand(t1));
+
+ __ push(closure);
+ __ push(feedback_vector);
+ __ push(kInterpreterBytecodeArrayRegister);
+ __ push(closure);
+ __ CallRuntime(Runtime::kDebugApplyInstrumentation);
+ __ pop(kInterpreterBytecodeArrayRegister);
+ __ pop(feedback_vector);
+ __ pop(closure);
__ Branch(&bytecode_array_loaded);
}
@@ -1090,6 +1107,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
MacroAssembler* masm, ConvertReceiverMode receiver_mode,
InterpreterPushArgsMode mode) {
+ DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a2 : the address of the first argument to be pushed. Subsequent
@@ -1118,11 +1136,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
}
// Call the target.
- if (mode == InterpreterPushArgsMode::kJSFunction) {
- __ Jump(
- masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny),
- RelocInfo::CODE_TARGET);
- } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
RelocInfo::CODE_TARGET);
} else {
@@ -1165,14 +1179,13 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
__ AssertUndefinedOrAllocationSite(a2, t0);
}
- if (mode == InterpreterPushArgsMode::kJSFunction) {
+ if (mode == InterpreterPushArgsMode::kArrayFunction) {
__ AssertFunction(a1);
- // Tail call to the function-specific construct stub (still in the caller
+ // Tail call to the array construct stub (still in the caller
// context at this point).
- __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kConstructStubOffset));
- __ Jump(at, t0, Code::kHeaderSize - kHeapObjectTag);
+ ArrayConstructorStub array_constructor_stub(masm->isolate());
+ __ Jump(array_constructor_stub.GetCode(), RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Call the constructor with a0, a1, and a3 unmodified.
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
@@ -1194,10 +1207,28 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.
+ Label builtin_trampoline, trampoline_loaded;
Smi* interpreter_entry_return_pc_offset(
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
+
+ // If the SFI function_data is an InterpreterData, get the trampoline stored
+ // in it, otherwise get the trampoline from the builtins list.
+ __ lw(t0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ lw(t0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset));
+ __ GetObjectType(t0, kInterpreterDispatchTableRegister,
+ kInterpreterDispatchTableRegister);
+ __ Branch(&builtin_trampoline, ne, kInterpreterDispatchTableRegister,
+ Operand(INTERPRETER_DATA_TYPE));
+
+ __ lw(t0, FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset));
+ __ Branch(&trampoline_loaded);
+
+ __ bind(&builtin_trampoline);
__ li(t0, Operand(BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline)));
+
+ __ bind(&trampoline_loaded);
__ Addu(ra, t0, Operand(interpreter_entry_return_pc_offset->value() +
Code::kHeaderSize - kHeapObjectTag));
@@ -1272,43 +1303,9 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
-void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : argument count (preserved for callee)
- // -- a3 : new target (preserved for callee)
- // -- a1 : target function (preserved for callee)
- // -----------------------------------
- Register closure = a1;
-
- // Get the feedback vector.
- Register feedback_vector = a2;
- __ lw(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ lw(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
-
- // The feedback vector must be defined.
- if (FLAG_debug_code) {
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Assert(ne, AbortReason::kExpectedFeedbackVector, feedback_vector,
- Operand(at));
- }
-
- // Is there an optimization marker or optimized code in the feedback vector?
- MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, t0, t3, t1);
-
- // Otherwise, tail call the SFI code.
- static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
- __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
- __ Addu(a2, a2, Code::kHeaderSize - kHeapObjectTag);
- __ Jump(a2);
-}
-
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
- // Set the code slot inside the JSFunction to the trampoline to the
- // interpreter entry.
- __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
+ // Set the code slot inside the JSFunction to CompileLazy.
+ __ Move(a2, BUILTIN_CODE(masm->isolate(), CompileLazy));
__ sw(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
__ RecordWriteField(a1, JSFunction::kCodeOffset, a2, t0, kRAHasNotBeenSaved,
kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
@@ -1316,6 +1313,76 @@ void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
Generate_CompileLazy(masm);
}
+static void GetSharedFunctionInfoCode(MacroAssembler* masm, Register sfi_data,
+ Register scratch1) {
+ // Figure out the SFI's code object.
+ Label done;
+ Label check_is_bytecode_array;
+ Label check_is_code;
+ Label check_is_fixed_array;
+ Label check_is_pre_parsed_scope_data;
+ Label check_is_function_template_info;
+ Label check_is_interpreter_data;
+
+ Register data_type = scratch1;
+
+ // IsSmi: Is builtin
+ __ JumpIfNotSmi(sfi_data, &check_is_bytecode_array);
+ __ li(scratch1,
+ Operand(ExternalReference::builtins_address(masm->isolate())));
+ // Avoid untagging the Smi.
+ STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
+ STATIC_ASSERT(kSmiShiftSize == 0);
+ __ Lsa(scratch1, scratch1, sfi_data, kPointerSizeLog2 - kSmiTagSize);
+ __ lw(sfi_data, MemOperand(scratch1));
+ __ Branch(&done);
+
+ // Get map for subsequent checks.
+ __ bind(&check_is_bytecode_array);
+ __ lw(data_type, FieldMemOperand(sfi_data, HeapObject::kMapOffset));
+ __ lhu(data_type, FieldMemOperand(data_type, Map::kInstanceTypeOffset));
+
+ // IsBytecodeArray: Interpret bytecode
+ __ Branch(&check_is_code, ne, data_type, Operand(BYTECODE_ARRAY_TYPE));
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
+ __ Branch(&done);
+
+ // IsCode: Run code
+ __ bind(&check_is_code);
+ __ Branch(&done, eq, data_type, Operand(CODE_TYPE));
+
+ // IsFixedArray: Instantiate using AsmWasmData,
+ __ bind(&check_is_fixed_array);
+ __ Branch(&check_is_pre_parsed_scope_data, ne, data_type,
+ Operand(FIXED_ARRAY_TYPE));
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InstantiateAsmJs));
+ __ Branch(&done);
+
+ // IsPreParsedScopeData: Compile lazy
+ __ bind(&check_is_pre_parsed_scope_data);
+ __ Branch(&check_is_function_template_info, ne, data_type,
+ Operand(TUPLE2_TYPE));
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), CompileLazy));
+ __ Branch(&done);
+
+ // IsFunctionTemplateInfo: API call
+ __ bind(&check_is_function_template_info);
+ __ Branch(&check_is_interpreter_data, ne, data_type,
+ Operand(FUNCTION_TEMPLATE_INFO_TYPE));
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), HandleApiCall));
+
+ // IsInterpreterData: Interpret bytecode
+ __ bind(&check_is_interpreter_data);
+ if (FLAG_debug_code) {
+ __ Assert(eq, AbortReason::kInvalidSharedFunctionInfoData, data_type,
+ Operand(INTERPRETER_DATA_TYPE));
+ }
+ __ lw(sfi_data, FieldMemOperand(
+ sfi_data, InterpreterData::kInterpreterTrampolineOffset));
+
+ __ bind(&done);
+}
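
For readers following the new GetSharedFunctionInfoCode helper above, a minimal standalone C++ sketch of the same dispatch over SharedFunctionInfo::function_data is shown below; the enum, names, and table handling are invented for illustration and are not V8 types.

#include <cassert>

// Hypothetical stand-ins for the tagged values the builtin inspects.
enum class SfiDataKind {
  kSmiBuiltinId,          // Smi: index into the isolate's builtins table
  kBytecodeArray,         // plain bytecode: use InterpreterEntryTrampoline
  kCode,                  // already a Code object: run it directly
  kFixedArray,            // asm.js data: InstantiateAsmJs
  kPreParsedScopeData,    // TUPLE2: CompileLazy
  kFunctionTemplateInfo,  // API function: HandleApiCall
  kInterpreterData        // wrapper holding a custom interpreter trampoline
};

enum class CodeChoice {
  kFromBuiltinsTable, kInterpreterEntryTrampoline, kRunDirectly,
  kInstantiateAsmJs, kCompileLazy, kHandleApiCall, kTrampolineFromInterpreterData
};

// Mirrors the label chain in GetSharedFunctionInfoCode.
CodeChoice SelectCodeFor(SfiDataKind kind) {
  switch (kind) {
    case SfiDataKind::kSmiBuiltinId:         return CodeChoice::kFromBuiltinsTable;
    case SfiDataKind::kBytecodeArray:        return CodeChoice::kInterpreterEntryTrampoline;
    case SfiDataKind::kCode:                 return CodeChoice::kRunDirectly;
    case SfiDataKind::kFixedArray:           return CodeChoice::kInstantiateAsmJs;
    case SfiDataKind::kPreParsedScopeData:   return CodeChoice::kCompileLazy;
    case SfiDataKind::kFunctionTemplateInfo: return CodeChoice::kHandleApiCall;
    case SfiDataKind::kInterpreterData:      return CodeChoice::kTrampolineFromInterpreterData;
  }
  assert(false && "unreachable");
  return CodeChoice::kCompileLazy;
}
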
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argument count (preserved for callee)
@@ -1338,12 +1405,13 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, t0, t3, t1);
- // We found no optimized code.
+ // We found no optimized code. Infer the code object needed for the SFI.
Register entry = t0;
__ lw(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(entry, FieldMemOperand(entry, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoCode(masm, entry, t1);
- // If SFI points to anything other than CompileLazy, install that.
- __ lw(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
+ // If code entry points to anything other than CompileLazy, install that.
__ Move(t1, masm->CodeObject());
__ Branch(&gotta_call_runtime, eq, entry, Operand(t1));
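
The Generate_CompileLazy fast path above can be summarized by a small hedged sketch (invented types, not V8 API): the code object is now inferred from the SFI's function_data, and it is installed on the closure only if it is not the CompileLazy builtin itself.

struct Code {};
struct JSFunction { const Code* code; };

// Returns true when a non-CompileLazy code object was installed on the
// closure (the builtin then tail-calls it); false means gotta_call_runtime.
bool TryInstallInferredCode(JSFunction* closure, const Code* inferred,
                            const Code* compile_lazy_builtin) {
  if (inferred == compile_lazy_builtin) return false;
  closure->code = inferred;  // the real builtin also emits a write barrier
  return true;
}
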
@@ -1411,25 +1479,9 @@ void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
{
// If we've reached this spot, the target builtin has been deserialized and
- // we simply need to copy it over. First to the shared function info.
+ // we simply need to copy it over to the target function.
Register target_builtin = scratch1;
- Register shared = scratch0;
-
- __ lw(shared,
- FieldMemOperand(target, JSFunction::kSharedFunctionInfoOffset));
-
- CHECK(t1 != target && t1 != scratch0 && t1 != scratch1);
- CHECK(t3 != target && t3 != scratch0 && t3 != scratch1);
-
- __ sw(target_builtin,
- FieldMemOperand(shared, SharedFunctionInfo::kCodeOffset));
- __ mov(t3, target_builtin); // Write barrier clobbers t3 below.
- __ RecordWriteField(shared, SharedFunctionInfo::kCodeOffset, t3, t1,
- kRAHasNotBeenSaved, kDontSaveFPRegs,
- OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // And second to the target function.
__ sw(target_builtin, FieldMemOperand(target, JSFunction::kCodeOffset));
__ mov(t3, target_builtin); // Write barrier clobbers t3 below.
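
A brief sketch of the ownership change the DeserializeLazy hunk relies on (types invented): the SharedFunctionInfo no longer carries a code slot, so the deserialized builtin is written only to the JSFunction.

struct Code;
struct SharedFunctionInfo {
  const void* function_data;  // code is re-derived from this when needed
};
struct JSFunction {
  SharedFunctionInfo* shared;
  const Code* code;  // the only installable code pointer after this change
};

void InstallDeserializedBuiltin(JSFunction* target, const Code* builtin) {
  target->code = builtin;  // single store; the builtin also emits a write barrier
}
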
@@ -2006,7 +2058,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Check that the function is not a "classConstructor".
Label class_constructor;
__ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
__ And(at, a3, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
__ Branch(&class_constructor, ne, at, Operand(zero_reg));
@@ -2016,7 +2068,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
- __ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
__ And(at, a3,
Operand(SharedFunctionInfo::IsNativeBit::kMask |
SharedFunctionInfo::IsStrictBit::kMask));
@@ -2238,17 +2290,27 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// -- a1 : the constructor to call (checked to be a JSFunction)
// -- a3 : the new target (checked to be a constructor)
// -----------------------------------
+ __ AssertConstructor(a1);
__ AssertFunction(a1);
// Calling convention for function specific ConstructStubs require
// a2 to contain either an AllocationSite or undefined.
__ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- // Tail call to the function-specific construct stub (still in the caller
- // context at this point).
+ Label call_generic_stub;
+
+ // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
__ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kConstructStubOffset));
- __ Jump(at, t0, Code::kHeaderSize - kHeapObjectTag);
+ __ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kFlagsOffset));
+ __ And(t0, t0, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
+ __ Branch(&call_generic_stub, eq, t0, Operand(zero_reg));
+
+ __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&call_generic_stub);
+ __ Jump(masm->isolate()->builtins()->JSConstructStubGeneric(),
+ RelocInfo::CODE_TARGET);
}
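
A minimal sketch of the new construct dispatch (bit position and names assumed, not taken from V8 headers): one flag in SharedFunctionInfo::flags selects between exactly two stubs, replacing the per-function construct stub load.

#include <cstdint>

// Bit position is assumed for the sketch; the real mask comes from
// SharedFunctionInfo::ConstructAsBuiltinBit.
constexpr uint32_t kConstructAsBuiltinMask = uint32_t{1} << 4;

enum class ConstructStub { kJSBuiltinsConstructStub, kJSConstructStubGeneric };

ConstructStub SelectConstructStub(uint32_t sfi_flags) {
  return (sfi_flags & kConstructAsBuiltinMask)
             ? ConstructStub::kJSBuiltinsConstructStub
             : ConstructStub::kJSConstructStubGeneric;
}
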
// static
@@ -2258,6 +2320,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// -- a1 : the function to call (checked to be a JSBoundFunction)
// -- a3 : the new target (checked to be a constructor)
// -----------------------------------
+ __ AssertConstructor(a1);
__ AssertBoundFunction(a1);
// Load [[BoundArguments]] into a2 and length of that into t0.
@@ -2352,16 +2415,17 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
Label non_constructor, non_proxy;
__ JumpIfSmi(a1, &non_constructor);
- // Dispatch based on instance type.
- __ GetObjectType(a1, t1, t2);
- __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
- RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
-
// Check if target has a [[Construct]] internal method.
+ __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
__ lbu(t3, FieldMemOperand(t1, Map::kBitFieldOffset));
__ And(t3, t3, Operand(Map::IsConstructorBit::kMask));
__ Branch(&non_constructor, eq, t3, Operand(zero_reg));
+ // Dispatch based on instance type.
+ __ lhu(t2, FieldMemOperand(t1, Map::kInstanceTypeOffset));
+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
+ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
+
// Only dispatch to bound functions after checking whether they are
// constructors.
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
@@ -2566,24 +2630,32 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ auto wasm_instance_reg = a0; // TODO(titzer): put in a common place.
+
// Save all parameter registers (see wasm-linkage.cc). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
- constexpr RegList gp_regs = Register::ListOf<a0, a1, a2, a3>();
+ constexpr RegList gp_regs = Register::ListOf<a1, a2, a3>();
constexpr RegList fp_regs =
DoubleRegister::ListOf<f2, f4, f6, f8, f10, f12, f14>();
__ MultiPush(gp_regs);
__ MultiPushFPU(fp_regs);
+ // Pass the WASM instance as an explicit argument to WasmCompileLazy.
+ __ push(wasm_instance_reg);
+ // Initialize the JavaScript context with 0. CEntryStub will use it to
+ // set the current context on the isolate.
__ Move(kContextRegister, Smi::kZero);
__ CallRuntime(Runtime::kWasmCompileLazy);
+ // The WASM instance is the second return value.
+ __ mov(wasm_instance_reg, kReturnRegister1);
// Restore registers.
__ MultiPopFPU(fp_regs);
__ MultiPop(gp_regs);
}
- // Now jump to the instructions of the returned code object.
- __ Jump(at, v0, Code::kHeaderSize - kHeapObjectTag);
+ // Finally, jump to the entrypoint.
+ __ Jump(at, v0, 0);
}
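
A rough standalone model of the revised WasmCompileLazy convention (names invented): the instance register is excluded from the save list and pushed as an explicit argument, and the runtime call returns a raw entrypoint plus the instance, so the jump no longer skips a Code header.

#include <cstdint>

struct WasmInstance;

struct LazyCompileResult {
  uintptr_t entrypoint;    // kReturnRegister0: jumped to without a header offset
  WasmInstance* instance;  // kReturnRegister1: moved back into the instance register
};

// Placeholder for Runtime::kWasmCompileLazy; the real runtime function compiles
// the callee and returns its entry address together with the instance.
LazyCompileResult RuntimeWasmCompileLazy(WasmInstance* instance) {
  return {0, instance};
}
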
#undef __
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index 15fdfc3d7d..7756872b14 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -82,19 +82,6 @@ void Builtins::Generate_AdaptorWithBuiltinExitFrame(MacroAssembler* masm) {
AdaptorWithExitFrameType(masm, BUILTIN_EXIT);
}
-// Load the built-in InternalArray function from the current context.
-static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
- Register result) {
- // Load the InternalArray function from the native context.
- __ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, result);
-}
-
-// Load the built-in Array function from the current context.
-static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
- // Load the Array function from the native context.
- __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, result);
-}
-
void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
@@ -103,9 +90,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// -----------------------------------
Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
- // Get the InternalArray function.
- GenerateLoadInternalArrayFunction(masm, a1);
-
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray functions should be maps.
__ Ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
@@ -120,6 +104,7 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
// Tail call a stub.
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
InternalArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -127,29 +112,33 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
+ // -- a1 : array function
// -- ra : return address
// -- sp[...]: constructor arguments
// -----------------------------------
Label generic_array_code;
- // Get the Array function.
- GenerateLoadArrayFunction(masm, a1);
-
if (FLAG_debug_code) {
// Initial map for the builtin Array functions should be maps.
__ Ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
__ SmiTst(a2, a4);
__ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction1, a4,
Operand(zero_reg));
- __ GetObjectType(a2, a3, a4);
+ __ GetObjectType(a2, t0, a4);
__ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction2, a4,
Operand(MAP_TYPE));
}
- // Run the native code for the Array function called as a normal function.
- // Tail call a stub.
- __ mov(a3, a1);
+ // a2 is the AllocationSite - here undefined.
__ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ // If a3 (new target) is undefined, then this is the 'Call' case, so move
+ // a1 (the constructor) to a3.
+ Label call;
+ __ Branch(&call, ne, a3, Operand(a2));
+ __ mov(a3, a1);
+
+ // Run the native code for the Array function called as a normal function.
+ __ bind(&call);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
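
The 'Call' handling added to Generate_ArrayConstructor reduces to the following hedged helper (illustrative only): when the incoming new target is undefined, the constructor itself is used as the new target before tail-calling ArrayConstructorStub.

// 'Call' case: no new target was supplied, so the constructor doubles as it.
template <typename T>
const T* NormalizeNewTarget(const T* constructor, const T* new_target,
                            const T* undefined_value) {
  return (new_target == undefined_value) ? constructor : new_target;
}
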
@@ -280,7 +269,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -----------------------------------
__ Ld(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lwu(t2, FieldMemOperand(t2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ lwu(t2, FieldMemOperand(t2, SharedFunctionInfo::kFlagsOffset));
__ And(t2, t2, Operand(SharedFunctionInfo::IsDerivedConstructorBit::kMask));
__ Branch(&not_create_implicit_receiver, ne, t2, Operand(zero_reg));
@@ -401,7 +390,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ bind(&other_result);
__ Ld(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
__ Ld(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lwu(t2, FieldMemOperand(t2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ lwu(t2, FieldMemOperand(t2, SharedFunctionInfo::kFlagsOffset));
__ And(t2, t2, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
if (restrict_constructor_return) {
@@ -445,13 +434,23 @@ void Builtins::Generate_JSConstructStubGenericUnrestrictedReturn(
MacroAssembler* masm) {
Generate_JSConstructStubGeneric(masm, false);
}
-void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSBuiltinsConstructStubHelper(masm);
-}
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSBuiltinsConstructStubHelper(masm);
}
+static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
+ Register sfi_data,
+ Register scratch1) {
+ Label done;
+
+ __ GetObjectType(sfi_data, scratch1, scratch1);
+ __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE));
+ __ Ld(sfi_data,
+ FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
+
+ __ bind(&done);
+}
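
GetSharedFunctionInfoBytecode can be read as the following standalone sketch (types invented): if function_data is an InterpreterData wrapper, the bytecode array sits one load further in; otherwise the data already is the bytecode array.

struct BytecodeArray;
struct InterpreterData { BytecodeArray* bytecode_array; };

struct FunctionData {
  bool is_interpreter_data;
  BytecodeArray* bytecode;            // valid when !is_interpreter_data
  InterpreterData* interpreter_data;  // valid when is_interpreter_data
};

BytecodeArray* GetBytecode(const FunctionData& data) {
  return data.is_interpreter_data ? data.interpreter_data->bytecode_array
                                  : data.bytecode;
}
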
+
// static
void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -526,6 +525,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ Ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
__ Ld(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoBytecode(masm, a3, a0);
__ GetObjectType(a3, a3, a3);
__ Assert(eq, AbortReason::kMissingBytecodeArray, a3,
Operand(BYTECODE_ARRAY_TYPE));
@@ -734,7 +734,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
DCHECK(
!AreAliased(feedback_vector, a0, a1, a3, scratch1, scratch2, scratch3));
- Label optimized_code_slot_is_cell, fallthrough;
+ Label optimized_code_slot_is_weak_ref, fallthrough;
Register closure = a1;
Register optimized_code_entry = scratch1;
@@ -743,9 +743,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
- // optimisation marker. Otherwise, interpret is as a weak cell to a code
+ // optimisation marker. Otherwise, interpret it as a weak reference to a code
// object.
- __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
+ __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);
{
// Optimized code slot is a Smi optimization marker.
@@ -779,12 +779,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
{
- // Optimized code slot is a WeakCell.
- __ bind(&optimized_code_slot_is_cell);
+ // Optimized code slot is a weak reference.
+ __ bind(&optimized_code_slot_is_weak_ref);
- __ Ld(optimized_code_entry,
- FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
- __ JumpIfSmi(optimized_code_entry, &fallthrough);
+ __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &fallthrough);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
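
An illustrative model of the optimized-code slot after the WeakCell removal (names invented): the slot holds either a Smi optimization marker or an in-place weak reference, and LoadWeakValue falls through when the referent has been cleared.

#include <optional>

struct Code;

struct OptimizedCodeSlot {
  bool is_smi_marker;  // Smi: an optimization marker, handled separately
  Code* weak_target;   // weak reference; nullptr once the GC cleared it
};

// Mirrors JumpIfNotSmi + LoadWeakValue: returns the code to tail-call, or
// nothing to take the fallthrough path into the normal entry sequence.
std::optional<Code*> TryGetOptimizedCode(const OptimizedCodeSlot& slot) {
  if (slot.is_smi_marker) return std::nullopt;
  if (slot.weak_target == nullptr) return std::nullopt;
  return slot.weak_target;
}
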
@@ -916,6 +914,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Ld(a0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ Ld(kInterpreterBytecodeArrayRegister,
FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, a4);
__ Ld(a4, FieldMemOperand(a0, SharedFunctionInfo::kDebugInfoOffset));
__ JumpIfNotSmi(a4, &maybe_load_debug_bytecode_array);
__ bind(&bytecode_array_loaded);
@@ -1039,12 +1038,30 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// kInterpreterBytecodeArrayRegister is already loaded with
// SharedFunctionInfo::kFunctionDataOffset.
__ bind(&maybe_load_debug_bytecode_array);
+ __ Ld(a5, FieldMemOperand(a4, DebugInfo::kDebugBytecodeArrayOffset));
+ __ JumpIfRoot(a5, Heap::kUndefinedValueRootIndex, &bytecode_array_loaded);
+
+ __ mov(kInterpreterBytecodeArrayRegister, a5);
__ Ld(a5, FieldMemOperand(a4, DebugInfo::kFlagsOffset));
__ SmiUntag(a5);
- __ And(a5, a5, Operand(DebugInfo::kHasBreakInfo));
- __ Branch(&bytecode_array_loaded, eq, a5, Operand(zero_reg));
- __ Ld(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(a4, DebugInfo::kDebugBytecodeArrayOffset));
+ __ And(a5, a5, Operand(DebugInfo::kDebugExecutionMode));
+
+ ExternalReference debug_execution_mode =
+ ExternalReference::debug_execution_mode_address(masm->isolate());
+ __ li(a4, Operand(debug_execution_mode));
+ __ Lb(a4, MemOperand(a4));
+ STATIC_ASSERT(static_cast<int>(DebugInfo::kDebugExecutionMode) ==
+ static_cast<int>(DebugInfo::kSideEffects));
+ __ Branch(&bytecode_array_loaded, eq, a4, Operand(a5));
+
+ __ push(closure);
+ __ push(feedback_vector);
+ __ push(kInterpreterBytecodeArrayRegister);
+ __ push(closure);
+ __ CallRuntime(Runtime::kDebugApplyInstrumentation);
+ __ pop(kInterpreterBytecodeArrayRegister);
+ __ pop(feedback_vector);
+ __ pop(closure);
__ Branch(&bytecode_array_loaded);
}
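
The new debug-bytecode logic amounts to the following hedged check (enumerators invented): the debug copy is used whenever one exists, and Runtime::kDebugApplyInstrumentation is called only when the DebugInfo's recorded execution mode differs from the isolate's current debug execution mode.

enum class DebugExecutionMode { kBreakpoints, kSideEffects };

bool NeedsApplyInstrumentation(bool has_debug_bytecode,
                               DebugExecutionMode recorded,
                               DebugExecutionMode current) {
  if (!has_debug_bytecode) return false;  // keep the original bytecode array
  return recorded != current;             // else call DebugApplyInstrumentation
}
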
@@ -1087,6 +1104,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
MacroAssembler* masm, ConvertReceiverMode receiver_mode,
InterpreterPushArgsMode mode) {
+ DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a2 : the address of the first argument to be pushed. Subsequent
@@ -1115,11 +1133,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
}
// Call the target.
- if (mode == InterpreterPushArgsMode::kJSFunction) {
- __ Jump(
- masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny),
- RelocInfo::CODE_TARGET);
- } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
RelocInfo::CODE_TARGET);
} else {
@@ -1162,15 +1176,13 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
__ AssertUndefinedOrAllocationSite(a2, t0);
}
- if (mode == InterpreterPushArgsMode::kJSFunction) {
+ if (mode == InterpreterPushArgsMode::kArrayFunction) {
__ AssertFunction(a1);
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
- __ Ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ Ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kConstructStubOffset));
- __ Daddu(at, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(at);
+ ArrayConstructorStub array_constructor_stub(masm->isolate());
+ __ Jump(array_constructor_stub.GetCode(), RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Call the constructor with a0, a1, and a3 unmodified.
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
@@ -1192,10 +1204,28 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.
+ Label builtin_trampoline, trampoline_loaded;
Smi* interpreter_entry_return_pc_offset(
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
+
+ // If the SFI function_data is an InterpreterData, get the trampoline stored
+ // in it, otherwise get the trampoline from the builtins list.
+ __ Ld(t0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ Ld(t0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld(t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset));
+ __ GetObjectType(t0, kInterpreterDispatchTableRegister,
+ kInterpreterDispatchTableRegister);
+ __ Branch(&builtin_trampoline, ne, kInterpreterDispatchTableRegister,
+ Operand(INTERPRETER_DATA_TYPE));
+
+ __ Ld(t0, FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset));
+ __ Branch(&trampoline_loaded);
+
+ __ bind(&builtin_trampoline);
__ li(t0, Operand(BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline)));
+
+ __ bind(&trampoline_loaded);
__ Daddu(ra, t0, Operand(interpreter_entry_return_pc_offset->value() +
Code::kHeaderSize - kHeapObjectTag));
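
A small sketch of the return-address computation used by Generate_InterpreterEnterBytecode (offsets and types invented): the chosen trampoline, whether from InterpreterData or the builtins list, is offset by the stored return-pc value plus the Code header size.

#include <cstdint>

struct Code { uintptr_t start; };  // tagged pointer to the code object

uintptr_t InterpreterReturnAddress(const Code& trampoline, int return_pc_offset,
                                   int code_header_size, int heap_object_tag) {
  // ra/lr is pointed into the trampoline's instruction stream: past the Code
  // header and past the stored interpreter_entry_return_pc_offset.
  return trampoline.start + return_pc_offset + code_header_size - heap_object_tag;
}
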
@@ -1270,43 +1300,9 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
-void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : argument count (preserved for callee)
- // -- a3 : new target (preserved for callee)
- // -- a1 : target function (preserved for callee)
- // -----------------------------------
- Register closure = a1;
-
- // Get the feedback vector.
- Register feedback_vector = a2;
- __ Ld(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
-
- // The feedback vector must be defined.
- if (FLAG_debug_code) {
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Assert(ne, AbortReason::kExpectedFeedbackVector, feedback_vector,
- Operand(at));
- }
-
- // Is there an optimization marker or optimized code in the feedback vector?
- MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, a4, t3, a5);
-
- // Otherwise, tail call the SFI code.
- static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
- __ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ Ld(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
- __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(a2);
-}
-
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
- // Set the code slot inside the JSFunction to the trampoline to the
- // interpreter entry.
- __ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ Ld(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
+ // Set the code slot inside the JSFunction to CompileLazy.
+ __ Move(a2, BUILTIN_CODE(masm->isolate(), CompileLazy));
__ Sd(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
__ RecordWriteField(a1, JSFunction::kCodeOffset, a2, a4, kRAHasNotBeenSaved,
kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
@@ -1314,6 +1310,76 @@ void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
Generate_CompileLazy(masm);
}
+static void GetSharedFunctionInfoCode(MacroAssembler* masm, Register sfi_data,
+ Register scratch1) {
+ // Figure out the SFI's code object.
+ Label done;
+ Label check_is_bytecode_array;
+ Label check_is_code;
+ Label check_is_fixed_array;
+ Label check_is_pre_parsed_scope_data;
+ Label check_is_function_template_info;
+ Label check_is_interpreter_data;
+
+ Register data_type = scratch1;
+
+ // IsSmi: Is builtin
+ __ JumpIfNotSmi(sfi_data, &check_is_bytecode_array);
+ __ li(scratch1,
+ Operand(ExternalReference::builtins_address(masm->isolate())));
+ // Avoid untagging the Smi by merging the shift
+ STATIC_ASSERT(kPointerSizeLog2 < kSmiShift);
+ __ dsrl(sfi_data, sfi_data, kSmiShift - kPointerSizeLog2);
+ __ Daddu(scratch1, scratch1, sfi_data);
+ __ Ld(sfi_data, MemOperand(scratch1));
+ __ Branch(&done);
+
+ // Get map for subsequent checks.
+ __ bind(&check_is_bytecode_array);
+ __ Ld(data_type, FieldMemOperand(sfi_data, HeapObject::kMapOffset));
+ __ Lhu(data_type, FieldMemOperand(data_type, Map::kInstanceTypeOffset));
+
+ // IsBytecodeArray: Interpret bytecode
+ __ Branch(&check_is_code, ne, data_type, Operand(BYTECODE_ARRAY_TYPE));
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
+ __ Branch(&done);
+
+ // IsCode: Run code
+ __ bind(&check_is_code);
+ __ Branch(&done, eq, data_type, Operand(CODE_TYPE));
+
+ // IsFixedArray: Instantiate using AsmWasmData,
+ __ bind(&check_is_fixed_array);
+ __ Branch(&check_is_pre_parsed_scope_data, ne, data_type,
+ Operand(FIXED_ARRAY_TYPE));
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InstantiateAsmJs));
+ __ Branch(&done);
+
+ // IsPreParsedScopeData: Compile lazy
+ __ bind(&check_is_pre_parsed_scope_data);
+ __ Branch(&check_is_function_template_info, ne, data_type,
+ Operand(TUPLE2_TYPE));
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), CompileLazy));
+ __ Branch(&done);
+
+ // IsFunctionTemplateInfo: API call
+ __ bind(&check_is_function_template_info);
+ __ Branch(&check_is_interpreter_data, ne, data_type,
+ Operand(FUNCTION_TEMPLATE_INFO_TYPE));
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), HandleApiCall));
+
+ // IsInterpreterData: Interpret bytecode
+ __ bind(&check_is_interpreter_data);
+ if (FLAG_debug_code) {
+ __ Assert(eq, AbortReason::kInvalidSharedFunctionInfoData, data_type,
+ Operand(INTERPRETER_DATA_TYPE));
+ }
+ __ Ld(sfi_data, FieldMemOperand(
+ sfi_data, InterpreterData::kInterpreterTrampolineOffset));
+
+ __ bind(&done);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argument count (preserved for callee)
@@ -1336,12 +1402,12 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, a4, t3, a5);
- // We found no optimized code.
+ // We found no optimized code. Infer the code object needed for the SFI.
Register entry = a4;
__ Ld(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld(entry, FieldMemOperand(entry, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoCode(masm, entry, t1);
- // If SFI points to anything other than CompileLazy, install that.
- __ Ld(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
__ Move(t1, masm->CodeObject());
__ Branch(&gotta_call_runtime, eq, entry, Operand(t1));
@@ -1410,25 +1476,9 @@ void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
{
// If we've reached this spot, the target builtin has been deserialized and
- // we simply need to copy it over. First to the shared function info.
+ // we simply need to copy it over to the target function.
Register target_builtin = scratch1;
- Register shared = scratch0;
-
- __ Ld(shared,
- FieldMemOperand(target, JSFunction::kSharedFunctionInfoOffset));
-
- CHECK(t1 != target && t1 != scratch0 && t1 != scratch1);
- CHECK(t3 != target && t3 != scratch0 && t3 != scratch1);
-
- __ Sd(target_builtin,
- FieldMemOperand(shared, SharedFunctionInfo::kCodeOffset));
- __ mov(t3, target_builtin); // Write barrier clobbers t3 below.
- __ RecordWriteField(shared, SharedFunctionInfo::kCodeOffset, t3, t1,
- kRAHasNotBeenSaved, kDontSaveFPRegs,
- OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // And second to the target function.
__ Sd(target_builtin, FieldMemOperand(target, JSFunction::kCodeOffset));
__ mov(t3, target_builtin); // Write barrier clobbers t3 below.
@@ -2031,7 +2081,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Check that function is not a "classConstructor".
Label class_constructor;
__ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ Lwu(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ Lwu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
__ And(at, a3, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
__ Branch(&class_constructor, ne, at, Operand(zero_reg));
@@ -2041,7 +2091,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
- __ Lwu(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ Lwu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
__ And(at, a3,
Operand(SharedFunctionInfo::IsNativeBit::kMask |
SharedFunctionInfo::IsStrictBit::kMask));
@@ -2259,18 +2309,27 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// -- a1 : the constructor to call (checked to be a JSFunction)
// -- a3 : the new target (checked to be a constructor)
// -----------------------------------
+ __ AssertConstructor(a1);
__ AssertFunction(a1);
// Calling convention for function specific ConstructStubs require
// a2 to contain either an AllocationSite or undefined.
__ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- // Tail call to the function-specific construct stub (still in the caller
- // context at this point).
+ Label call_generic_stub;
+
+ // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
__ Ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ Ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kConstructStubOffset));
- __ Daddu(at, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(at);
+ __ lwu(a4, FieldMemOperand(a4, SharedFunctionInfo::kFlagsOffset));
+ __ And(a4, a4, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
+ __ Branch(&call_generic_stub, eq, a4, Operand(zero_reg));
+
+ __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&call_generic_stub);
+ __ Jump(masm->isolate()->builtins()->JSConstructStubGeneric(),
+ RelocInfo::CODE_TARGET);
}
// static
@@ -2280,6 +2339,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// -- a1 : the function to call (checked to be a JSBoundFunction)
// -- a3 : the new target (checked to be a constructor)
// -----------------------------------
+ __ AssertConstructor(a1);
__ AssertBoundFunction(a1);
// Load [[BoundArguments]] into a2 and length of that into a4.
@@ -2372,16 +2432,17 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
Label non_constructor, non_proxy;
__ JumpIfSmi(a1, &non_constructor);
- // Dispatch based on instance type.
- __ GetObjectType(a1, t1, t2);
- __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
- RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
-
// Check if target has a [[Construct]] internal method.
+ __ ld(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
__ Lbu(t3, FieldMemOperand(t1, Map::kBitFieldOffset));
__ And(t3, t3, Operand(Map::IsConstructorBit::kMask));
__ Branch(&non_constructor, eq, t3, Operand(zero_reg));
+ // Dispatch based on instance type.
+ __ Lhu(t2, FieldMemOperand(t1, Map::kInstanceTypeOffset));
+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
+ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
+
// Only dispatch to bound functions after checking whether they are
// constructors.
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
@@ -2588,26 +2649,32 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ auto wasm_instance_reg = a0; // TODO(titzer): put in a common place.
+
// Save all parameter registers (see wasm-linkage.cc). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
- constexpr RegList gp_regs =
- Register::ListOf<a0, a1, a2, a3, a4, a5, a6, a7>();
+ constexpr RegList gp_regs = Register::ListOf<a1, a2, a3, a4, a5, a6, a7>();
constexpr RegList fp_regs =
DoubleRegister::ListOf<f2, f4, f6, f8, f10, f12, f14>();
__ MultiPush(gp_regs);
__ MultiPushFPU(fp_regs);
+ // Pass the WASM instance as an explicit argument to WasmCompileLazy.
+ __ push(wasm_instance_reg);
+ // Initialize the JavaScript context with 0. CEntryStub will use it to
+ // set the current context on the isolate.
__ Move(kContextRegister, Smi::kZero);
__ CallRuntime(Runtime::kWasmCompileLazy);
+ // The WASM instance is the second return value.
+ __ mov(wasm_instance_reg, kReturnRegister1);
// Restore registers.
__ MultiPopFPU(fp_regs);
__ MultiPop(gp_regs);
}
- // Now jump to the instructions of the returned code object.
- __ Daddu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(at);
+ // Finally, jump to the entrypoint.
+ __ Jump(v0);
}
#undef __
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index 9206920d45..2989d69a4b 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -81,19 +81,6 @@ void Builtins::Generate_AdaptorWithBuiltinExitFrame(MacroAssembler* masm) {
AdaptorWithExitFrameType(masm, BUILTIN_EXIT);
}
-// Load the built-in InternalArray function from the current context.
-static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
- Register result) {
- // Load the InternalArray function from the current native context.
- __ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, result);
-}
-
-// Load the built-in Array function from the current context.
-static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
- // Load the Array function from the current native context.
- __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, result);
-}
-
void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : number of arguments
@@ -102,9 +89,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// -----------------------------------
Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
- // Get the InternalArray function.
- GenerateLoadInternalArrayFunction(masm, r4);
-
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray functions should be maps.
__ LoadP(r5, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
@@ -118,6 +102,7 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
// tail call a stub
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
InternalArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -125,27 +110,32 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : number of arguments
+ // -- r4 : array function
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
- // Get the Array function.
- GenerateLoadArrayFunction(masm, r4);
-
if (FLAG_debug_code) {
// Initial map for the builtin Array functions should be maps.
__ LoadP(r5, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
__ TestIfSmi(r5, r0);
__ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, cr0);
- __ CompareObjectType(r5, r6, r7, MAP_TYPE);
+ __ CompareObjectType(r5, r7, r8, MAP_TYPE);
__ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
}
+ // r5 is the AllocationSite - here undefined.
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ // If r6 (new target) is undefined, then this is the 'Call' case, so move
+ // r4 (the constructor) to r6.
+ Label call;
+ __ cmp(r6, r5);
+ __ bne(&call);
__ mr(r6, r4);
+
// Run the native code for the Array function called as a normal function.
- // tail call a stub
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ bind(&call);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -286,7 +276,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -----------------------------------
__ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kCompilerHintsOffset));
+ __ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset));
__ TestBitMask(r7, SharedFunctionInfo::IsDerivedConstructorBit::kMask, r0);
__ bne(&not_create_implicit_receiver, cr0);
@@ -413,7 +403,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// Throw if constructor function is a class constructor
__ LoadP(r7, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
__ LoadP(r7, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
- __ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kCompilerHintsOffset));
+ __ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset));
__ TestBitMask(r7, SharedFunctionInfo::IsClassConstructorBit::kMask, r0);
__ beq(&use_receiver, cr0);
@@ -458,13 +448,22 @@ void Builtins::Generate_JSConstructStubGenericUnrestrictedReturn(
MacroAssembler* masm) {
Generate_JSConstructStubGeneric(masm, false);
}
-void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSBuiltinsConstructStubHelper(masm);
-}
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSBuiltinsConstructStubHelper(masm);
}
+static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
+ Register sfi_data,
+ Register scratch1) {
+ Label done;
+
+ __ CompareObjectType(sfi_data, scratch1, scratch1, INTERPRETER_DATA_TYPE);
+ __ bne(&done);
+ __ LoadP(sfi_data,
+ FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
+ __ bind(&done);
+}
+
// static
void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -545,7 +544,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Underlying function needs to have bytecode available.
if (FLAG_debug_code) {
+ __ LoadP(r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
__ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoBytecode(masm, r6, r3);
__ CompareObjectType(r6, r6, r6, BYTECODE_ARRAY_TYPE);
__ Assert(eq, AbortReason::kMissingBytecodeArray);
}
@@ -751,7 +752,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
DCHECK(
!AreAliased(feedback_vector, r3, r4, r6, scratch1, scratch2, scratch3));
- Label optimized_code_slot_is_cell, fallthrough;
+ Label optimized_code_slot_is_weak_ref, fallthrough;
Register closure = r4;
Register optimized_code_entry = scratch1;
@@ -761,9 +762,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
- // optimisation marker. Otherwise, interpret is as a weak cell to a code
+ // optimisation marker. Otherwise, interpret it as a weak reference to a code
// object.
- __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
+ __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);
{
// Optimized code slot is a Smi optimization marker.
@@ -798,12 +799,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
{
- // Optimized code slot is a WeakCell.
- __ bind(&optimized_code_slot_is_cell);
+ // Optimized code slot is a weak reference.
+ __ bind(&optimized_code_slot_is_weak_ref);
- __ LoadP(optimized_code_entry,
- FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
- __ JumpIfSmi(optimized_code_entry, &fallthrough);
+ __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &fallthrough);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
@@ -825,7 +824,6 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
__ addi(r5, optimized_code_entry,
Operand(Code::kHeaderSize - kHeapObjectTag));
- __ mr(ip, r5);
__ Jump(r5);
// Optimized code slot contains deoptimized code, evict it and re-enter the
@@ -941,6 +939,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load original bytecode array or the debug copy.
__ LoadP(kInterpreterBytecodeArrayRegister,
FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, r7);
__ LoadP(r7, FieldMemOperand(r3, SharedFunctionInfo::kDebugInfoOffset));
__ TestIfSmi(r7, r0);
__ bne(&maybe_load_debug_bytecode_array, cr0);
@@ -1070,15 +1069,27 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load debug copy of the bytecode array if it exists.
// kInterpreterBytecodeArrayRegister is already loaded with
// SharedFunctionInfo::kFunctionDataOffset.
- Label done;
__ bind(&maybe_load_debug_bytecode_array);
+ __ LoadP(ip, FieldMemOperand(r7, DebugInfo::kDebugBytecodeArrayOffset));
+ __ JumpIfRoot(ip, Heap::kUndefinedValueRootIndex, &bytecode_array_loaded);
+
+ __ mr(kInterpreterBytecodeArrayRegister, ip);
__ LoadP(ip, FieldMemOperand(r7, DebugInfo::kFlagsOffset));
__ SmiUntag(ip);
- __ andi(r0, ip, Operand(DebugInfo::kHasBreakInfo));
- __ beq(&done, cr0);
- __ LoadP(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(r7, DebugInfo::kDebugBytecodeArrayOffset));
- __ bind(&done);
+ __ andi(ip, ip, Operand(DebugInfo::kDebugExecutionMode));
+
+ ExternalReference debug_execution_mode =
+ ExternalReference::debug_execution_mode_address(masm->isolate());
+ __ mov(r7, Operand(debug_execution_mode));
+ __ lwz(r7, MemOperand(r7));
+ STATIC_ASSERT(static_cast<int>(DebugInfo::kDebugExecutionMode) ==
+ static_cast<int>(DebugInfo::kSideEffects));
+ __ cmp(r7, ip);
+ __ beq(&bytecode_array_loaded);
+
+ __ Push(closure, feedback_vector, kInterpreterBytecodeArrayRegister, closure);
+ __ CallRuntime(Runtime::kDebugApplyInstrumentation);
+ __ Pop(closure, feedback_vector, kInterpreterBytecodeArrayRegister);
__ b(&bytecode_array_loaded);
}
@@ -1117,6 +1128,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
MacroAssembler* masm, ConvertReceiverMode receiver_mode,
InterpreterPushArgsMode mode) {
+ DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
// ----------- S t a t e -------------
// -- r3 : the number of arguments (not including the receiver)
// -- r5 : the address of the first argument to be pushed. Subsequent
@@ -1146,11 +1158,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
}
// Call the target.
- if (mode == InterpreterPushArgsMode::kJSFunction) {
- __ Jump(
- masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny),
- RelocInfo::CODE_TARGET);
- } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
RelocInfo::CODE_TARGET);
} else {
@@ -1196,16 +1204,13 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
} else {
__ AssertUndefinedOrAllocationSite(r5, r8);
}
- if (mode == InterpreterPushArgsMode::kJSFunction) {
+ if (mode == InterpreterPushArgsMode::kArrayFunction) {
__ AssertFunction(r4);
- // Tail call to the function-specific construct stub (still in the caller
+ // Tail call to the array construct stub (still in the caller
// context at this point).
- __ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(r7, FieldMemOperand(r7, SharedFunctionInfo::kConstructStubOffset));
- // Jump to the construct function.
- __ addi(ip, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(ip);
+ ArrayConstructorStub array_constructor_stub(masm->isolate());
+ __ Jump(array_constructor_stub.GetCode(), RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Call the constructor with r3, r4, and r6 unmodified.
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
@@ -1227,10 +1232,29 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.
+ Label builtin_trampoline, trampoline_loaded;
Smi* interpreter_entry_return_pc_offset(
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
+
+ // If the SFI function_data is an InterpreterData, get the trampoline stored
+ // in it, otherwise get the trampoline from the builtins list.
+ __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ LoadP(r5, FieldMemOperand(r5, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset));
+ __ CompareObjectType(r5, kInterpreterDispatchTableRegister,
+ kInterpreterDispatchTableRegister,
+ INTERPRETER_DATA_TYPE);
+ __ bne(&builtin_trampoline);
+
+ __ LoadP(r5,
+ FieldMemOperand(r5, InterpreterData::kInterpreterTrampolineOffset));
+ __ b(&trampoline_loaded);
+
+ __ bind(&builtin_trampoline);
__ Move(r5, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
+
+ __ bind(&trampoline_loaded);
__ addi(r0, r5, Operand(interpreter_entry_return_pc_offset->value() +
Code::kHeaderSize - kHeapObjectTag));
__ mtlr(r0);
@@ -1304,43 +1328,9 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
-void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r3 : argument count (preserved for callee)
- // -- r6 : new target (preserved for callee)
- // -- r4 : target function (preserved for callee)
- // -----------------------------------
- Register closure = r4;
-
- // Get the feedback vector.
- Register feedback_vector = r5;
- __ LoadP(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ LoadP(feedback_vector,
- FieldMemOperand(feedback_vector, Cell::kValueOffset));
-
- // The feedback vector must be defined.
- if (FLAG_debug_code) {
- __ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
- __ Assert(ne, AbortReason::kExpectedFeedbackVector);
- }
-
- // Is there an optimization marker or optimized code in the feedback vector?
- MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r7, r9, r8);
-
- // Otherwise, tail call the SFI code.
- static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
- __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kCodeOffset));
- __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(r5);
-}
-
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
- // Set the code slot inside the JSFunction to the trampoline to the
- // interpreter entry.
- __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kCodeOffset));
+ // Set the code slot inside the JSFunction to CompileLazy.
+ __ Move(r5, BUILTIN_CODE(masm->isolate(), CompileLazy));
__ StoreP(r5, FieldMemOperand(r4, JSFunction::kCodeOffset), r0);
__ RecordWriteField(r4, JSFunction::kCodeOffset, r5, r7, kLRHasNotBeenSaved,
kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
@@ -1348,6 +1338,79 @@ void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
Generate_CompileLazy(masm);
}
+static void GetSharedFunctionInfoCode(MacroAssembler* masm, Register sfi_data,
+ Register scratch1) {
+ // Figure out the SFI's code object.
+ Label done;
+ Label check_is_bytecode_array;
+ Label check_is_code;
+ Label check_is_fixed_array;
+ Label check_is_pre_parsed_scope_data;
+ Label check_is_function_template_info;
+ Label check_is_interpreter_data;
+
+ Register data_type = scratch1;
+
+ // IsSmi: Is builtin
+ __ JumpIfNotSmi(sfi_data, &check_is_bytecode_array);
+ __ mov(scratch1,
+ Operand(ExternalReference::builtins_address(masm->isolate())));
+ __ SmiUntag(sfi_data, LeaveRC, kPointerSizeLog2);
+ __ LoadPX(sfi_data, MemOperand(scratch1, sfi_data));
+ __ b(&done);
+
+ // Get map for subsequent checks.
+ __ bind(&check_is_bytecode_array);
+ __ LoadP(data_type, FieldMemOperand(sfi_data, HeapObject::kMapOffset));
+ __ LoadHalfWord(data_type,
+ FieldMemOperand(data_type, Map::kInstanceTypeOffset), r0);
+
+ // IsBytecodeArray: Interpret bytecode
+ __ cmpi(data_type, Operand(BYTECODE_ARRAY_TYPE));
+ __ bne(&check_is_code);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
+ __ b(&done);
+
+ // IsCode: Run code
+ __ bind(&check_is_code);
+ __ cmpi(data_type, Operand(CODE_TYPE));
+ __ beq(&done);
+
+ // IsFixedArray: Instantiate using AsmWasmData,
+ __ bind(&check_is_fixed_array);
+ __ cmpi(data_type, Operand(FIXED_ARRAY_TYPE));
+ __ bne(&check_is_pre_parsed_scope_data);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InstantiateAsmJs));
+ __ b(&done);
+
+ // IsPreParsedScopeData: Compile lazy
+ __ bind(&check_is_pre_parsed_scope_data);
+ __ cmpi(data_type, Operand(TUPLE2_TYPE));
+ __ bne(&check_is_function_template_info);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), CompileLazy));
+ __ b(&done);
+
+ // IsFunctionTemplateInfo: API call
+ __ bind(&check_is_function_template_info);
+ __ cmpi(data_type, Operand(FUNCTION_TEMPLATE_INFO_TYPE));
+ __ bne(&check_is_interpreter_data);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), HandleApiCall));
+ __ b(&done);
+
+ // IsInterpreterData: Interpret bytecode
+ __ bind(&check_is_interpreter_data);
+ if (FLAG_debug_code) {
+ __ cmpi(data_type, Operand(INTERPRETER_DATA_TYPE));
+ __ Assert(eq, AbortReason::kInvalidSharedFunctionInfoData);
+ }
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), HandleApiCall));
+ __ LoadP(
+ sfi_data,
+ FieldMemOperand(sfi_data, InterpreterData::kInterpreterTrampolineOffset));
+
+ __ bind(&done);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : argument count (preserved for callee)
@@ -1371,13 +1434,15 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r7, r9, r8);
- // We found no optimized code.
+ // We found no optimized code. Infer the code object needed for the SFI.
Register entry = r7;
__ LoadP(entry,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(entry,
+ FieldMemOperand(entry, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoCode(masm, entry, r8);
- // If SFI points to anything other than CompileLazy, install that.
- __ LoadP(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
+ // If code entry points to anything other than CompileLazy, install that.
__ mov(r8, Operand(masm->CodeObject()));
__ cmp(entry, r8);
__ beq(&gotta_call_runtime);
@@ -1447,25 +1512,9 @@ void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
}
{
// If we've reached this spot, the target builtin has been deserialized and
- // we simply need to copy it over. First to the shared function info.
+ // we simply need to copy it over to the target function.
Register target_builtin = scratch1;
- Register shared = scratch0;
-
- __ LoadP(shared,
- FieldMemOperand(target, JSFunction::kSharedFunctionInfoOffset));
-
- CHECK(r8 != target && r8 != scratch0 && r8 != scratch1);
- CHECK(r9 != target && r9 != scratch0 && r9 != scratch1);
-
- __ StoreP(target_builtin,
- FieldMemOperand(shared, SharedFunctionInfo::kCodeOffset), r0);
- __ mr(r9, target_builtin); // Write barrier clobbers r9 below.
- __ RecordWriteField(shared, SharedFunctionInfo::kCodeOffset, r9, r8,
- kLRHasNotBeenSaved, kDontSaveFPRegs,
- OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // And second to the target function.
__ StoreP(target_builtin, FieldMemOperand(target, JSFunction::kCodeOffset),
r0);
@@ -2073,7 +2122,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Check that the function is not a "classConstructor".
Label class_constructor;
__ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ lwz(r6, FieldMemOperand(r5, SharedFunctionInfo::kCompilerHintsOffset));
+ __ lwz(r6, FieldMemOperand(r5, SharedFunctionInfo::kFlagsOffset));
__ TestBitMask(r6, SharedFunctionInfo::IsClassConstructorBit::kMask, r0);
__ bne(&class_constructor, cr0);
@@ -2325,18 +2374,28 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// -- r4 : the constructor to call (checked to be a JSFunction)
// -- r6 : the new target (checked to be a constructor)
// -----------------------------------
+ __ AssertConstructor(r4);
__ AssertFunction(r4);
// Calling convention for function specific ConstructStubs require
// r5 to contain either an AllocationSite or undefined.
__ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- // Tail call to the function-specific construct stub (still in the caller
- // context at this point).
+ Label call_generic_stub;
+
+ // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
__ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(r7, FieldMemOperand(r7, SharedFunctionInfo::kConstructStubOffset));
- __ addi(ip, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(ip);
+ __ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset));
+ __ mov(ip, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
+ __ and_(r7, r7, ip, SetRC);
+ __ beq(&call_generic_stub, cr0);
+
+ __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&call_generic_stub);
+ __ Jump(masm->isolate()->builtins()->JSConstructStubGeneric(),
+ RelocInfo::CODE_TARGET);
}
// static
@@ -2346,6 +2405,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// -- r4 : the function to call (checked to be a JSBoundFunction)
// -- r6 : the new target (checked to be a constructor)
// -----------------------------------
+ __ AssertConstructor(r4);
__ AssertBoundFunction(r4);
// Push the [[BoundArguments]] onto the stack.
@@ -2378,16 +2438,17 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
Label non_constructor, non_proxy;
__ JumpIfSmi(r4, &non_constructor);
- // Dispatch based on instance type.
- __ CompareObjectType(r4, r7, r8, JS_FUNCTION_TYPE);
- __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
- RelocInfo::CODE_TARGET, eq);
-
// Check if target has a [[Construct]] internal method.
+ __ LoadP(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
__ lbz(r5, FieldMemOperand(r7, Map::kBitFieldOffset));
__ TestBit(r5, Map::IsConstructorBit::kShift, r0);
__ beq(&non_constructor, cr0);
+ // Dispatch based on instance type.
+ __ CompareInstanceType(r7, r8, JS_FUNCTION_TYPE);
+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
+ RelocInfo::CODE_TARGET, eq);
+
// Only dispatch to bound functions after checking whether they are
// constructors.
__ cmpi(r8, Operand(JS_BOUND_FUNCTION_TYPE));
@@ -2592,28 +2653,33 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ auto wasm_instance_reg = r10; // TODO(titzer): put in a common place.
+
// Save all parameter registers (see wasm-linkage.cc). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
- constexpr RegList gp_regs =
- Register::ListOf<r3, r4, r5, r6, r7, r8, r9, r10>();
+ constexpr RegList gp_regs = Register::ListOf<r3, r4, r5, r6, r7, r8, r9>();
constexpr RegList fp_regs =
DoubleRegister::ListOf<d1, d2, d3, d4, d5, d6, d7, d8>();
__ MultiPush(gp_regs);
__ MultiPushDoubles(fp_regs);
- // Initialize cp register with kZero, CEntryStub will use it to set the
- // current context on the isolate.
+ // Pass the WASM instance as an explicit argument to WasmCompileLazy.
+ __ Push(wasm_instance_reg);
+ // Initialize the JavaScript context with 0. CEntryStub will use it to
+ // set the current context on the isolate.
__ LoadSmiLiteral(cp, Smi::kZero);
__ CallRuntime(Runtime::kWasmCompileLazy);
- // Store returned instruction start in r11.
- __ addi(r11, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // The entrypoint address is the first return value.
+ __ mr(r11, kReturnRegister0);
+ // The WASM instance is the second return value.
+ __ mr(wasm_instance_reg, kReturnRegister1);
// Restore registers.
__ MultiPopDoubles(fp_regs);
__ MultiPop(gp_regs);
}
- // Now jump to the instructions of the returned code object.
+ // Finally, jump to the entrypoint.
__ Jump(r11);
}
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index 86239e0052..bf7229ac69 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -81,13 +81,6 @@ void Builtins::Generate_AdaptorWithBuiltinExitFrame(MacroAssembler* masm) {
AdaptorWithExitFrameType(masm, BUILTIN_EXIT);
}
-// Load the built-in InternalArray function from the current context.
-static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
- Register result) {
- // Load the InternalArray function from the current native context.
- __ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, result);
-}
-
// Load the built-in Array function from the current context.
static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
// Load the Array function from the current native context.
@@ -102,9 +95,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// -----------------------------------
Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
- // Get the InternalArray function.
- GenerateLoadInternalArrayFunction(masm, r3);
-
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray functions should be maps.
__ LoadP(r4, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
@@ -118,6 +108,7 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
// tail call a stub
+ __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
InternalArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -125,6 +116,7 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : number of arguments
+ // -- r3 : array function
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
@@ -138,14 +130,21 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
__ LoadP(r4, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
__ TestIfSmi(r4);
__ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction, cr0);
- __ CompareObjectType(r4, r5, r6, MAP_TYPE);
+ __ CompareObjectType(r4, r6, r7, MAP_TYPE);
__ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
}
+ // r4 is the AllocationSite - here undefined.
+ __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+ // If r5 (new target) is undefined, then this is the 'Call' case, so move
+ // r3 (the constructor) to r5.
+ Label call;
+ __ CmpP(r5, r4);
+ __ bne(&call);
__ LoadRR(r5, r3);
+
// Run the native code for the Array function called as a normal function.
- // tail call a stub
- __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+ __ bind(&call);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
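The Generate_ArrayConstructor hunk above lets the builtin serve both invocation styles: the AllocationSite slot is filled with undefined, and when new.target is undefined (the plain 'Call' case) the array function itself is substituted for it. A minimal sketch of that substitution, using placeholder types rather than V8's object model:

    struct Object {};
    static Object undefined_sentinel;  // stand-in for the undefined root
    static Object* const kUndefined = &undefined_sentinel;

    // Array(3) arrives with new.target == undefined; new Array(3) arrives
    // with a real new.target. The stub expects a usable new.target either way.
    Object* EffectiveNewTarget(Object* array_function, Object* new_target) {
      return new_target == kUndefined ? array_function : new_target;
    }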
@@ -280,8 +279,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -----------------------------------
__ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
- __ LoadlW(r6,
- FieldMemOperand(r6, SharedFunctionInfo::kCompilerHintsOffset));
+ __ LoadlW(r6, FieldMemOperand(r6, SharedFunctionInfo::kFlagsOffset));
__ TestBitMask(r6, SharedFunctionInfo::IsDerivedConstructorBit::kMask, r0);
__ bne(&not_create_implicit_receiver);
@@ -406,8 +404,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// Throw if constructor function is a class constructor
__ LoadP(r6, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
__ LoadP(r6, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
- __ LoadlW(r6,
- FieldMemOperand(r6, SharedFunctionInfo::kCompilerHintsOffset));
+ __ LoadlW(r6, FieldMemOperand(r6, SharedFunctionInfo::kFlagsOffset));
__ TestBitMask(r6, SharedFunctionInfo::IsClassConstructorBit::kMask, r0);
__ beq(&use_receiver);
} else {
@@ -449,13 +446,22 @@ void Builtins::Generate_JSConstructStubGenericUnrestrictedReturn(
MacroAssembler* masm) {
Generate_JSConstructStubGeneric(masm, false);
}
-void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSBuiltinsConstructStubHelper(masm);
-}
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSBuiltinsConstructStubHelper(masm);
}
+static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
+ Register sfi_data,
+ Register scratch1) {
+ Label done;
+
+ __ CompareObjectType(sfi_data, scratch1, scratch1, INTERPRETER_DATA_TYPE);
+ __ bne(&done, Label::kNear);
+ __ LoadP(sfi_data,
+ FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
+ __ bind(&done);
+}
+
// static
void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -540,7 +546,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Underlying function needs to have bytecode available.
if (FLAG_debug_code) {
+ __ LoadP(r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
__ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoBytecode(masm, r5, r1);
__ CompareObjectType(r5, r5, r5, BYTECODE_ARRAY_TYPE);
__ Assert(eq, AbortReason::kMissingBytecodeArray);
}
@@ -754,7 +762,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
DCHECK(
!AreAliased(feedback_vector, r2, r3, r5, scratch1, scratch2, scratch3));
- Label optimized_code_slot_is_cell, fallthrough;
+ Label optimized_code_slot_is_weak_ref, fallthrough;
Register closure = r3;
Register optimized_code_entry = scratch1;
@@ -764,9 +772,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
- // optimisation marker. Otherwise, interpret is as a weak cell to a code
+ // optimisation marker. Otherwise, interpret it as a weak reference to a code
// object.
- __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
+ __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);
{
// Optimized code slot is a Smi optimization marker.
@@ -801,12 +809,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
{
- // Optimized code slot is a WeakCell.
- __ bind(&optimized_code_slot_is_cell);
+ // Optimized code slot is a weak reference.
+ __ bind(&optimized_code_slot_is_weak_ref);
- __ LoadP(optimized_code_entry,
- FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
- __ JumpIfSmi(optimized_code_entry, &fallthrough);
+ __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &fallthrough);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
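The MaybeTailCallOptimizedCodeSlot hunks replace the WeakCell indirection with a direct weak reference: the feedback-vector slot holds either a Smi optimization marker or a weak pointer to optimized Code, now read via LoadWeakValue. A hedged, self-contained sketch of reading such a slot (the tagging scheme and names are illustrative, not V8's encoding):

    #include <optional>

    enum class OptimizationMarker { kNone, kCompileOptimized };
    struct Code { bool marked_for_deoptimization = false; };

    // Either a marker (the Smi case) or a weak reference that may have been
    // cleared by the GC (modelled as a null pointer).
    struct OptimizedCodeSlot {
      bool is_marker;
      OptimizationMarker marker;
      Code* weak_code;
    };

    // Returns the code to tail-call into, if any; otherwise the caller falls
    // through to the interpreter / lazy-compile path.
    std::optional<Code*> CodeToTailCall(const OptimizedCodeSlot& slot) {
      if (slot.is_marker) return std::nullopt;             // handle marker separately
      if (slot.weak_code == nullptr) return std::nullopt;  // weak ref was cleared
      if (slot.weak_code->marked_for_deoptimization) return std::nullopt;
      return slot.weak_code;
    }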
@@ -944,6 +950,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load original bytecode array or the debug copy.
__ LoadP(kInterpreterBytecodeArrayRegister,
FieldMemOperand(r2, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, r6);
__ LoadP(r6, FieldMemOperand(r2, SharedFunctionInfo::kDebugInfoOffset));
__ TestIfSmi(r6);
__ bne(&maybe_load_debug_bytecode_array);
@@ -1069,15 +1076,27 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load debug copy of the bytecode array if it exists.
// kInterpreterBytecodeArrayRegister is already loaded with
// SharedFunctionInfo::kFunctionDataOffset.
- Label done;
__ bind(&maybe_load_debug_bytecode_array);
+ __ LoadP(ip, FieldMemOperand(r6, DebugInfo::kDebugBytecodeArrayOffset));
+ __ JumpIfRoot(ip, Heap::kUndefinedValueRootIndex, &bytecode_array_loaded);
+
+ __ LoadRR(kInterpreterBytecodeArrayRegister, ip);
__ LoadP(ip, FieldMemOperand(r6, DebugInfo::kFlagsOffset));
__ SmiUntag(ip);
- __ tmll(ip, Operand(DebugInfo::kHasBreakInfo));
- __ beq(&done);
- __ LoadP(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(r6, DebugInfo::kDebugBytecodeArrayOffset));
- __ bind(&done);
+ __ AndP(ip, ip, Operand(DebugInfo::kDebugExecutionMode));
+
+ ExternalReference debug_execution_mode =
+ ExternalReference::debug_execution_mode_address(masm->isolate());
+ __ mov(r6, Operand(debug_execution_mode));
+ __ LoadW(r6, MemOperand(r6));
+ STATIC_ASSERT(static_cast<int>(DebugInfo::kDebugExecutionMode) ==
+ static_cast<int>(DebugInfo::kSideEffects));
+ __ CmpP(r6, ip);
+ __ beq(&bytecode_array_loaded);
+
+ __ Push(closure, feedback_vector, kInterpreterBytecodeArrayRegister, closure);
+ __ CallRuntime(Runtime::kDebugApplyInstrumentation);
+ __ Pop(closure, feedback_vector, kInterpreterBytecodeArrayRegister);
__ b(&bytecode_array_loaded);
}
@@ -1118,6 +1137,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
MacroAssembler* masm, ConvertReceiverMode receiver_mode,
InterpreterPushArgsMode mode) {
+ DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
// ----------- S t a t e -------------
// -- r2 : the number of arguments (not including the receiver)
// -- r4 : the address of the first argument to be pushed. Subsequent
@@ -1145,11 +1165,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
}
// Call the target.
- if (mode == InterpreterPushArgsMode::kJSFunction) {
- __ Jump(
- masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny),
- RelocInfo::CODE_TARGET);
- } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
RelocInfo::CODE_TARGET);
} else {
@@ -1195,16 +1211,13 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
} else {
__ AssertUndefinedOrAllocationSite(r4, r7);
}
- if (mode == InterpreterPushArgsMode::kJSFunction) {
+ if (mode == InterpreterPushArgsMode::kArrayFunction) {
__ AssertFunction(r3);
- // Tail call to the function-specific construct stub (still in the caller
+ // Tail call to the array construct stub (still in the caller
// context at this point).
- __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kConstructStubOffset));
- // Jump to the construct function.
- __ AddP(ip, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(ip);
+ ArrayConstructorStub array_constructor_stub(masm->isolate());
+ __ Jump(array_constructor_stub.GetCode(), RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Call the constructor with r2, r3, and r5 unmodified.
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
@@ -1226,10 +1239,29 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.
+ Label builtin_trampoline, trampoline_loaded;
Smi* interpreter_entry_return_pc_offset(
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
+
+ // If the SFI function_data is an InterpreterData, get the trampoline stored
+ // in it, otherwise get the trampoline from the builtins list.
+ __ LoadP(r4, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ LoadP(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r4, FieldMemOperand(r4, SharedFunctionInfo::kFunctionDataOffset));
+ __ CompareObjectType(r4, kInterpreterDispatchTableRegister,
+ kInterpreterDispatchTableRegister,
+ INTERPRETER_DATA_TYPE);
+ __ bne(&builtin_trampoline);
+
+ __ LoadP(r4,
+ FieldMemOperand(r4, InterpreterData::kInterpreterTrampolineOffset));
+ __ b(&trampoline_loaded);
+
+ __ bind(&builtin_trampoline);
__ Move(r4, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
+
+ __ bind(&trampoline_loaded);
__ AddP(r14, r4, Operand(interpreter_entry_return_pc_offset->value() +
Code::kHeaderSize - kHeapObjectTag));
@@ -1301,43 +1333,9 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
-void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r3 : argument count (preserved for callee)
- // -- r6 : new target (preserved for callee)
- // -- r4 : target function (preserved for callee)
- // -----------------------------------
- Register closure = r3;
-
- // Get the feedback vector.
- Register feedback_vector = r4;
- __ LoadP(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ LoadP(feedback_vector,
- FieldMemOperand(feedback_vector, Cell::kValueOffset));
-
- // The feedback vector must be defined.
- if (FLAG_debug_code) {
- __ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
- __ Assert(ne, AbortReason::kExpectedFeedbackVector);
- }
-
- // Is there an optimization marker or optimized code in the feedback vector?
- MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r6, r8, r7);
-
- // Otherwise, tail call the SFI code.
- static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
- __ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(r4, FieldMemOperand(r4, SharedFunctionInfo::kCodeOffset));
- __ AddP(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(r4);
-}
-
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
- // Set the code slot inside the JSFunction to the trampoline to the
- // interpreter entry.
- __ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(r4, FieldMemOperand(r4, SharedFunctionInfo::kCodeOffset));
+ // Set the code slot inside the JSFunction to CompileLazy.
+ __ Move(r4, BUILTIN_CODE(masm->isolate(), CompileLazy));
__ StoreP(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
__ RecordWriteField(r3, JSFunction::kCodeOffset, r4, r6, kLRHasNotBeenSaved,
kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
@@ -1345,6 +1343,79 @@ void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
Generate_CompileLazy(masm);
}
+static void GetSharedFunctionInfoCode(MacroAssembler* masm, Register sfi_data,
+ Register scratch1) {
+ // Figure out the SFI's code object.
+ Label done;
+ Label check_is_bytecode_array;
+ Label check_is_code;
+ Label check_is_fixed_array;
+ Label check_is_pre_parsed_scope_data;
+ Label check_is_function_template_info;
+ Label check_is_interpreter_data;
+
+ Register data_type = scratch1;
+
+ // IsSmi: Is builtin
+ __ JumpIfNotSmi(sfi_data, &check_is_bytecode_array);
+ __ mov(scratch1,
+ Operand(ExternalReference::builtins_address(masm->isolate())));
+ __ SmiUntag(sfi_data, kPointerSizeLog2);
+ __ LoadP(sfi_data, MemOperand(scratch1, sfi_data));
+ __ b(&done);
+
+ // Get map for subsequent checks.
+ __ bind(&check_is_bytecode_array);
+ __ LoadP(data_type, FieldMemOperand(sfi_data, HeapObject::kMapOffset));
+ __ LoadHalfWordP(data_type,
+ FieldMemOperand(data_type, Map::kInstanceTypeOffset));
+
+ // IsBytecodeArray: Interpret bytecode
+ __ CmpP(data_type, Operand(BYTECODE_ARRAY_TYPE));
+ __ bne(&check_is_code);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
+ __ b(&done);
+
+ // IsCode: Run code
+ __ bind(&check_is_code);
+ __ CmpP(data_type, Operand(CODE_TYPE));
+ __ beq(&done);
+
+ // IsFixedArray: Instantiate using AsmWasmData.
+ __ bind(&check_is_fixed_array);
+ __ CmpP(data_type, Operand(FIXED_ARRAY_TYPE));
+ __ bne(&check_is_pre_parsed_scope_data);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InstantiateAsmJs));
+ __ b(&done);
+
+ // IsPreParsedScopeData: Compile lazy
+ __ bind(&check_is_pre_parsed_scope_data);
+ __ CmpP(data_type, Operand(TUPLE2_TYPE));
+ __ bne(&check_is_function_template_info);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), CompileLazy));
+ __ b(&done);
+
+ // IsFunctionTemplateInfo: API call
+ __ bind(&check_is_function_template_info);
+ __ CmpP(data_type, Operand(FUNCTION_TEMPLATE_INFO_TYPE));
+ __ bne(&check_is_interpreter_data);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), HandleApiCall));
+ __ b(&done);
+
+ // IsInterpreterData: Interpret bytecode
+ __ bind(&check_is_interpreter_data);
+ if (FLAG_debug_code) {
+ __ CmpP(data_type, Operand(INTERPRETER_DATA_TYPE));
+ __ Assert(eq, AbortReason::kInvalidSharedFunctionInfoData);
+ }
+ __ LoadP(
+ sfi_data,
+ FieldMemOperand(sfi_data, InterpreterData::kInterpreterTrampolineOffset));
+
+ __ bind(&done);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : argument count (preserved for callee)
@@ -1368,13 +1439,15 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r6, r8, r7);
- // We found no optimized code.
+ // We found no optimized code. Infer the code object needed for the SFI.
Register entry = r6;
__ LoadP(entry,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(entry,
+ FieldMemOperand(entry, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoCode(masm, entry, r7);
- // If SFI points to anything other than CompileLazy, install that.
- __ LoadP(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
+ // If code entry points to anything other than CompileLazy, install that.
__ mov(r7, Operand(masm->CodeObject()));
__ CmpP(entry, r7);
__ beq(&gotta_call_runtime);
@@ -1444,25 +1517,9 @@ void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
}
{
// If we've reached this spot, the target builtin has been deserialized and
- // we simply need to copy it over. First to the shared function info.
+ // we simply need to copy it over to the target function.
Register target_builtin = scratch1;
- Register shared = scratch0;
-
- __ LoadP(shared,
- FieldMemOperand(target, JSFunction::kSharedFunctionInfoOffset));
-
- CHECK(r7 != target && r7 != scratch0 && r7 != scratch1);
- CHECK(r8 != target && r8 != scratch0 && r8 != scratch1);
-
- __ StoreP(target_builtin,
- FieldMemOperand(shared, SharedFunctionInfo::kCodeOffset));
- __ LoadRR(r8, target_builtin); // Write barrier clobbers r9 below.
- __ RecordWriteField(shared, SharedFunctionInfo::kCodeOffset, r8, r7,
- kLRHasNotBeenSaved, kDontSaveFPRegs,
- OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // And second to the target function.
__ StoreP(target_builtin, FieldMemOperand(target, JSFunction::kCodeOffset));
__ LoadRR(r8, target_builtin); // Write barrier clobbers r9 below.
@@ -2070,7 +2127,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Check that the function is not a "classConstructor".
Label class_constructor;
__ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
- __ LoadlW(r5, FieldMemOperand(r4, SharedFunctionInfo::kCompilerHintsOffset));
+ __ LoadlW(r5, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
__ TestBitMask(r5, SharedFunctionInfo::IsClassConstructorBit::kMask, r0);
__ bne(&class_constructor);
@@ -2324,18 +2381,27 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// -- r3 : the constructor to call (checked to be a JSFunction)
// -- r5 : the new target (checked to be a constructor)
// -----------------------------------
+ __ AssertConstructor(r3, r1);
__ AssertFunction(r3);
// Calling convention for function specific ConstructStubs require
// r4 to contain either an AllocationSite or undefined.
__ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
- // Tail call to the function-specific construct stub (still in the caller
- // context at this point).
+ Label call_generic_stub;
+
+ // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
__ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kConstructStubOffset));
- __ AddP(ip, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ JumpToJSEntry(ip);
+ __ LoadlW(r6, FieldMemOperand(r6, SharedFunctionInfo::kFlagsOffset));
+ __ AndP(r6, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
+ __ beq(&call_generic_stub);
+
+ __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&call_generic_stub);
+ __ Jump(masm->isolate()->builtins()->JSConstructStubGeneric(),
+ RelocInfo::CODE_TARGET);
}
// static
@@ -2345,6 +2411,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// -- r3 : the function to call (checked to be a JSBoundFunction)
// -- r5 : the new target (checked to be a constructor)
// -----------------------------------
+ __ AssertConstructor(r3, r1);
__ AssertBoundFunction(r3);
// Push the [[BoundArguments]] onto the stack.
@@ -2377,16 +2444,17 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
Label non_constructor, non_proxy;
__ JumpIfSmi(r3, &non_constructor);
- // Dispatch based on instance type.
- __ CompareObjectType(r3, r6, r7, JS_FUNCTION_TYPE);
- __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
- RelocInfo::CODE_TARGET, eq);
-
// Check if target has a [[Construct]] internal method.
+ __ LoadP(r6, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadlB(r4, FieldMemOperand(r6, Map::kBitFieldOffset));
__ TestBit(r4, Map::IsConstructorBit::kShift);
__ beq(&non_constructor);
+ // Dispatch based on instance type.
+ __ CompareInstanceType(r6, r7, JS_FUNCTION_TYPE);
+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
+ RelocInfo::CODE_TARGET, eq);
+
// Only dispatch to bound functions after checking whether they are
// constructors.
__ CmpP(r7, Operand(JS_BOUND_FUNCTION_TYPE));
@@ -2590,6 +2658,8 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ auto wasm_instance_reg = r6; // TODO(titzer): put in a common place.
+
// Save all parameter registers (see wasm-linkage.cc). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
@@ -2602,18 +2672,22 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ MultiPush(gp_regs);
__ MultiPushDoubles(fp_regs);
- // Initialize cp register with kZero, CEntryStub will use it to set the
- // current context on the isolate.
+ // Pass the WASM instance as an explicit argument to WasmCompileLazy.
+ __ Push(wasm_instance_reg);
+ // Initialize the JavaScript context with 0. CEntryStub will use it to
+ // set the current context on the isolate.
__ LoadSmiLiteral(cp, Smi::kZero);
__ CallRuntime(Runtime::kWasmCompileLazy);
- // Store returned instruction start in ip.
- __ AddP(ip, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // The entrypoint address is the first return value.
+ __ LoadRR(ip, r2);
+ // The WASM instance is the second return value.
+ __ LoadRR(wasm_instance_reg, kReturnRegister1);
// Restore registers.
__ MultiPopDoubles(fp_regs);
__ MultiPop(gp_regs);
}
- // Now jump to the instructions of the returned code object.
+ // Finally, jump to the entrypoint.
__ Jump(ip);
}
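GetSharedFunctionInfoCode, added above for s390 (an x64 twin appears further down), stands in for the removed SharedFunctionInfo code field: the code to run is inferred from the type of the SFI's function_data. The dispatch it implements, restated as a self-contained C++ sketch with illustrative enums rather than V8's instance types:

    enum class FunctionDataKind {
      kBuiltinId,             // Smi: index into the builtins table
      kBytecodeArray,         // interpret via InterpreterEntryTrampoline
      kCode,                  // already a Code object: run it directly
      kAsmWasmData,           // FixedArray: instantiate via InstantiateAsmJs
      kPreParsedScopeData,    // Tuple2: compile lazily
      kFunctionTemplateInfo,  // API call via HandleApiCall
      kInterpreterData        // interpret via a per-function trampoline
    };

    enum class CodeChoice {
      kBuiltinFromTable,
      kInterpreterEntryTrampoline,
      kFunctionDataItself,
      kInstantiateAsmJs,
      kCompileLazy,
      kHandleApiCall,
      kTrampolineFromInterpreterData
    };

    CodeChoice CodeForFunctionData(FunctionDataKind kind) {
      switch (kind) {
        case FunctionDataKind::kBuiltinId:            return CodeChoice::kBuiltinFromTable;
        case FunctionDataKind::kBytecodeArray:        return CodeChoice::kInterpreterEntryTrampoline;
        case FunctionDataKind::kCode:                 return CodeChoice::kFunctionDataItself;
        case FunctionDataKind::kAsmWasmData:          return CodeChoice::kInstantiateAsmJs;
        case FunctionDataKind::kPreParsedScopeData:   return CodeChoice::kCompileLazy;
        case FunctionDataKind::kFunctionTemplateInfo: return CodeChoice::kHandleApiCall;
        case FunctionDataKind::kInterpreterData:      return CodeChoice::kTrampolineFromInterpreterData;
      }
      return CodeChoice::kCompileLazy;  // unreachable; keeps compilers happy
    }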
diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc
index d30cd02ab5..45040844c3 100644
--- a/deps/v8/src/builtins/setup-builtins-internal.cc
+++ b/deps/v8/src/builtins/setup-builtins-internal.cc
@@ -114,8 +114,9 @@ Code* BuildWithCodeStubAssemblerJS(Isolate* isolate, int32_t builtin_index,
Zone zone(isolate->allocator(), ZONE_NAME, segment_size);
const int argc_with_recv =
(argc == SharedFunctionInfo::kDontAdaptArgumentsSentinel) ? 0 : argc + 1;
- compiler::CodeAssemblerState state(isolate, &zone, argc_with_recv,
- Code::BUILTIN, name, builtin_index);
+ compiler::CodeAssemblerState state(
+ isolate, &zone, argc_with_recv, Code::BUILTIN, name,
+ PoisoningMitigationLevel::kOff, builtin_index);
generator(&state);
Handle<Code> code = compiler::CodeAssembler::GenerateCode(&state);
PostBuildProfileAndTracing(isolate, *code, name);
@@ -141,7 +142,8 @@ Code* BuildWithCodeStubAssemblerCS(Isolate* isolate, int32_t builtin_index,
// Ensure descriptor is already initialized.
DCHECK_LE(0, descriptor.GetRegisterParameterCount());
compiler::CodeAssemblerState state(isolate, &zone, descriptor, Code::BUILTIN,
- name, result_size, 0, builtin_index);
+ name, PoisoningMitigationLevel::kOff,
+ result_size, 0, builtin_index);
generator(&state);
Handle<Code> code = compiler::CodeAssembler::GenerateCode(&state);
PostBuildProfileAndTracing(isolate, *code, name);
@@ -186,7 +188,7 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
if (!target->is_builtin()) continue;
Code* new_target =
Code::cast(builtins->builtins_[target->builtin_index()]);
- rinfo->set_target_address(new_target->instruction_start(),
+ rinfo->set_target_address(new_target->raw_instruction_start(),
UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
} else {
DCHECK(RelocInfo::IsEmbeddedObject(rinfo->rmode()));
@@ -202,8 +204,8 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
flush_icache = true;
}
if (flush_icache) {
- Assembler::FlushICache(code->instruction_start(),
- code->instruction_size());
+ Assembler::FlushICache(code->raw_instruction_start(),
+ code->raw_instruction_size());
}
}
}
@@ -283,17 +285,6 @@ void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
BUILTIN_EXCEPTION_CAUGHT_PREDICTION_LIST(SET_EXCEPTION_CAUGHT_PREDICTION)
#undef SET_EXCEPTION_CAUGHT_PREDICTION
- // TODO(mstarzinger,6792): This code-space modification section should be
- // moved into {Heap} eventually and a safe wrapper be provided.
- CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
-
-#define SET_CODE_NON_TAGGED_PARAMS(Name) \
- Code::cast(builtins->builtins_[Builtins::k##Name]) \
- ->set_has_tagged_params(false);
-
- BUILTINS_WITH_UNTAGGED_PARAMS(SET_CODE_NON_TAGGED_PARAMS)
-#undef SET_CODE_NON_TAGGED_PARAMS
-
builtins->MarkInitialized();
}
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index ae7e0c151e..ad64b02af9 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -221,7 +221,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
// -----------------------------------
__ movp(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ testl(FieldOperand(rbx, SharedFunctionInfo::kCompilerHintsOffset),
+ __ testl(FieldOperand(rbx, SharedFunctionInfo::kFlagsOffset),
Immediate(SharedFunctionInfo::IsDerivedConstructorBit::kMask));
__ j(not_zero, &not_create_implicit_receiver, Label::kNear);
@@ -341,7 +341,7 @@ void Generate_JSConstructStubGeneric(MacroAssembler* masm,
__ bind(&other_result);
__ movp(rbx, Operand(rbp, ConstructFrameConstants::kConstructorOffset));
__ movp(rbx, FieldOperand(rbx, JSFunction::kSharedFunctionInfoOffset));
- __ testl(FieldOperand(rbx, SharedFunctionInfo::kCompilerHintsOffset),
+ __ testl(FieldOperand(rbx, SharedFunctionInfo::kFlagsOffset),
Immediate(SharedFunctionInfo::IsClassConstructorBit::kMask));
if (restrict_constructor_return) {
@@ -385,9 +385,6 @@ void Builtins::Generate_JSConstructStubGenericUnrestrictedReturn(
MacroAssembler* masm) {
return Generate_JSConstructStubGeneric(masm, false);
}
-void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSBuiltinsConstructStubHelper(masm);
-}
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSBuiltinsConstructStubHelper(masm);
}
@@ -562,6 +559,19 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, true);
}
+static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
+ Register sfi_data,
+ Register scratch1) {
+ Label done;
+
+ __ CmpObjectType(sfi_data, INTERPRETER_DATA_TYPE, scratch1);
+ __ j(not_equal, &done, Label::kNear);
+ __ movp(sfi_data,
+ FieldOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
+
+ __ bind(&done);
+}
+
// static
void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -639,6 +649,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movp(rcx, FieldOperand(rcx, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoBytecode(masm, rcx, kScratchRegister);
__ CmpObjectType(rcx, BYTECODE_ARRAY_TYPE, rcx);
__ Assert(equal, AbortReason::kMissingBytecodeArray);
}
@@ -745,7 +756,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
DCHECK(!AreAliased(feedback_vector, rax, rdx, rdi, scratch1, scratch2,
scratch3));
- Label optimized_code_slot_is_cell, fallthrough;
+ Label optimized_code_slot_is_weak_ref, fallthrough;
Register closure = rdi;
Register optimized_code_entry = scratch1;
@@ -754,9 +765,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
FieldOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
- // optimisation marker. Otherwise, interpret is as a weak cell to a code
+ // optimisation marker. Otherwise, interpret it as a weak reference to a code
// object.
- __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_cell);
+ __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);
{
// Optimized code slot is a Smi optimization marker.
@@ -790,12 +801,10 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
{
- // Optimized code slot is a WeakCell.
- __ bind(&optimized_code_slot_is_cell);
+ // Optimized code slot is a weak reference.
+ __ bind(&optimized_code_slot_is_weak_ref);
- __ movp(optimized_code_entry,
- FieldOperand(optimized_code_entry, WeakCell::kValueOffset));
- __ JumpIfSmi(optimized_code_entry, &fallthrough);
+ __ LoadWeakValue(optimized_code_entry, &fallthrough);
// Check if the optimized code is marked for deopt. If it is, call the
// runtime to clear it.
@@ -876,7 +885,7 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
#define JUMP_IF_EQUAL(NAME) \
__ cmpb(bytecode, \
Immediate(static_cast<int>(interpreter::Bytecode::k##NAME))); \
- __ j(equal, if_return, Label::kNear);
+ __ j(equal, if_return, Label::kFar);
RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
#undef JUMP_IF_EQUAL
@@ -927,6 +936,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ movp(rax, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ movp(kInterpreterBytecodeArrayRegister,
FieldOperand(rax, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister,
+ kScratchRegister);
__ JumpIfNotSmi(FieldOperand(rax, SharedFunctionInfo::kDebugInfoOffset),
&maybe_load_debug_bytecode_array);
__ bind(&bytecode_array_loaded);
@@ -1047,13 +1058,33 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// kInterpreterBytecodeArrayRegister is already loaded with
// SharedFunctionInfo::kFunctionDataOffset.
__ bind(&maybe_load_debug_bytecode_array);
+ __ movp(rax, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ movp(rcx, FieldOperand(rax, SharedFunctionInfo::kDebugInfoOffset));
- __ SmiToInteger32(kScratchRegister,
- FieldOperand(rcx, DebugInfo::kFlagsOffset));
- __ testl(kScratchRegister, Immediate(DebugInfo::kHasBreakInfo));
- __ j(zero, &bytecode_array_loaded);
- __ movp(kInterpreterBytecodeArrayRegister,
+ __ movp(kScratchRegister,
FieldOperand(rcx, DebugInfo::kDebugBytecodeArrayOffset));
+ __ JumpIfRoot(kScratchRegister, Heap::kUndefinedValueRootIndex,
+ &bytecode_array_loaded);
+
+ __ movp(kInterpreterBytecodeArrayRegister, kScratchRegister);
+ __ SmiToInteger32(rax, FieldOperand(rcx, DebugInfo::kFlagsOffset));
+ __ andb(rax, Immediate(DebugInfo::kDebugExecutionMode));
+ STATIC_ASSERT(static_cast<int>(DebugInfo::kDebugExecutionMode) ==
+ static_cast<int>(DebugInfo::kSideEffects));
+ ExternalReference debug_execution_mode_address =
+ ExternalReference::debug_execution_mode_address(masm->isolate());
+ Operand debug_execution_mode =
+ masm->ExternalOperand(debug_execution_mode_address);
+ __ cmpb(rax, debug_execution_mode);
+ __ j(equal, &bytecode_array_loaded);
+
+ __ Push(closure);
+ __ Push(feedback_vector);
+ __ Push(kInterpreterBytecodeArrayRegister);
+ __ Push(closure);
+ __ CallRuntime(Runtime::kDebugApplyInstrumentation);
+ __ Pop(kInterpreterBytecodeArrayRegister);
+ __ Pop(feedback_vector);
+ __ Pop(closure);
__ jmp(&bytecode_array_loaded);
}
@@ -1082,6 +1113,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
MacroAssembler* masm, ConvertReceiverMode receiver_mode,
InterpreterPushArgsMode mode) {
+ DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
// ----------- S t a t e -------------
// -- rax : the number of arguments (not including the receiver)
// -- rbx : the address of the first argument to be pushed. Subsequent
@@ -1118,10 +1150,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// Call the target.
__ PushReturnAddressFrom(kScratchRegister); // Re-push return address.
- if (mode == InterpreterPushArgsMode::kJSFunction) {
- __ Jump(masm->isolate()->builtins()->CallFunction(receiver_mode),
- RelocInfo::CODE_TARGET);
- } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
RelocInfo::CODE_TARGET);
} else {
@@ -1176,16 +1205,13 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
__ AssertUndefinedOrAllocationSite(rbx);
}
- if (mode == InterpreterPushArgsMode::kJSFunction) {
- // Tail call to the function-specific construct stub (still in the caller
+ if (mode == InterpreterPushArgsMode::kArrayFunction) {
+ // Tail call to the array construct stub (still in the caller
// context at this point).
__ AssertFunction(rdi);
-
- __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movp(rcx, FieldOperand(rcx, SharedFunctionInfo::kConstructStubOffset));
- __ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
// Jump to the constructor function (rax, rbx, rdx passed on).
- __ jmp(rcx);
+ ArrayConstructorStub array_constructor_stub(masm->isolate());
+ __ Jump(array_constructor_stub.GetCode(), RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Call the constructor (rax, rdx, rdi passed on).
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
@@ -1208,12 +1234,30 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.
- // TODO(jgruber,v8:6666): Update logic once builtin is off-heap-safe.
- DCHECK(!Builtins::IsOffHeapSafe(Builtins::kInterpreterEntryTrampoline));
+ Label builtin_trampoline, trampoline_loaded;
+ // TODO(jgruber,v8:6666): Update logic once builtin is isolate-independent.
+ DCHECK(
+ !Builtins::IsIsolateIndependent(Builtins::kInterpreterEntryTrampoline));
Smi* interpreter_entry_return_pc_offset(
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
+
+ // If the SFI function_data is an InterpreterData, get the trampoline stored
+ // in it, otherwise get the trampoline from the builtins list.
+ __ movp(rbx, Operand(rbp, StandardFrameConstants::kFunctionOffset));
+ __ movp(rbx, FieldOperand(rbx, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rbx, FieldOperand(rbx, SharedFunctionInfo::kFunctionDataOffset));
+ __ CmpObjectType(rbx, INTERPRETER_DATA_TYPE, kScratchRegister);
+ __ j(not_equal, &builtin_trampoline, Label::kNear);
+
+ __ movp(rbx,
+ FieldOperand(rbx, InterpreterData::kInterpreterTrampolineOffset));
+ __ jmp(&trampoline_loaded, Label::kNear);
+
+ __ bind(&builtin_trampoline);
__ Move(rbx, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
+
+ __ bind(&trampoline_loaded);
__ addp(rbx, Immediate(interpreter_entry_return_pc_offset->value() +
Code::kHeaderSize - kHeapObjectTag));
__ Push(rbx);
@@ -1286,52 +1330,89 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
-void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : argument count (preserved for callee)
- // -- rdx : new target (preserved for callee)
- // -- rdi : target function (preserved for callee)
- // -----------------------------------
- Register closure = rdi;
-
- // Get the feedback vector.
- Register feedback_vector = rbx;
- __ movp(feedback_vector,
- FieldOperand(closure, JSFunction::kFeedbackCellOffset));
- __ movp(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
-
- // The feedback vector must be defined.
- if (FLAG_debug_code) {
- __ CompareRoot(feedback_vector, Heap::kUndefinedValueRootIndex);
- __ Assert(not_equal, AbortReason::kExpectedFeedbackVector);
- }
-
- // Is there an optimization marker or optimized code in the feedback vector?
- MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, rcx, r14, r15);
-
- // Otherwise, tail call the SFI code.
- static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
- __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movp(rcx, FieldOperand(rcx, SharedFunctionInfo::kCodeOffset));
- __ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
- __ jmp(rcx);
-}
-
// TODO(jupvfranco): investigate whether there is any case where the CompileLazy
// builtin does not set the code field in the JS function. If there isn't then
// we do not need this builtin and can jump directly to CompileLazy.
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
- // Set the code slot inside the JSFunction to the trampoline to the
- // interpreter entry.
- __ movq(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movq(rcx, FieldOperand(rcx, SharedFunctionInfo::kCodeOffset));
- __ movq(FieldOperand(rdi, JSFunction::kCodeOffset), rcx);
+ // Set the code slot inside the JSFunction to CompileLazy.
+ __ Move(rcx, BUILTIN_CODE(masm->isolate(), CompileLazy));
+ __ movp(FieldOperand(rdi, JSFunction::kCodeOffset), rcx);
__ RecordWriteField(rdi, JSFunction::kCodeOffset, rcx, r15, kDontSaveFPRegs,
OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
// Jump to compile lazy.
Generate_CompileLazy(masm);
}
+static void GetSharedFunctionInfoCode(MacroAssembler* masm, Register sfi_data,
+ Register scratch1) {
+ // Figure out the SFI's code object.
+ Label done;
+ Label check_is_bytecode_array;
+ Label check_is_code;
+ Label check_is_fixed_array;
+ Label check_is_pre_parsed_scope_data;
+ Label check_is_function_template_info;
+ Label check_is_interpreter_data;
+
+ Register data_type = scratch1;
+
+ // IsSmi: Is builtin
+ __ JumpIfNotSmi(sfi_data, &check_is_bytecode_array);
+ __ Move(scratch1, ExternalReference::builtins_address(masm->isolate()));
+ SmiIndex index = masm->SmiToIndex(sfi_data, sfi_data, kPointerSizeLog2);
+ __ movp(sfi_data, Operand(scratch1, index.reg, index.scale, 0));
+ __ j(always, &done);
+
+ // Get map for subsequent checks.
+ __ bind(&check_is_bytecode_array);
+ __ movp(data_type, FieldOperand(sfi_data, HeapObject::kMapOffset));
+ __ movw(data_type, FieldOperand(data_type, Map::kInstanceTypeOffset));
+
+ // IsBytecodeArray: Interpret bytecode
+ __ cmpw(data_type, Immediate(BYTECODE_ARRAY_TYPE));
+ __ j(not_equal, &check_is_code);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
+ __ j(always, &done);
+
+ // IsCode: Run code
+ __ bind(&check_is_code);
+ __ cmpw(data_type, Immediate(CODE_TYPE));
+ __ j(equal, &done);
+
+ // IsFixedArray: Instantiate using AsmWasmData.
+ __ bind(&check_is_fixed_array);
+ __ cmpw(data_type, Immediate(FIXED_ARRAY_TYPE));
+ __ j(not_equal, &check_is_pre_parsed_scope_data);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InstantiateAsmJs));
+ __ j(always, &done);
+
+ // IsPreParsedScopeData: Compile lazy
+ __ bind(&check_is_pre_parsed_scope_data);
+ __ cmpw(data_type, Immediate(TUPLE2_TYPE));
+ __ j(not_equal, &check_is_function_template_info);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), CompileLazy));
+ __ j(always, &done);
+
+ // IsFunctionTemplateInfo: API call
+ __ bind(&check_is_function_template_info);
+ __ cmpw(data_type, Immediate(FUNCTION_TEMPLATE_INFO_TYPE));
+ __ j(not_equal, &check_is_interpreter_data);
+ __ Move(sfi_data, BUILTIN_CODE(masm->isolate(), HandleApiCall));
+ __ j(always, &done);
+
+ // IsInterpreterData: Interpret bytecode with unique interpreter
+ __ bind(&check_is_interpreter_data);
+ if (FLAG_debug_code) {
+ __ cmpw(data_type, Immediate(INTERPRETER_DATA_TYPE));
+ __ Check(equal, AbortReason::kInvalidSharedFunctionInfoData);
+ }
+ __ movp(
+ sfi_data,
+ FieldOperand(sfi_data, InterpreterData::kInterpreterTrampolineOffset));
+
+ __ bind(&done);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argument count (preserved for callee)
@@ -1354,12 +1435,13 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, rcx, r14, r15);
- // We found no optimized code.
+ // We found no optimized code. Infer the code object needed for the SFI.
Register entry = rcx;
__ movp(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(entry, FieldOperand(entry, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoCode(masm, entry, rbx);
- // If SFI points to anything other than CompileLazy, install that.
- __ movp(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
+ // If code entry points to anything other than CompileLazy, install that.
__ Move(rbx, masm->CodeObject());
__ cmpp(entry, rbx);
__ j(equal, &gotta_call_runtime);
@@ -1427,24 +1509,9 @@ void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
{
// If we've reached this spot, the target builtin has been deserialized and
- // we simply need to copy it over. First to the shared function info.
+ // we simply need to copy it over to the target function.
Register target_builtin = scratch1;
- Register shared = scratch0;
-
- __ movp(shared,
- FieldOperand(target, JSFunction::kSharedFunctionInfoOffset));
-
- CHECK(r14 != target && r14 != scratch0 && r14 != scratch1);
- CHECK(r15 != target && r15 != scratch0 && r15 != scratch1);
-
- __ movp(FieldOperand(shared, SharedFunctionInfo::kCodeOffset),
- target_builtin);
- __ movp(r14, target_builtin); // Write barrier clobbers r14 below.
- __ RecordWriteField(shared, SharedFunctionInfo::kCodeOffset, r14, r15,
- kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // And second to the target function.
__ movp(FieldOperand(target, JSFunction::kCodeOffset), target_builtin);
__ movp(r14, target_builtin); // Write barrier clobbers r14 below.
@@ -1830,9 +1897,6 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// -----------------------------------
Label generic_array_code;
- // Get the InternalArray function.
- __ LoadNativeContextSlot(Context::INTERNAL_ARRAY_FUNCTION_INDEX, rdi);
-
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray functions should be maps.
__ movp(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
@@ -1848,6 +1912,7 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
// tail call a stub
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
InternalArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -1855,14 +1920,12 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
+ // -- rdi : array function
// -- rsp[0] : return address
// -- rsp[8] : last argument
// -----------------------------------
Label generic_array_code;
- // Get the Array function.
- __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, rdi);
-
if (FLAG_debug_code) {
// Initial map for the builtin Array functions should be maps.
__ movp(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
@@ -1874,10 +1937,17 @@ void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
__ Check(equal, AbortReason::kUnexpectedInitialMapForArrayFunction);
}
+ // rbx is the AllocationSite - here undefined.
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
+ // If rdx (new target) is undefined, then this is the 'Call' case, so move
+ // rdi (the constructor) to rdx.
+ Label call;
+ __ cmpp(rdx, rbx);
+ __ j(not_equal, &call);
__ movp(rdx, rdi);
+
// Run the native code for the Array function called as a normal function.
- // tail call a stub
- __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
+ __ bind(&call);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -2218,7 +2288,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// Check that the function is not a "classConstructor".
Label class_constructor;
__ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ testl(FieldOperand(rdx, SharedFunctionInfo::kCompilerHintsOffset),
+ __ testl(FieldOperand(rdx, SharedFunctionInfo::kFlagsOffset),
Immediate(SharedFunctionInfo::IsClassConstructorBit::kMask));
__ j(not_zero, &class_constructor);
@@ -2234,7 +2304,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
- __ testl(FieldOperand(rdx, SharedFunctionInfo::kCompilerHintsOffset),
+ __ testl(FieldOperand(rdx, SharedFunctionInfo::kFlagsOffset),
Immediate(SharedFunctionInfo::IsNativeBit::kMask |
SharedFunctionInfo::IsStrictBit::kMask));
__ j(not_zero, &done_convert);
@@ -2479,18 +2549,27 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// -- rdx : the new target (checked to be a constructor)
// -- rdi : the constructor to call (checked to be a JSFunction)
// -----------------------------------
+ __ AssertConstructor(rdi);
__ AssertFunction(rdi);
// Calling convention for function specific ConstructStubs require
// rbx to contain either an AllocationSite or undefined.
__ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
- // Tail call to the function-specific construct stub (still in the caller
- // context at this point).
+ Label call_generic_stub;
+
+ // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
__ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movp(rcx, FieldOperand(rcx, SharedFunctionInfo::kConstructStubOffset));
- __ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
- __ jmp(rcx);
+ __ testl(FieldOperand(rcx, SharedFunctionInfo::kFlagsOffset),
+ Immediate(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
+ __ j(zero, &call_generic_stub, Label::kNear);
+
+ __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&call_generic_stub);
+ __ Jump(masm->isolate()->builtins()->JSConstructStubGeneric(),
+ RelocInfo::CODE_TARGET);
}
// static
@@ -2500,6 +2579,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// -- rdx : the new target (checked to be a constructor)
// -- rdi : the constructor to call (checked to be a JSBoundFunction)
// -----------------------------------
+ __ AssertConstructor(rdi);
__ AssertBoundFunction(rdi);
// Push the [[BoundArguments]] onto the stack.
@@ -2534,16 +2614,17 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
Label non_constructor, non_proxy;
__ JumpIfSmi(rdi, &non_constructor, Label::kNear);
- // Dispatch based on instance type.
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(equal, BUILTIN_CODE(masm->isolate(), ConstructFunction),
- RelocInfo::CODE_TARGET);
-
// Check if target has a [[Construct]] internal method.
+ __ movq(rcx, FieldOperand(rdi, HeapObject::kMapOffset));
__ testb(FieldOperand(rcx, Map::kBitFieldOffset),
Immediate(Map::IsConstructorBit::kMask));
__ j(zero, &non_constructor, Label::kNear);
+ // Dispatch based on instance type.
+ __ CmpInstanceType(rcx, JS_FUNCTION_TYPE);
+ __ j(equal, BUILTIN_CODE(masm->isolate(), ConstructFunction),
+ RelocInfo::CODE_TARGET);
+
// Only dispatch to bound functions after checking whether they are
// constructors.
__ CmpInstanceType(rcx, JS_BOUND_FUNCTION_TYPE);
@@ -2636,10 +2717,12 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ auto wasm_instance_reg = rsi; // TODO(titzer): put in a common place.
+
// Save all parameter registers (see wasm-linkage.cc). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
- constexpr Register gp_regs[]{rax, rbx, rcx, rdx, rsi, rdi};
+ constexpr Register gp_regs[]{rax, rbx, rcx, rdx, rdi};
constexpr XMMRegister xmm_regs[]{xmm1, xmm2, xmm3, xmm4, xmm5, xmm6};
for (auto reg : gp_regs) {
@@ -2650,12 +2733,16 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ movdqu(Operand(rsp, 16 * i), xmm_regs[i]);
}
- // Initialize rsi register with kZero, CEntryStub will use it to set the
- // current context on the isolate.
+ // Pass the WASM instance as an explicit argument to WasmCompileLazy.
+ __ Push(wasm_instance_reg);
+ // Initialize the JavaScript context with 0. CEntryStub will use it to
+ // set the current context on the isolate.
__ Move(rsi, Smi::kZero);
__ CallRuntime(Runtime::kWasmCompileLazy);
- // Store returned instruction start in r11.
- __ leap(r11, FieldOperand(rax, Code::kHeaderSize));
+ // The entrypoint address is the first return value.
+ __ movq(r11, kReturnRegister0);
+ // The WASM instance is the second return value.
+ __ movq(wasm_instance_reg, kReturnRegister1);
// Restore registers.
for (int i = arraysize(xmm_regs) - 1; i >= 0; --i) {
@@ -2666,7 +2753,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ Pop(gp_regs[i]);
}
}
- // Now jump to the instructions of the returned code object.
+ // Finally, jump to the entrypoint.
__ jmp(r11);
}
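Like the s390 file earlier, the x64 file adds GetSharedFunctionInfoBytecode and teaches Generate_InterpreterEnterBytecode to prefer a trampoline stored in an InterpreterData over the shared builtin trampoline. A hedged sketch of that selection, with a tagged union standing in for V8's instance-type check:

    struct Code {};
    struct BytecodeArray {};
    struct InterpreterData {
      Code* interpreter_trampoline;
      BytecodeArray* bytecode_array;
    };

    // function_data is either a BytecodeArray or an InterpreterData.
    struct FunctionData {
      bool is_interpreter_data;
      union {
        BytecodeArray* bytecode;
        InterpreterData* interpreter_data;
      };
    };

    // InterpreterEnterBytecode: use the per-function trampoline when present.
    Code* TrampolineFor(const FunctionData& data, Code* builtin_trampoline) {
      return data.is_interpreter_data
                 ? data.interpreter_data->interpreter_trampoline
                 : builtin_trampoline;
    }

    // GetSharedFunctionInfoBytecode: unwrap the bytecode array the same way.
    BytecodeArray* BytecodeFor(const FunctionData& data) {
      return data.is_interpreter_data ? data.interpreter_data->bytecode_array
                                      : data.bytecode;
    }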
diff --git a/deps/v8/src/cancelable-task.cc b/deps/v8/src/cancelable-task.cc
index 7c9cc9cb93..1876773e7a 100644
--- a/deps/v8/src/cancelable-task.cc
+++ b/deps/v8/src/cancelable-task.cc
@@ -65,7 +65,6 @@ CancelableTaskManager::TryAbortResult CancelableTaskManager::TryAbort(
return kTaskRemoved;
}
-
void CancelableTaskManager::CancelAndWait() {
// Clean up all cancelable fore- and background tasks. Tasks are canceled on
// the way if possible, i.e., if they have not started yet. After each round
diff --git a/deps/v8/src/char-predicates-inl.h b/deps/v8/src/char-predicates-inl.h
index ab5caa7557..7e198d5808 100644
--- a/deps/v8/src/char-predicates-inl.h
+++ b/deps/v8/src/char-predicates-inl.h
@@ -48,25 +48,21 @@ inline bool IsDecimalDigit(uc32 c) {
return IsInRange(c, '0', '9');
}
-
inline bool IsHexDigit(uc32 c) {
// ECMA-262, 3rd, 7.6 (p 15)
return IsDecimalDigit(c) || IsInRange(AsciiAlphaToLower(c), 'a', 'f');
}
-
inline bool IsOctalDigit(uc32 c) {
// ECMA-262, 6th, 7.8.3
return IsInRange(c, '0', '7');
}
-
inline bool IsBinaryDigit(uc32 c) {
// ECMA-262, 6th, 7.8.3
return c == '0' || c == '1';
}
-
inline bool IsRegExpWord(uc16 c) {
return IsInRange(AsciiAlphaToLower(c), 'a', 'z')
|| IsDecimalDigit(c)
diff --git a/deps/v8/src/code-events.h b/deps/v8/src/code-events.h
index 125d15d61b..439cb54dca 100644
--- a/deps/v8/src/code-events.h
+++ b/deps/v8/src/code-events.h
@@ -15,7 +15,6 @@ namespace v8 {
namespace internal {
class AbstractCode;
-class InstructionStream;
class Name;
class SharedFunctionInfo;
class String;
@@ -25,27 +24,28 @@ class WasmCode;
using WasmName = Vector<const char>;
} // namespace wasm
-#define LOG_EVENTS_AND_TAGS_LIST(V) \
- V(CODE_CREATION_EVENT, "code-creation") \
- V(CODE_DISABLE_OPT_EVENT, "code-disable-optimization") \
- V(CODE_MOVE_EVENT, "code-move") \
- V(CODE_DELETE_EVENT, "code-delete") \
- V(CODE_MOVING_GC, "code-moving-gc") \
- V(SHARED_FUNC_MOVE_EVENT, "sfi-move") \
- V(SNAPSHOT_CODE_NAME_EVENT, "snapshot-code-name") \
- V(TICK_EVENT, "tick") \
- V(BUILTIN_TAG, "Builtin") \
- V(CALLBACK_TAG, "Callback") \
- V(EVAL_TAG, "Eval") \
- V(FUNCTION_TAG, "Function") \
- V(HANDLER_TAG, "Handler") \
- V(BYTECODE_HANDLER_TAG, "BytecodeHandler") \
- V(LAZY_COMPILE_TAG, "LazyCompile") \
- V(REG_EXP_TAG, "RegExp") \
- V(SCRIPT_TAG, "Script") \
- V(STUB_TAG, "Stub") \
- V(NATIVE_FUNCTION_TAG, "Function") \
- V(NATIVE_LAZY_COMPILE_TAG, "LazyCompile") \
+#define LOG_EVENTS_AND_TAGS_LIST(V) \
+ V(CODE_CREATION_EVENT, "code-creation") \
+ V(CODE_DISABLE_OPT_EVENT, "code-disable-optimization") \
+ V(CODE_MOVE_EVENT, "code-move") \
+ V(CODE_DELETE_EVENT, "code-delete") \
+ V(CODE_MOVING_GC, "code-moving-gc") \
+ V(SHARED_FUNC_MOVE_EVENT, "sfi-move") \
+ V(SNAPSHOT_CODE_NAME_EVENT, "snapshot-code-name") \
+ V(TICK_EVENT, "tick") \
+ V(BUILTIN_TAG, "Builtin") \
+ V(CALLBACK_TAG, "Callback") \
+ V(EVAL_TAG, "Eval") \
+ V(FUNCTION_TAG, "Function") \
+ V(INTERPRETED_FUNCTION_TAG, "InterpretedFunction") \
+ V(HANDLER_TAG, "Handler") \
+ V(BYTECODE_HANDLER_TAG, "BytecodeHandler") \
+ V(LAZY_COMPILE_TAG, "LazyCompile") \
+ V(REG_EXP_TAG, "RegExp") \
+ V(SCRIPT_TAG, "Script") \
+ V(STUB_TAG, "Stub") \
+ V(NATIVE_FUNCTION_TAG, "Function") \
+ V(NATIVE_LAZY_COMPILE_TAG, "LazyCompile") \
V(NATIVE_SCRIPT_TAG, "Script")
// Note that 'NATIVE_' cases for functions and scripts are mapped onto
// original tags when writing to the log.
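LOG_EVENTS_AND_TAGS_LIST is an X-macro list: each consumer passes its own V to stamp out an enum, a name table, and so on, which is why adding INTERPRETED_FUNCTION_TAG is a one-line change here. A generic, self-contained illustration of the pattern (deliberately not V8's actual expansion sites):

    #include <cstdio>

    #define DEMO_EVENT_LIST(V)                \
      V(CODE_CREATION_EVENT, "code-creation") \
      V(CODE_MOVE_EVENT, "code-move")         \
      V(TICK_EVENT, "tick")

    // Expand once into an enum...
    enum DemoLogEvent {
    #define DECLARE_ENUM(name, str) name,
      DEMO_EVENT_LIST(DECLARE_ENUM)
    #undef DECLARE_ENUM
      NUMBER_OF_DEMO_LOG_EVENTS
    };

    // ...and once into the matching string table used when writing the log.
    static const char* const kDemoEventNames[] = {
    #define DECLARE_NAME(name, str) str,
      DEMO_EVENT_LIST(DECLARE_NAME)
    #undef DECLARE_NAME
    };

    int main() {
      std::printf("%s\n", kDemoEventNames[CODE_MOVE_EVENT]);  // prints "code-move"
      return 0;
    }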
@@ -71,15 +71,12 @@ class CodeEventListener {
virtual void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
SharedFunctionInfo* shared, Name* source,
int line, int column) = 0;
- virtual void CodeCreateEvent(LogEventsAndTags tag, wasm::WasmCode* code,
+ virtual void CodeCreateEvent(LogEventsAndTags tag, const wasm::WasmCode* code,
wasm::WasmName name) = 0;
virtual void CallbackEvent(Name* name, Address entry_point) = 0;
virtual void GetterCallbackEvent(Name* name, Address entry_point) = 0;
virtual void SetterCallbackEvent(Name* name, Address entry_point) = 0;
virtual void RegExpCodeCreateEvent(AbstractCode* code, String* source) = 0;
- virtual void InstructionStreamCreateEvent(LogEventsAndTags tag,
- const InstructionStream* stream,
- const char* description) = 0;
virtual void CodeMoveEvent(AbstractCode* from, Address to) = 0;
virtual void SharedFunctionInfoMoveEvent(Address from, Address to) = 0;
virtual void CodeMovingGCEvent() = 0;
@@ -126,7 +123,7 @@ class CodeEventDispatcher {
CODE_EVENT_DISPATCH(
CodeCreateEvent(tag, code, shared, source, line, column));
}
- void CodeCreateEvent(LogEventsAndTags tag, wasm::WasmCode* code,
+ void CodeCreateEvent(LogEventsAndTags tag, const wasm::WasmCode* code,
wasm::WasmName name) {
CODE_EVENT_DISPATCH(CodeCreateEvent(tag, code, name));
}
@@ -142,11 +139,6 @@ class CodeEventDispatcher {
void RegExpCodeCreateEvent(AbstractCode* code, String* source) {
CODE_EVENT_DISPATCH(RegExpCodeCreateEvent(code, source));
}
- void InstructionStreamCreateEvent(LogEventsAndTags tag,
- const InstructionStream* stream,
- const char* description) {
- CODE_EVENT_DISPATCH(InstructionStreamCreateEvent(tag, stream, description));
- }
void CodeMoveEvent(AbstractCode* from, Address to) {
CODE_EVENT_DISPATCH(CodeMoveEvent(from, to));
}
diff --git a/deps/v8/src/code-stub-assembler.cc b/deps/v8/src/code-stub-assembler.cc
index d3c81d0e81..556efe7d0a 100644
--- a/deps/v8/src/code-stub-assembler.cc
+++ b/deps/v8/src/code-stub-assembler.cc
@@ -154,10 +154,10 @@ void CodeStubAssembler::Check(const NodeGenerator& condition_body,
extra_node4_name, extra_node5, extra_node5_name);
}
-Node* CodeStubAssembler::Select(SloppyTNode<BoolT> condition,
- const NodeGenerator& true_body,
- const NodeGenerator& false_body,
- MachineRepresentation rep) {
+Node* CodeStubAssembler::SelectImpl(TNode<BoolT> condition,
+ const NodeGenerator& true_body,
+ const NodeGenerator& false_body,
+ MachineRepresentation rep) {
VARIABLE(value, rep);
Label vtrue(this), vfalse(this), end(this);
Branch(condition, &vtrue, &vfalse);
@@ -177,37 +177,28 @@ Node* CodeStubAssembler::Select(SloppyTNode<BoolT> condition,
return value.value();
}
-Node* CodeStubAssembler::SelectConstant(Node* condition, Node* true_value,
- Node* false_value,
- MachineRepresentation rep) {
- return Select(condition, [=] { return true_value; },
- [=] { return false_value; }, rep);
+TNode<Int32T> CodeStubAssembler::SelectInt32Constant(
+ SloppyTNode<BoolT> condition, int true_value, int false_value) {
+ return SelectConstant<Int32T>(condition, Int32Constant(true_value),
+ Int32Constant(false_value));
}
-Node* CodeStubAssembler::SelectInt32Constant(Node* condition, int true_value,
- int false_value) {
- return SelectConstant(condition, Int32Constant(true_value),
- Int32Constant(false_value),
- MachineRepresentation::kWord32);
+TNode<IntPtrT> CodeStubAssembler::SelectIntPtrConstant(
+ SloppyTNode<BoolT> condition, int true_value, int false_value) {
+ return SelectConstant<IntPtrT>(condition, IntPtrConstant(true_value),
+ IntPtrConstant(false_value));
}
-Node* CodeStubAssembler::SelectIntPtrConstant(Node* condition, int true_value,
- int false_value) {
- return SelectConstant(condition, IntPtrConstant(true_value),
- IntPtrConstant(false_value),
- MachineType::PointerRepresentation());
+TNode<Oddball> CodeStubAssembler::SelectBooleanConstant(
+ SloppyTNode<BoolT> condition) {
+ return SelectConstant<Oddball>(condition, TrueConstant(), FalseConstant());
}
-Node* CodeStubAssembler::SelectBooleanConstant(Node* condition) {
- return SelectConstant(condition, TrueConstant(), FalseConstant(),
- MachineRepresentation::kTagged);
-}
-
-Node* CodeStubAssembler::SelectSmiConstant(Node* condition, Smi* true_value,
- Smi* false_value) {
- return SelectConstant(condition, SmiConstant(true_value),
- SmiConstant(false_value),
- MachineRepresentation::kTaggedSigned);
+TNode<Smi> CodeStubAssembler::SelectSmiConstant(SloppyTNode<BoolT> condition,
+ Smi* true_value,
+ Smi* false_value) {
+ return SelectConstant<Smi>(condition, SmiConstant(true_value),
+ SmiConstant(false_value));
}
Node* CodeStubAssembler::NoContextConstant() {
@@ -312,11 +303,10 @@ Node* CodeStubAssembler::MatchesParameterMode(Node* value, ParameterMode mode) {
TNode<BoolT> CodeStubAssembler::WordIsPowerOfTwo(SloppyTNode<IntPtrT> value) {
// value && !(value & (value - 1))
return WordEqual(
- Select(
+ Select<IntPtrT>(
WordEqual(value, IntPtrConstant(0)),
[=] { return IntPtrConstant(1); },
- [=] { return WordAnd(value, IntPtrSub(value, IntPtrConstant(1))); },
- MachineType::PointerRepresentation()),
+ [=] { return WordAnd(value, IntPtrSub(value, IntPtrConstant(1))); }),
IntPtrConstant(0));
}
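The rewritten WordIsPowerOfTwo keeps the bit trick stated in the comment: a power of two has a single bit set, so value & (value - 1) clears it, and the Select substitutes a non-zero probe for the zero case so that zero fails the final comparison. A minimal standalone sketch of the same predicate, in plain C++ rather than the CSA node DSL:

#include <cstdint>

// True iff value is a power of two; zero is rejected because the
// subtraction trick only works when at least one bit is set.
bool IsPowerOfTwo(uintptr_t value) {
  return value != 0 && (value & (value - 1)) == 0;
}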
@@ -571,11 +561,11 @@ TNode<Float64T> CodeStubAssembler::SmiToFloat64(SloppyTNode<Smi> value) {
}
TNode<Smi> CodeStubAssembler::SmiMax(SloppyTNode<Smi> a, SloppyTNode<Smi> b) {
- return SelectTaggedConstant(SmiLessThan(a, b), b, a);
+ return SelectConstant<Smi>(SmiLessThan(a, b), b, a);
}
TNode<Smi> CodeStubAssembler::SmiMin(SloppyTNode<Smi> a, SloppyTNode<Smi> b) {
- return SelectTaggedConstant(SmiLessThan(a, b), a, b);
+ return SelectConstant<Smi>(SmiLessThan(a, b), a, b);
}
TNode<Object> CodeStubAssembler::NumberMax(SloppyTNode<Object> a,
@@ -633,9 +623,8 @@ TNode<IntPtrT> CodeStubAssembler::ConvertToRelativeIndex(
TNode<Smi> const index_smi = CAST(index_int);
result = Select<IntPtrT>(
IntPtrLessThan(SmiUntag(index_smi), zero),
- [&] { return IntPtrMax(IntPtrAdd(length, SmiUntag(index_smi)), zero); },
- [&] { return IntPtrMin(SmiUntag(index_smi), length); },
- MachineType::PointerRepresentation());
+ [=] { return IntPtrMax(IntPtrAdd(length, SmiUntag(index_smi)), zero); },
+ [=] { return IntPtrMin(SmiUntag(index_smi), length); });
Goto(&done);
}
@@ -647,37 +636,38 @@ TNode<IntPtrT> CodeStubAssembler::ConvertToRelativeIndex(
TNode<HeapNumber> const index_hn = CAST(index_int);
TNode<Float64T> const float_zero = Float64Constant(0.);
TNode<Float64T> const index_float = LoadHeapNumberValue(index_hn);
- result = SelectConstant(Float64LessThan(index_float, float_zero), zero,
- length, MachineType::PointerRepresentation());
+ result = SelectConstant<IntPtrT>(Float64LessThan(index_float, float_zero),
+ zero, length);
Goto(&done);
}
BIND(&done);
return result.value();
}
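ConvertToRelativeIndex normalizes a possibly negative index against a length: the Smi path above clamps with max/min, and the out-of-Smi-range HeapNumber path snaps straight to 0 or length depending on sign. The same clamping, sketched as ordinary C++ (illustration only, not the CSA helper):

#include <algorithm>
#include <cstdint>

// Negative indices count back from the end; the result always lands in [0, length].
intptr_t RelativeIndex(intptr_t index, intptr_t length) {
  return index < 0 ? std::max(length + index, intptr_t{0})
                   : std::min(index, length);
}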
-Node* CodeStubAssembler::SmiMod(Node* a, Node* b) {
- VARIABLE(var_result, MachineRepresentation::kTagged);
+TNode<Number> CodeStubAssembler::SmiMod(SloppyTNode<Smi> a,
+ SloppyTNode<Smi> b) {
+ TVARIABLE(Number, var_result);
Label return_result(this, &var_result),
return_minuszero(this, Label::kDeferred),
return_nan(this, Label::kDeferred);
// Untag {a} and {b}.
- a = SmiToInt32(a);
- b = SmiToInt32(b);
+ TNode<Int32T> int_a = SmiToInt32(a);
+ TNode<Int32T> int_b = SmiToInt32(b);
// Return NaN if {b} is zero.
- GotoIf(Word32Equal(b, Int32Constant(0)), &return_nan);
+ GotoIf(Word32Equal(int_b, Int32Constant(0)), &return_nan);
// Check if {a} is non-negative.
Label if_aisnotnegative(this), if_aisnegative(this, Label::kDeferred);
- Branch(Int32LessThanOrEqual(Int32Constant(0), a), &if_aisnotnegative,
+ Branch(Int32LessThanOrEqual(Int32Constant(0), int_a), &if_aisnotnegative,
&if_aisnegative);
BIND(&if_aisnotnegative);
{
// Fast case, don't need to check any other edge cases.
- Node* r = Int32Mod(a, b);
- var_result.Bind(SmiFromInt32(r));
+ TNode<Int32T> r = Int32Mod(int_a, int_b);
+ var_result = SmiFromInt32(r);
Goto(&return_result);
}
@@ -687,14 +677,14 @@ Node* CodeStubAssembler::SmiMod(Node* a, Node* b) {
// Check if {a} is kMinInt and {b} is -1 (only relevant if the
// kMinInt is actually representable as a Smi).
Label join(this);
- GotoIfNot(Word32Equal(a, Int32Constant(kMinInt)), &join);
- GotoIf(Word32Equal(b, Int32Constant(-1)), &return_minuszero);
+ GotoIfNot(Word32Equal(int_a, Int32Constant(kMinInt)), &join);
+ GotoIf(Word32Equal(int_b, Int32Constant(-1)), &return_minuszero);
Goto(&join);
BIND(&join);
}
// Perform the integer modulus operation.
- Node* r = Int32Mod(a, b);
+ TNode<Int32T> r = Int32Mod(int_a, int_b);
// Check if {r} is zero, and if so return -0, because we have to
// take the sign of the left hand side {a}, which is negative.
@@ -702,20 +692,20 @@ Node* CodeStubAssembler::SmiMod(Node* a, Node* b) {
// The remainder {r} can be outside the valid Smi range on 32bit
// architectures, so we cannot just say SmiFromInt32(r) here.
- var_result.Bind(ChangeInt32ToTagged(r));
+ var_result = ChangeInt32ToTagged(r);
Goto(&return_result);
}
BIND(&return_minuszero);
- var_result.Bind(MinusZeroConstant());
+ var_result = MinusZeroConstant();
Goto(&return_result);
BIND(&return_nan);
- var_result.Bind(NanConstant());
+ var_result = NanConstant();
Goto(&return_result);
BIND(&return_result);
- return TNode<Object>::UncheckedCast(var_result.value());
+ return var_result.value();
}
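The typed SmiMod keeps JavaScript's remainder edge cases: division by zero yields NaN, kMinInt % -1 is special-cased before the hardware modulus, and a zero result takes the sign of the (negative) dividend. A standalone sketch of those rules, using a double so that -0 and NaN are representable (not the CSA code path itself):

#include <cstdint>
#include <limits>

double JsIntMod(int32_t a, int32_t b) {
  if (b == 0) return std::numeric_limits<double>::quiet_NaN();           // x % 0 -> NaN
  if (a >= 0) return static_cast<double>(a % b);                          // fast path
  if (a == std::numeric_limits<int32_t>::min() && b == -1) return -0.0;   // would trap in idiv
  int32_t r = a % b;
  return r == 0 ? -0.0 : static_cast<double>(r);                          // sign of the dividend
}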
TNode<Number> CodeStubAssembler::SmiMul(SloppyTNode<Smi> a,
@@ -950,20 +940,19 @@ TNode<BoolT> CodeStubAssembler::IsFastJSArrayWithNoCustomIteration(
TNode<Object> object, TNode<Context> context,
TNode<Context> native_context) {
Label if_false(this, Label::kDeferred), if_fast(this), exit(this);
+ TVARIABLE(BoolT, var_result);
GotoIfForceSlowPath(&if_false);
- TVARIABLE(BoolT, var_result, Int32TrueConstant());
BranchIfFastJSArray(object, context, &if_fast, &if_false);
BIND(&if_fast);
{
- // Check if the Array.prototype[@@iterator] may have changed.
- GotoIfNot(InitialArrayPrototypeHasInitialArrayPrototypeMap(native_context),
- &if_false);
- // Check if array[@@iterator] may have changed.
- GotoIfNot(HasInitialFastElementsKindMap(native_context, CAST(object)),
- &if_false);
- // Check if the array iterator has changed.
- Branch(HasInitialArrayIteratorPrototypeMap(native_context), &exit,
- &if_false);
+ // Check that the Array.prototype hasn't been modified in a way that would
+ // affect iteration.
+ Node* protector_cell = LoadRoot(Heap::kArrayIteratorProtectorRootIndex);
+ DCHECK(isolate()->heap()->array_iterator_protector()->IsPropertyCell());
+ var_result =
+ WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
+ SmiConstant(Isolate::kProtectorValid));
+ Goto(&exit);
}
BIND(&if_false);
{
@@ -997,7 +986,7 @@ void CodeStubAssembler::BranchIfFastJSArray(Node* object, Node* context,
void CodeStubAssembler::BranchIfFastJSArrayForCopy(Node* object, Node* context,
Label* if_true,
Label* if_false) {
- GotoIf(IsSpeciesProtectorCellInvalid(), if_false);
+ GotoIf(IsArraySpeciesProtectorCellInvalid(), if_false);
BranchIfFastJSArray(object, context, if_true, if_false);
}
@@ -1264,12 +1253,14 @@ Node* CodeStubAssembler::LoadBufferObject(Node* buffer, int offset,
Node* CodeStubAssembler::LoadObjectField(SloppyTNode<HeapObject> object,
int offset, MachineType rep) {
+ AssertIsStrongHeapObject(object);
return Load(rep, object, IntPtrConstant(offset - kHeapObjectTag));
}
Node* CodeStubAssembler::LoadObjectField(SloppyTNode<HeapObject> object,
SloppyTNode<IntPtrT> offset,
MachineType rep) {
+ AssertIsStrongHeapObject(object);
return Load(rep, object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)));
}
@@ -1363,21 +1354,11 @@ TNode<Int32T> CodeStubAssembler::LoadInstanceType(
return LoadMapInstanceType(LoadMap(object));
}
-Node* CodeStubAssembler::HasInstanceType(Node* object,
- InstanceType instance_type) {
+TNode<BoolT> CodeStubAssembler::HasInstanceType(SloppyTNode<HeapObject> object,
+ InstanceType instance_type) {
return InstanceTypeEqual(LoadInstanceType(object), instance_type);
}
-TNode<BoolT> CodeStubAssembler::HasInitialArrayIteratorPrototypeMap(
- TNode<Context> native_context) {
- CSA_ASSERT(this, IsNativeContext(native_context));
- TNode<Map> arr_it_proto_map = LoadMap(CAST(LoadContextElement(
- native_context, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX)));
- TNode<Map> initial_map = CAST(LoadContextElement(
- native_context, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
- return WordEqual(arr_it_proto_map, initial_map);
-}
-
TNode<BoolT>
CodeStubAssembler::InitialArrayPrototypeHasInitialArrayPrototypeMap(
TNode<Context> native_context) {
@@ -1399,34 +1380,38 @@ TNode<BoolT> CodeStubAssembler::HasInitialFastElementsKindMap(
return WordEqual(initial_jsarray_element_map, map);
}
-Node* CodeStubAssembler::DoesntHaveInstanceType(Node* object,
- InstanceType instance_type) {
+TNode<BoolT> CodeStubAssembler::DoesntHaveInstanceType(
+ SloppyTNode<HeapObject> object, InstanceType instance_type) {
return Word32NotEqual(LoadInstanceType(object), Int32Constant(instance_type));
}
-Node* CodeStubAssembler::TaggedDoesntHaveInstanceType(Node* any_tagged,
- InstanceType type) {
+TNode<BoolT> CodeStubAssembler::TaggedDoesntHaveInstanceType(
+ SloppyTNode<HeapObject> any_tagged, InstanceType type) {
/* return Phi <TaggedIsSmi(val), DoesntHaveInstanceType(val, type)> */
- Node* tagged_is_smi = TaggedIsSmi(any_tagged);
- return Select(tagged_is_smi, [=]() { return tagged_is_smi; },
- [=]() { return DoesntHaveInstanceType(any_tagged, type); },
- MachineRepresentation::kBit);
+ TNode<BoolT> tagged_is_smi = TaggedIsSmi(any_tagged);
+ return Select<BoolT>(
+ tagged_is_smi, [=]() { return tagged_is_smi; },
+ [=]() { return DoesntHaveInstanceType(any_tagged, type); });
}
TNode<HeapObject> CodeStubAssembler::LoadFastProperties(
SloppyTNode<JSObject> object) {
CSA_SLOW_ASSERT(this, Word32Not(IsDictionaryMap(LoadMap(object))));
- Node* properties = LoadObjectField(object, JSObject::kPropertiesOrHashOffset);
- return SelectTaggedConstant<HeapObject>(
- TaggedIsSmi(properties), EmptyFixedArrayConstant(), properties);
+ TNode<Object> properties =
+ LoadObjectField(object, JSObject::kPropertiesOrHashOffset);
+ return Select<HeapObject>(TaggedIsSmi(properties),
+ [=] { return EmptyFixedArrayConstant(); },
+ [=] { return CAST(properties); });
}
TNode<HeapObject> CodeStubAssembler::LoadSlowProperties(
SloppyTNode<JSObject> object) {
CSA_SLOW_ASSERT(this, IsDictionaryMap(LoadMap(object)));
- Node* properties = LoadObjectField(object, JSObject::kPropertiesOrHashOffset);
- return SelectTaggedConstant<HeapObject>(
- TaggedIsSmi(properties), EmptyPropertyDictionaryConstant(), properties);
+ TNode<Object> properties =
+ LoadObjectField(object, JSObject::kPropertiesOrHashOffset);
+ return Select<HeapObject>(TaggedIsSmi(properties),
+ [=] { return EmptyPropertyDictionaryConstant(); },
+ [=] { return CAST(properties); });
}
TNode<FixedArrayBase> CodeStubAssembler::LoadElements(
@@ -1434,9 +1419,9 @@ TNode<FixedArrayBase> CodeStubAssembler::LoadElements(
return CAST(LoadObjectField(object, JSObject::kElementsOffset));
}
-TNode<Object> CodeStubAssembler::LoadJSArrayLength(SloppyTNode<JSArray> array) {
+TNode<Number> CodeStubAssembler::LoadJSArrayLength(SloppyTNode<JSArray> array) {
CSA_ASSERT(this, IsJSArray(array));
- return LoadObjectField(array, JSArray::kLengthOffset);
+ return CAST(LoadObjectField(array, JSArray::kLengthOffset));
}
TNode<Smi> CodeStubAssembler::LoadFastJSArrayLength(
@@ -1500,14 +1485,20 @@ TNode<Object> CodeStubAssembler::LoadMapPrototype(SloppyTNode<Map> map) {
TNode<PrototypeInfo> CodeStubAssembler::LoadMapPrototypeInfo(
SloppyTNode<Map> map, Label* if_no_proto_info) {
+ Label if_strong_heap_object(this);
CSA_ASSERT(this, IsMap(map));
- Node* prototype_info =
+ Node* maybe_prototype_info =
LoadObjectField(map, Map::kTransitionsOrPrototypeInfoOffset);
- GotoIf(TaggedIsSmi(prototype_info), if_no_proto_info);
- GotoIfNot(WordEqual(LoadMap(prototype_info),
+ VARIABLE(prototype_info, MachineRepresentation::kTagged);
+ DispatchMaybeObject(maybe_prototype_info, if_no_proto_info, if_no_proto_info,
+ if_no_proto_info, &if_strong_heap_object,
+ &prototype_info);
+
+ BIND(&if_strong_heap_object);
+ GotoIfNot(WordEqual(LoadMap(prototype_info.value()),
LoadRoot(Heap::kPrototypeInfoMapRootIndex)),
if_no_proto_info);
- return CAST(prototype_info);
+ return CAST(prototype_info.value());
}
TNode<IntPtrT> CodeStubAssembler::LoadMapInstanceSizeInWords(
@@ -1564,11 +1555,11 @@ Node* CodeStubAssembler::LoadMapEnumLength(SloppyTNode<Map> map) {
return DecodeWordFromWord32<Map::EnumLengthBits>(bit_field3);
}
-Node* CodeStubAssembler::LoadMapBackPointer(SloppyTNode<Map> map) {
- Node* object = LoadObjectField(map, Map::kConstructorOrBackPointerOffset);
- return Select(IsMap(object), [=] { return object; },
- [=] { return UndefinedConstant(); },
- MachineRepresentation::kTagged);
+TNode<Object> CodeStubAssembler::LoadMapBackPointer(SloppyTNode<Map> map) {
+ TNode<Object> object =
+ LoadObjectField(map, Map::kConstructorOrBackPointerOffset);
+ return Select<Object>(IsMap(object), [=] { return object; },
+ [=] { return UndefinedConstant(); });
}
TNode<IntPtrT> CodeStubAssembler::LoadJSReceiverIdentityHash(
@@ -1614,8 +1605,8 @@ TNode<IntPtrT> CodeStubAssembler::LoadJSReceiverIdentityHash(
BIND(&if_property_dictionary);
{
- var_hash = SmiUntag(
- LoadFixedArrayElement(properties, NameDictionary::kObjectHashIndex));
+ var_hash = SmiUntag(CAST(
+ LoadFixedArrayElement(properties, NameDictionary::kObjectHashIndex)));
Goto(&done);
}
@@ -1685,17 +1676,49 @@ TNode<Object> CodeStubAssembler::LoadWeakCellValue(
return value;
}
-Node* CodeStubAssembler::LoadFixedArrayElement(Node* object, Node* index_node,
- int additional_offset,
- ParameterMode parameter_mode) {
+void CodeStubAssembler::DispatchMaybeObject(Node* maybe_object, Label* if_smi,
+ Label* if_cleared, Label* if_weak,
+ Label* if_strong,
+ Variable* extracted) {
+ Label inner_if_smi(this), inner_if_strong(this);
+
+ GotoIf(TaggedIsSmi(maybe_object), &inner_if_smi);
+
+ GotoIf(WordEqual(maybe_object, IntPtrConstant(reinterpret_cast<intptr_t>(
+ HeapObjectReference::ClearedValue()))),
+ if_cleared);
+
+ GotoIf(WordEqual(WordAnd(BitcastTaggedToWord(maybe_object),
+ IntPtrConstant(kWeakHeapObjectMask)),
+ IntPtrConstant(0)),
+ &inner_if_strong);
+
+ extracted->Bind(
+ BitcastWordToTagged(WordAnd(BitcastTaggedToWord(maybe_object),
+ IntPtrConstant(~kWeakHeapObjectMask))));
+ Goto(if_weak);
+
+ BIND(&inner_if_smi);
+ extracted->Bind(maybe_object);
+ Goto(if_smi);
+
+ BIND(&inner_if_strong);
+ extracted->Bind(maybe_object);
+ Goto(if_strong);
+}
+
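DispatchMaybeObject is the new decoder for "maybe object" words: it first peels off Smis, then recognizes the cleared-weak sentinel, then uses kWeakHeapObjectMask to split weak from strong pointers, stripping the mask bit to recover the referent. A rough sketch of that style of low-bit tagging with made-up constants (the real V8 encoding and sentinel value are not reproduced here):

#include <cstdint>

enum class MaybeObjectKind { kSmi, kCleared, kWeak, kStrong };

constexpr uintptr_t kSmiTagMask = 0x1;       // hypothetical: low bit clear => immediate integer
constexpr uintptr_t kWeakTagMask = 0x2;      // hypothetical: weak-reference bit
constexpr uintptr_t kClearedSentinel = 0x3;  // hypothetical: weak target already collected

MaybeObjectKind Dispatch(uintptr_t word, uintptr_t* extracted) {
  if ((word & kSmiTagMask) == 0) { *extracted = word; return MaybeObjectKind::kSmi; }
  if (word == kClearedSentinel) { return MaybeObjectKind::kCleared; }
  if ((word & kWeakTagMask) == 0) { *extracted = word; return MaybeObjectKind::kStrong; }
  *extracted = word & ~kWeakTagMask;          // strip the tag to get the strong pointer
  return MaybeObjectKind::kWeak;
}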
+TNode<Object> CodeStubAssembler::LoadFixedArrayElement(
+ SloppyTNode<Object> object, Node* index_node, int additional_offset,
+ ParameterMode parameter_mode, LoadSensitivity needs_poisoning) {
CSA_SLOW_ASSERT(this, IntPtrGreaterThanOrEqual(
ParameterToIntPtr(index_node, parameter_mode),
IntPtrConstant(0)));
int32_t header_size =
FixedArray::kHeaderSize + additional_offset - kHeapObjectTag;
- Node* offset = ElementOffsetFromIndex(index_node, HOLEY_ELEMENTS,
- parameter_mode, header_size);
- return Load(MachineType::AnyTagged(), object, offset);
+ TNode<IntPtrT> offset = ElementOffsetFromIndex(index_node, HOLEY_ELEMENTS,
+ parameter_mode, header_size);
+ return UncheckedCast<Object>(
+ Load(MachineType::AnyTagged(), object, offset, needs_poisoning));
}
TNode<RawPtrT> CodeStubAssembler::LoadFixedTypedArrayBackingStore(
@@ -1746,11 +1769,19 @@ Node* CodeStubAssembler::LoadFixedBigInt64ArrayElementAsTagged(
TVARIABLE(WordT, var_sign, IntPtrConstant(BigInt::SignBits::encode(false)));
TVARIABLE(IntPtrT, var_low);
TVARIABLE(IntPtrT, var_high);
+#if defined(V8_TARGET_BIG_ENDIAN)
+ var_high = UncheckedCast<IntPtrT>(
+ Load(MachineType::UintPtr(), data_pointer, offset));
+ var_low = UncheckedCast<IntPtrT>(
+ Load(MachineType::UintPtr(), data_pointer,
+ Int32Add(offset, Int32Constant(kPointerSize))));
+#else
var_low = UncheckedCast<IntPtrT>(
Load(MachineType::UintPtr(), data_pointer, offset));
var_high = UncheckedCast<IntPtrT>(
Load(MachineType::UintPtr(), data_pointer,
Int32Add(offset, Int32Constant(kPointerSize))));
+#endif
Label high_zero(this), negative(this), allocate_one_digit(this),
allocate_two_digits(this);
@@ -1829,11 +1860,19 @@ Node* CodeStubAssembler::LoadFixedBigUint64ArrayElementAsTagged(
DCHECK(!Is64());
Label high_zero(this);
+#if defined(V8_TARGET_BIG_ENDIAN)
+ TNode<UintPtrT> high = UncheckedCast<UintPtrT>(
+ Load(MachineType::UintPtr(), data_pointer, offset));
+ TNode<UintPtrT> low = UncheckedCast<UintPtrT>(
+ Load(MachineType::UintPtr(), data_pointer,
+ Int32Add(offset, Int32Constant(kPointerSize))));
+#else
TNode<UintPtrT> low = UncheckedCast<UintPtrT>(
Load(MachineType::UintPtr(), data_pointer, offset));
TNode<UintPtrT> high = UncheckedCast<UintPtrT>(
Load(MachineType::UintPtr(), data_pointer,
Int32Add(offset, Int32Constant(kPointerSize))));
+#endif
GotoIf(WordEqual(high, IntPtrConstant(0)), &high_zero);
var_result = AllocateBigInt(IntPtrConstant(2));
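Both endianness hunks make the same point: on a 32-bit target a 64-bit typed-array element is read as two machine words, and V8_TARGET_BIG_ENDIAN decides which of the two words holds the high half. A host-side illustration of that split (plain C++, not the CSA loads):

#include <cstdint>

void SplitUint64(uint64_t value, uint32_t words[2], bool big_endian) {
  uint32_t low = static_cast<uint32_t>(value);
  uint32_t high = static_cast<uint32_t>(value >> 32);
  if (big_endian) { words[0] = high; words[1] = low; }   // high word stored first
  else            { words[0] = low;  words[1] = high; }  // little-endian layout
}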
@@ -1903,8 +1942,8 @@ TNode<Object> CodeStubAssembler::LoadFeedbackVectorSlot(
return UncheckedCast<Object>(Load(MachineType::AnyTagged(), object, offset));
}
-Node* CodeStubAssembler::LoadAndUntagToWord32FixedArrayElement(
- Node* object, Node* index_node, int additional_offset,
+TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32FixedArrayElement(
+ SloppyTNode<Object> object, Node* index_node, int additional_offset,
ParameterMode parameter_mode) {
CSA_SLOW_ASSERT(this, IsFixedArraySubclass(object));
CSA_SLOW_ASSERT(this, MatchesParameterMode(index_node, parameter_mode));
@@ -1918,7 +1957,7 @@ Node* CodeStubAssembler::LoadAndUntagToWord32FixedArrayElement(
Node* offset = ElementOffsetFromIndex(index_node, HOLEY_ELEMENTS,
parameter_mode, header_size);
if (Is64()) {
- return Load(MachineType::Int32(), object, offset);
+ return UncheckedCast<Int32T>(Load(MachineType::Int32(), object, offset));
} else {
return SmiToInt32(Load(MachineType::AnyTagged(), object, offset));
}
@@ -2071,6 +2110,26 @@ Node* CodeStubAssembler::LoadJSFunctionPrototype(Node* function,
return var_result.value();
}
+Node* CodeStubAssembler::LoadSharedFunctionInfoBytecodeArray(Node* shared) {
+ CSA_ASSERT(this, TaggedIsNotSmi(shared));
+ CSA_ASSERT(this, IsSharedFunctionInfo(shared));
+
+ Node* function_data =
+ LoadObjectField(shared, SharedFunctionInfo::kFunctionDataOffset);
+
+ VARIABLE(var_result, MachineRepresentation::kTagged, function_data);
+ Label done(this, &var_result);
+
+ GotoIfNot(HasInstanceType(function_data, INTERPRETER_DATA_TYPE), &done);
+ Node* bytecode_array =
+ LoadObjectField(function_data, InterpreterData::kBytecodeArrayOffset);
+ var_result.Bind(bytecode_array);
+ Goto(&done);
+
+ BIND(&done);
+ return var_result.value();
+}
+
void CodeStubAssembler::StoreHeapNumberValue(SloppyTNode<HeapNumber> object,
SloppyTNode<Float64T> value) {
StoreObjectFieldNoWriteBarrier(object, HeapNumber::kValueOffset, value,
@@ -2136,14 +2195,26 @@ Node* CodeStubAssembler::StoreObjectFieldRoot(Node* object, int offset,
}
}
+Node* CodeStubAssembler::StoreJSArrayLength(TNode<JSArray> array,
+ TNode<Smi> length) {
+ return StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
+}
+
+Node* CodeStubAssembler::StoreElements(TNode<Object> object,
+ TNode<FixedArrayBase> elements) {
+ return StoreObjectField(object, JSObject::kElementsOffset, elements);
+}
+
Node* CodeStubAssembler::StoreFixedArrayElement(Node* object, Node* index_node,
Node* value,
WriteBarrierMode barrier_mode,
int additional_offset,
ParameterMode parameter_mode) {
CSA_SLOW_ASSERT(
- this, Word32Or(IsHashTable(object),
- Word32Or(IsFixedArray(object), IsPropertyArray(object))));
+ this,
+ Word32Or(IsHashTable(object),
+ Word32Or(IsFixedArray(object),
+ Word32Or(IsPropertyArray(object), IsContext(object)))));
CSA_SLOW_ASSERT(this, MatchesParameterMode(index_node, parameter_mode));
DCHECK(barrier_mode == SKIP_WRITE_BARRIER ||
barrier_mode == UPDATE_WRITE_BARRIER);
@@ -2193,36 +2264,45 @@ Node* CodeStubAssembler::StoreFeedbackVectorSlot(Node* object,
}
}
-void CodeStubAssembler::EnsureArrayLengthWritable(Node* map, Label* bailout) {
+void CodeStubAssembler::EnsureArrayLengthWritable(TNode<Map> map,
+ Label* bailout) {
+ // Don't support arrays in dictionary named property mode.
+ GotoIf(IsDictionaryMap(map), bailout);
+
// Check whether the length property is writable. The length property is the
// only default named property on arrays. It's nonconfigurable, hence is
// guaranteed to stay the first property.
- Node* descriptors = LoadMapDescriptors(map);
- Node* details =
- LoadFixedArrayElement(descriptors, DescriptorArray::ToDetailsIndex(0));
- GotoIf(IsSetSmi(details, PropertyDetails::kAttributesReadOnlyMask), bailout);
+ TNode<DescriptorArray> descriptors = LoadMapDescriptors(map);
+
+ int length_index = JSArray::kLengthDescriptorIndex;
+#ifdef DEBUG
+ TNode<Name> maybe_length = CAST(LoadFixedArrayElement(
+ descriptors, DescriptorArray::ToKeyIndex(length_index)));
+ CSA_ASSERT(this,
+ WordEqual(maybe_length, LoadRoot(Heap::klength_stringRootIndex)));
+#endif
+
+ TNode<Int32T> details = LoadAndUntagToWord32FixedArrayElement(
+ descriptors, DescriptorArray::ToDetailsIndex(length_index));
+ GotoIf(IsSetWord32(details, PropertyDetails::kAttributesReadOnlyMask),
+ bailout);
}
-Node* CodeStubAssembler::EnsureArrayPushable(Node* receiver, Label* bailout) {
+TNode<Int32T> CodeStubAssembler::EnsureArrayPushable(TNode<Map> map,
+ Label* bailout) {
// Disallow pushing onto prototypes. It might be the JSArray prototype.
// Disallow pushing onto non-extensible objects.
Comment("Disallow pushing onto prototypes");
- Node* map = LoadMap(receiver);
Node* bit_field2 = LoadMapBitField2(map);
int mask = Map::IsPrototypeMapBit::kMask | Map::IsExtensibleBit::kMask;
Node* test = Word32And(bit_field2, Int32Constant(mask));
GotoIf(Word32NotEqual(test, Int32Constant(Map::IsExtensibleBit::kMask)),
bailout);
- // Disallow pushing onto arrays in dictionary named property mode. We need
- // to figure out whether the length property is still writable.
- Comment("Disallow pushing onto arrays in dictionary named property mode");
- GotoIf(IsDictionaryMap(map), bailout);
-
EnsureArrayLengthWritable(map, bailout);
- Node* kind = DecodeWord32<Map::ElementsKindBits>(bit_field2);
- return kind;
+ TNode<Uint32T> kind = DecodeWord32<Map::ElementsKindBits>(bit_field2);
+ return Signed(kind);
}
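EnsureArrayPushable now takes the map directly and checks two packed flags with a single masked compare: the receiver must be extensible and must not be a prototype map, after which the elements kind is decoded from the same bit field. A sketch of that mask-and-shift style of bitfield decoding, with an invented layout (V8's real Map::bit_field2 layout is not reproduced):

#include <cstdint>

constexpr uint32_t kIsPrototypeBit    = 1u << 0;             // hypothetical flag positions
constexpr uint32_t kIsExtensibleBit   = 1u << 1;
constexpr uint32_t kElementsKindShift = 2;
constexpr uint32_t kElementsKindMask  = 0x1Fu << kElementsKindShift;

// One masked compare rejects prototypes and non-extensible objects at once.
bool EnsurePushable(uint32_t bit_field2, uint32_t* elements_kind) {
  constexpr uint32_t mask = kIsPrototypeBit | kIsExtensibleBit;
  if ((bit_field2 & mask) != kIsExtensibleBit) return false;
  *elements_kind = (bit_field2 & kElementsKindMask) >> kElementsKindShift;
  return true;
}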
void CodeStubAssembler::PossiblyGrowElementsCapacity(
@@ -2387,8 +2467,7 @@ TNode<HeapNumber> CodeStubAssembler::AllocateHeapNumberWithValue(
TNode<BigInt> CodeStubAssembler::AllocateBigInt(TNode<IntPtrT> length) {
TNode<BigInt> result = AllocateRawBigInt(length);
- STATIC_ASSERT(BigInt::LengthBits::kShift == 0);
- StoreBigIntBitfield(result, length);
+ StoreBigIntBitfield(result, WordShl(length, BigInt::LengthBits::kShift));
return result;
}
@@ -2447,12 +2526,12 @@ TNode<String> CodeStubAssembler::AllocateSeqOneByteString(
return CAST(result);
}
-Node* CodeStubAssembler::IsZeroOrFixedArray(Node* object) {
+Node* CodeStubAssembler::IsZeroOrContext(Node* object) {
Label out(this);
VARIABLE(var_result, MachineRepresentation::kWord32, Int32Constant(1));
GotoIf(WordEqual(object, SmiConstant(0)), &out);
- GotoIf(IsFixedArray(object), &out);
+ GotoIf(IsContext(object), &out);
var_result.Bind(Int32Constant(0));
Goto(&out);
@@ -2464,7 +2543,7 @@ Node* CodeStubAssembler::IsZeroOrFixedArray(Node* object) {
TNode<String> CodeStubAssembler::AllocateSeqOneByteString(
Node* context, TNode<Smi> length, AllocationFlags flags) {
Comment("AllocateSeqOneByteString");
- CSA_SLOW_ASSERT(this, IsZeroOrFixedArray(context));
+ CSA_SLOW_ASSERT(this, IsZeroOrContext(context));
VARIABLE(var_result, MachineRepresentation::kTagged);
// Compute the SeqOneByteString size and check if it fits into new space.
@@ -2533,7 +2612,7 @@ TNode<String> CodeStubAssembler::AllocateSeqTwoByteString(
TNode<String> CodeStubAssembler::AllocateSeqTwoByteString(
Node* context, TNode<Smi> length, AllocationFlags flags) {
- CSA_SLOW_ASSERT(this, IsZeroOrFixedArray(context));
+ CSA_SLOW_ASSERT(this, IsZeroOrContext(context));
Comment("AllocateSeqTwoByteString");
VARIABLE(var_result, MachineRepresentation::kTagged);
@@ -2659,7 +2738,7 @@ TNode<String> CodeStubAssembler::NewConsString(Node* context, TNode<Smi> length,
TNode<String> left,
TNode<String> right,
AllocationFlags flags) {
- CSA_ASSERT(this, IsFixedArray(context));
+ CSA_ASSERT(this, IsContext(context));
// Added string can be a cons string.
Comment("Allocating ConsString");
Node* left_instance_type = LoadInstanceType(left);
@@ -3160,10 +3239,9 @@ Node* CodeStubAssembler::AllocateFixedArray(ElementsKind kind,
return array;
}
-Node* CodeStubAssembler::ExtractFixedArray(Node* fixed_array, Node* first,
- Node* count, Node* capacity,
- ExtractFixedArrayFlags extract_flags,
- ParameterMode parameter_mode) {
+TNode<FixedArray> CodeStubAssembler::ExtractFixedArray(
+ Node* fixed_array, Node* first, Node* count, Node* capacity,
+ ExtractFixedArrayFlags extract_flags, ParameterMode parameter_mode) {
VARIABLE(var_result, MachineRepresentation::kTagged);
VARIABLE(var_fixed_array_map, MachineRepresentation::kTagged);
const AllocationFlags flags =
@@ -3304,7 +3382,7 @@ Node* CodeStubAssembler::ExtractFixedArray(Node* fixed_array, Node* first,
}
BIND(&done);
- return var_result.value();
+ return UncheckedCast<FixedArray>(var_result.value());
}
void CodeStubAssembler::InitializePropertyArrayLength(Node* property_array,
@@ -3534,6 +3612,18 @@ void CodeStubAssembler::CopyFixedArrayElements(
Comment("] CopyFixedArrayElements");
}
+TNode<FixedArray> CodeStubAssembler::ConvertFixedArrayBaseToFixedArray(
+ TNode<FixedArrayBase> base, Label* cast_fail) {
+ Label fixed_array(this);
+ TNode<Map> map = LoadMap(base);
+ GotoIf(WordEqual(map, LoadRoot(Heap::kFixedArrayMapRootIndex)), &fixed_array);
+ GotoIf(WordNotEqual(map, LoadRoot(Heap::kFixedCOWArrayMapRootIndex)),
+ cast_fail);
+ Goto(&fixed_array);
+ BIND(&fixed_array);
+ return UncheckedCast<FixedArray>(base);
+}
+
void CodeStubAssembler::CopyPropertyArrayValues(Node* from_array,
Node* to_array,
Node* property_count,
@@ -4310,7 +4400,8 @@ void CodeStubAssembler::ThrowTypeError(Node* context,
Unreachable();
}
-Node* CodeStubAssembler::InstanceTypeEqual(Node* instance_type, int type) {
+TNode<BoolT> CodeStubAssembler::InstanceTypeEqual(
+ SloppyTNode<Int32T> instance_type, int type) {
return Word32Equal(instance_type, Int32Constant(type));
}
@@ -4347,6 +4438,13 @@ Node* CodeStubAssembler::IsNoElementsProtectorCellInvalid() {
return WordEqual(cell_value, invalid);
}
+Node* CodeStubAssembler::IsPromiseResolveProtectorCellInvalid() {
+ Node* invalid = SmiConstant(Isolate::kProtectorInvalid);
+ Node* cell = LoadRoot(Heap::kPromiseResolveProtectorRootIndex);
+ Node* cell_value = LoadObjectField(cell, Cell::kValueOffset);
+ return WordEqual(cell_value, invalid);
+}
+
Node* CodeStubAssembler::IsPromiseThenProtectorCellInvalid() {
Node* invalid = SmiConstant(Isolate::kProtectorInvalid);
Node* cell = LoadRoot(Heap::kPromiseThenProtectorRootIndex);
@@ -4354,9 +4452,23 @@ Node* CodeStubAssembler::IsPromiseThenProtectorCellInvalid() {
return WordEqual(cell_value, invalid);
}
-Node* CodeStubAssembler::IsSpeciesProtectorCellInvalid() {
+Node* CodeStubAssembler::IsArraySpeciesProtectorCellInvalid() {
+ Node* invalid = SmiConstant(Isolate::kProtectorInvalid);
+ Node* cell = LoadRoot(Heap::kArraySpeciesProtectorRootIndex);
+ Node* cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
+ return WordEqual(cell_value, invalid);
+}
+
+Node* CodeStubAssembler::IsTypedArraySpeciesProtectorCellInvalid() {
Node* invalid = SmiConstant(Isolate::kProtectorInvalid);
- Node* cell = LoadRoot(Heap::kSpeciesProtectorRootIndex);
+ Node* cell = LoadRoot(Heap::kTypedArraySpeciesProtectorRootIndex);
+ Node* cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
+ return WordEqual(cell_value, invalid);
+}
+
+Node* CodeStubAssembler::IsPromiseSpeciesProtectorCellInvalid() {
+ Node* invalid = SmiConstant(Isolate::kProtectorInvalid);
+ Node* cell = LoadRoot(Heap::kPromiseSpeciesProtectorRootIndex);
Node* cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
return WordEqual(cell_value, invalid);
}
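These helpers all follow the protector-cell pattern: each risky assumption (array @@species untouched, typed-array @@species untouched, promise @@species untouched, and so on) gets its own cell holding kProtectorValid, fast paths just compare the cell's value, and the runtime writes kProtectorInvalid the moment user code breaks the assumption. The idea in miniature, as plain C++ rather than heap roots and PropertyCells:

constexpr int kProtectorValid = 1;    // illustrative values, not Isolate's constants
constexpr int kProtectorInvalid = 0;

int array_species_protector = kProtectorValid;

bool IsArraySpeciesIntact() {               // cheap check on the fast path
  return array_species_protector == kProtectorValid;
}

void InvalidateArraySpecies() {             // called once when @@species is redefined
  array_species_protector = kProtectorInvalid;
}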
@@ -4378,7 +4490,7 @@ TNode<BoolT> CodeStubAssembler::IsPrototypeTypedArrayPrototype(
TNode<Object> proto = LoadMapPrototype(map);
TNode<Object> proto_of_proto = Select<Object>(
IsJSObject(proto), [=] { return LoadMapPrototype(LoadMap(CAST(proto))); },
- [=] { return NullConstant(); }, MachineRepresentation::kTagged);
+ [=] { return NullConstant(); });
return WordEqual(proto_of_proto, typed_array_prototype);
}
@@ -4408,7 +4520,8 @@ Node* CodeStubAssembler::IsFunctionWithPrototypeSlotMap(Node* map) {
return IsSetWord32<Map::HasPrototypeSlotBit>(LoadMapBitField(map));
}
-Node* CodeStubAssembler::IsSpecialReceiverInstanceType(Node* instance_type) {
+TNode<BoolT> CodeStubAssembler::IsSpecialReceiverInstanceType(
+ Node* instance_type) {
STATIC_ASSERT(JS_GLOBAL_OBJECT_TYPE <= LAST_SPECIAL_RECEIVER_TYPE);
return Int32LessThanOrEqual(instance_type,
Int32Constant(LAST_SPECIAL_RECEIVER_TYPE));
@@ -4467,12 +4580,6 @@ Node* CodeStubAssembler::IsJSReceiverInstanceType(Node* instance_type) {
Int32Constant(FIRST_JS_RECEIVER_TYPE));
}
-Node* CodeStubAssembler::IsArrayIteratorInstanceType(Node* instance_type) {
- return Uint32LessThan(
- Int32Constant(LAST_ARRAY_ITERATOR_TYPE - FIRST_ARRAY_ITERATOR_TYPE),
- Int32Sub(instance_type, Int32Constant(FIRST_ARRAY_ITERATOR_TYPE)));
-}
-
Node* CodeStubAssembler::IsJSReceiverMap(Node* map) {
return IsJSReceiverInstanceType(LoadMapInstanceType(map));
}
@@ -4551,10 +4658,21 @@ Node* CodeStubAssembler::IsJSArrayMap(Node* map) {
return IsJSArrayInstanceType(LoadMapInstanceType(map));
}
+Node* CodeStubAssembler::IsJSArrayIterator(Node* object) {
+ return HasInstanceType(object, JS_ARRAY_ITERATOR_TYPE);
+}
+
Node* CodeStubAssembler::IsJSAsyncGeneratorObject(Node* object) {
return HasInstanceType(object, JS_ASYNC_GENERATOR_OBJECT_TYPE);
}
+Node* CodeStubAssembler::IsContext(Node* object) {
+ Node* instance_type = LoadInstanceType(object);
+ return Word32And(
+ Int32GreaterThanOrEqual(instance_type, Int32Constant(FIRST_CONTEXT_TYPE)),
+ Int32LessThanOrEqual(instance_type, Int32Constant(LAST_CONTEXT_TYPE)));
+}
+
Node* CodeStubAssembler::IsFixedArray(Node* object) {
return HasInstanceType(object, FIXED_ARRAY_TYPE);
}
@@ -4638,7 +4756,7 @@ Node* CodeStubAssembler::IsAnyHeapNumber(Node* object) {
return Word32Or(IsMutableHeapNumber(object), IsHeapNumber(object));
}
-Node* CodeStubAssembler::IsHeapNumber(Node* object) {
+TNode<BoolT> CodeStubAssembler::IsHeapNumber(Node* object) {
return IsHeapNumberMap(LoadMap(object));
}
@@ -4684,8 +4802,8 @@ Node* CodeStubAssembler::IsPrimitiveInstanceType(Node* instance_type) {
Int32Constant(LAST_PRIMITIVE_TYPE));
}
-Node* CodeStubAssembler::IsPrivateSymbol(Node* object) {
- return Select(
+TNode<BoolT> CodeStubAssembler::IsPrivateSymbol(Node* object) {
+ return Select<BoolT>(
IsSymbol(object),
[=] {
TNode<Symbol> symbol = CAST(object);
@@ -4693,7 +4811,7 @@ Node* CodeStubAssembler::IsPrivateSymbol(Node* object) {
SmiToInt32(LoadObjectField<Smi>(symbol, Symbol::kFlagsOffset));
return IsSetWord32(flags, 1 << Symbol::kPrivateBit);
},
- [=] { return Int32Constant(0); }, MachineRepresentation::kWord32);
+ [=] { return Int32FalseConstant(); });
}
Node* CodeStubAssembler::IsNativeContext(Node* object) {
@@ -4754,65 +4872,54 @@ Node* CodeStubAssembler::IsJSRegExp(Node* object) {
return HasInstanceType(object, JS_REGEXP_TYPE);
}
-Node* CodeStubAssembler::IsNumeric(Node* object) {
- return Select(
- TaggedIsSmi(object), [=] { return Int32Constant(1); },
- [=] { return Word32Or(IsHeapNumber(object), IsBigInt(object)); },
- MachineRepresentation::kWord32);
+TNode<BoolT> CodeStubAssembler::IsNumber(SloppyTNode<Object> object) {
+ return Select<BoolT>(TaggedIsSmi(object), [=] { return Int32TrueConstant(); },
+ [=] { return IsHeapNumber(object); });
}
-Node* CodeStubAssembler::IsNumber(Node* object) {
- return Select(TaggedIsSmi(object), [=] { return Int32Constant(1); },
- [=] { return IsHeapNumber(object); },
- MachineRepresentation::kWord32);
-}
-
-Node* CodeStubAssembler::FixedArraySizeDoesntFitInNewSpace(Node* element_count,
- int base_size,
- ParameterMode mode) {
- int max_newspace_elements =
- (kMaxRegularHeapObjectSize - base_size) / kPointerSize;
- return IntPtrOrSmiGreaterThan(
- element_count, IntPtrOrSmiConstant(max_newspace_elements, mode), mode);
+TNode<BoolT> CodeStubAssembler::IsNumeric(SloppyTNode<Object> object) {
+ return Select<BoolT>(TaggedIsSmi(object), [=] { return Int32TrueConstant(); },
+ [=] {
+ return UncheckedCast<BoolT>(
+ Word32Or(IsHeapNumber(object), IsBigInt(object)));
+ });
}
-Node* CodeStubAssembler::IsNumberNormalized(Node* number) {
- CSA_ASSERT(this, IsNumber(number));
-
- VARIABLE(var_result, MachineRepresentation::kWord32, Int32Constant(1));
+TNode<BoolT> CodeStubAssembler::IsNumberNormalized(SloppyTNode<Number> number) {
+ TVARIABLE(BoolT, var_result, Int32TrueConstant());
Label out(this);
GotoIf(TaggedIsSmi(number), &out);
- Node* const value = LoadHeapNumberValue(number);
- Node* const smi_min = Float64Constant(static_cast<double>(Smi::kMinValue));
- Node* const smi_max = Float64Constant(static_cast<double>(Smi::kMaxValue));
+ TNode<Float64T> value = LoadHeapNumberValue(CAST(number));
+ TNode<Float64T> smi_min =
+ Float64Constant(static_cast<double>(Smi::kMinValue));
+ TNode<Float64T> smi_max =
+ Float64Constant(static_cast<double>(Smi::kMaxValue));
GotoIf(Float64LessThan(value, smi_min), &out);
GotoIf(Float64GreaterThan(value, smi_max), &out);
GotoIfNot(Float64Equal(value, value), &out); // NaN.
- var_result.Bind(Int32Constant(0));
+ var_result = Int32FalseConstant();
Goto(&out);
BIND(&out);
return var_result.value();
}
-Node* CodeStubAssembler::IsNumberPositive(Node* number) {
- CSA_ASSERT(this, IsNumber(number));
- Node* const float_zero = Float64Constant(0.);
- return Select(TaggedIsSmi(number),
- [=] { return TaggedIsPositiveSmi(number); },
- [=] {
- Node* v = LoadHeapNumberValue(number);
- return Float64GreaterThanOrEqual(v, float_zero);
- },
- MachineRepresentation::kWord32);
+TNode<BoolT> CodeStubAssembler::IsNumberPositive(SloppyTNode<Number> number) {
+ TNode<Float64T> float_zero = Float64Constant(0.);
+ return Select<BoolT>(TaggedIsSmi(number),
+ [=] { return TaggedIsPositiveSmi(number); },
+ [=] {
+ TNode<Float64T> v = LoadHeapNumberValue(CAST(number));
+ return Float64GreaterThanOrEqual(v, float_zero);
+ });
}
-Node* CodeStubAssembler::IsNumberArrayIndex(Node* number) {
- VARIABLE(var_result, MachineRepresentation::kWord32, Int32Constant(1));
+TNode<BoolT> CodeStubAssembler::IsNumberArrayIndex(SloppyTNode<Number> number) {
+ TVARIABLE(BoolT, var_result, Int32TrueConstant());
Label check_upper_bound(this), check_is_integer(this), out(this),
return_false(this);
@@ -4828,19 +4935,28 @@ Node* CodeStubAssembler::IsNumberArrayIndex(Node* number) {
BIND(&check_is_integer);
GotoIf(TaggedIsSmi(number), &out);
// Check that the HeapNumber is a valid uint32
- Node* value = LoadHeapNumberValue(number);
- Node* int_value = ChangeFloat64ToUint32(value);
+ TNode<Float64T> value = LoadHeapNumberValue(CAST(number));
+ TNode<Uint32T> int_value = ChangeFloat64ToUint32(value);
GotoIf(Float64Equal(value, ChangeUint32ToFloat64(int_value)), &out);
Goto(&return_false);
BIND(&return_false);
- var_result.Bind(Int32Constant(0));
+ var_result = Int32FalseConstant();
Goto(&out);
BIND(&out);
return var_result.value();
}
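IsNumberArrayIndex accepts non-negative Smis directly; for HeapNumbers it checks that the double value survives a round-trip through uint32. The same round-trip test in isolation (the range guard below only keeps the C++ cast well defined, it is not part of the CSA code):

#include <cstdint>

bool IsUint32Index(double value) {
  if (!(value >= 0.0 && value < 4294967296.0)) return false;  // NaN and out-of-range fail here
  uint32_t as_uint = static_cast<uint32_t>(value);
  return static_cast<double>(as_uint) == value;               // integral and fits in 32 bits
}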
+Node* CodeStubAssembler::FixedArraySizeDoesntFitInNewSpace(Node* element_count,
+ int base_size,
+ ParameterMode mode) {
+ int max_newspace_elements =
+ (kMaxRegularHeapObjectSize - base_size) / kPointerSize;
+ return IntPtrOrSmiGreaterThan(
+ element_count, IntPtrOrSmiConstant(max_newspace_elements, mode), mode);
+}
+
TNode<Int32T> CodeStubAssembler::StringCharCodeAt(SloppyTNode<String> string,
SloppyTNode<IntPtrT> index) {
CSA_ASSERT(this, IsString(string));
@@ -4891,7 +5007,7 @@ TNode<Int32T> CodeStubAssembler::StringCharCodeAt(SloppyTNode<String> string,
return var_result.value();
}
-TNode<String> CodeStubAssembler::StringFromCharCode(TNode<Int32T> code) {
+TNode<String> CodeStubAssembler::StringFromSingleCharCode(TNode<Int32T> code) {
VARIABLE(var_result, MachineRepresentation::kTagged);
// Check if the {code} is a one-byte char code.
@@ -5089,7 +5205,7 @@ TNode<String> CodeStubAssembler::SubString(TNode<String> string,
BIND(&single_char);
{
TNode<Int32T> char_code = StringCharCodeAt(string, from);
- var_result = StringFromCharCode(char_code);
+ var_result = StringFromSingleCharCode(char_code);
Goto(&end);
}
@@ -5282,6 +5398,16 @@ void CodeStubAssembler::BranchIfCanDerefIndirectString(Node* string,
Goto(cannot_deref);
}
+Node* CodeStubAssembler::DerefIndirectString(TNode<String> string,
+ TNode<Int32T> instance_type,
+ Label* cannot_deref) {
+ Label deref(this);
+ BranchIfCanDerefIndirectString(string, instance_type, &deref, cannot_deref);
+ BIND(&deref);
+ STATIC_ASSERT(ThinString::kActualOffset == ConsString::kFirstOffset);
+ return LoadObjectField(string, ThinString::kActualOffset);
+}
+
void CodeStubAssembler::DerefIndirectString(Variable* var_string,
Node* instance_type) {
#ifdef DEBUG
@@ -5454,8 +5580,8 @@ TNode<String> CodeStubAssembler::StringAdd(Node* context, TNode<String> left,
return result.value();
}
-TNode<String> CodeStubAssembler::StringFromCodePoint(TNode<Int32T> codepoint,
- UnicodeEncoding encoding) {
+TNode<String> CodeStubAssembler::StringFromSingleCodePoint(
+ TNode<Int32T> codepoint, UnicodeEncoding encoding) {
VARIABLE(var_result, MachineRepresentation::kTagged, EmptyStringConstant());
Label if_isword16(this), if_isword32(this), return_result(this);
@@ -5465,7 +5591,7 @@ TNode<String> CodeStubAssembler::StringFromCodePoint(TNode<Int32T> codepoint,
BIND(&if_isword16);
{
- var_result.Bind(StringFromCharCode(codepoint));
+ var_result.Bind(StringFromSingleCharCode(codepoint));
Goto(&return_result);
}
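The word16 branch shown here produces a one-unit string via StringFromSingleCharCode; code points that do not fit in a single 16-bit unit are handled on the other branch. For reference, the standard UTF-16 encoding of a single code point (background only, not V8 code):

#include <cstdint>

int EncodeUtf16(uint32_t code_point, uint16_t out[2]) {
  if (code_point <= 0xFFFF) {
    out[0] = static_cast<uint16_t>(code_point);               // fits in one unit
    return 1;
  }
  uint32_t v = code_point - 0x10000;
  out[0] = static_cast<uint16_t>(0xD800 + (v >> 10));         // lead surrogate
  out[1] = static_cast<uint16_t>(0xDC00 + (v & 0x3FF));       // trail surrogate
  return 2;
}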
@@ -5607,25 +5733,26 @@ TNode<String> CodeStubAssembler::NumberToString(TNode<Number> input) {
return result.value();
}
-Node* CodeStubAssembler::ToName(Node* context, Node* value) {
+TNode<Name> CodeStubAssembler::ToName(SloppyTNode<Context> context,
+ SloppyTNode<Object> value) {
Label end(this);
- VARIABLE(var_result, MachineRepresentation::kTagged);
+ TVARIABLE(Name, var_result);
Label is_number(this);
GotoIf(TaggedIsSmi(value), &is_number);
Label not_name(this);
- Node* value_instance_type = LoadInstanceType(value);
+ TNode<Int32T> value_instance_type = LoadInstanceType(CAST(value));
STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
GotoIf(Int32GreaterThan(value_instance_type, Int32Constant(LAST_NAME_TYPE)),
&not_name);
- var_result.Bind(value);
+ var_result = CAST(value);
Goto(&end);
BIND(&is_number);
{
- var_result.Bind(CallBuiltin(Builtins::kNumberToString, context, value));
+ var_result = CAST(CallBuiltin(Builtins::kNumberToString, context, value));
Goto(&end);
}
@@ -5638,18 +5765,17 @@ Node* CodeStubAssembler::ToName(Node* context, Node* value) {
GotoIfNot(InstanceTypeEqual(value_instance_type, ODDBALL_TYPE),
&not_oddball);
- var_result.Bind(LoadObjectField(value, Oddball::kToStringOffset));
+ var_result = LoadObjectField<String>(CAST(value), Oddball::kToStringOffset);
Goto(&end);
BIND(&not_oddball);
{
- var_result.Bind(CallRuntime(Runtime::kToName, context, value));
+ var_result = CAST(CallRuntime(Runtime::kToName, context, value));
Goto(&end);
}
}
BIND(&end);
- CSA_ASSERT(this, IsName(var_result.value()));
return var_result.value();
}
@@ -5799,8 +5925,7 @@ TNode<Number> CodeStubAssembler::ToNumber_Inline(SloppyTNode<Context> context,
[=] {
return CAST(CallBuiltin(Builtins::kNonNumberToNumber,
context, input));
- },
- MachineRepresentation::kTagged);
+ });
Goto(&end);
}
@@ -6167,23 +6292,23 @@ TNode<Smi> CodeStubAssembler::ToSmiLength(TNode<Object> input,
return result.value();
}
-Node* CodeStubAssembler::ToLength_Inline(Node* const context,
- Node* const input) {
- Node* const smi_zero = SmiConstant(0);
- return Select(
- TaggedIsSmi(input), [=] { return SmiMax(input, smi_zero); },
- [=] { return CallBuiltin(Builtins::kToLength, context, input); },
- MachineRepresentation::kTagged);
+TNode<Number> CodeStubAssembler::ToLength_Inline(SloppyTNode<Context> context,
+ SloppyTNode<Object> input) {
+ TNode<Smi> smi_zero = SmiConstant(0);
+ return Select<Number>(
+ TaggedIsSmi(input), [=] { return SmiMax(CAST(input), smi_zero); },
+ [=] { return CAST(CallBuiltin(Builtins::kToLength, context, input)); });
}
TNode<Number> CodeStubAssembler::ToInteger_Inline(
- TNode<Context> context, TNode<Object> input, ToIntegerTruncationMode mode) {
+ SloppyTNode<Context> context, SloppyTNode<Object> input,
+ ToIntegerTruncationMode mode) {
Builtins::Name builtin = (mode == kNoTruncation)
? Builtins::kToInteger
: Builtins::kToInteger_TruncateMinusZero;
- return CAST(Select(TaggedIsSmi(input), [=] { return input; },
- [=] { return CallBuiltin(builtin, context, input); },
- MachineRepresentation::kTagged));
+ return Select<Number>(
+ TaggedIsSmi(input), [=] { return CAST(input); },
+ [=] { return CAST(CallBuiltin(builtin, context, input)); });
}
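ToLength_Inline only needs the Smi fast path shown here, max(input, 0), because the full clamping is delegated to the ToLength builtin. Per the ECMAScript definition that builtin implements, ToLength truncates and clamps into [0, 2^53 - 1]; a plain numeric sketch:

#include <algorithm>
#include <cmath>

double ToLength(double value) {
  if (std::isnan(value)) return 0.0;                 // ToInteger(NaN) is +0
  double integer = std::trunc(value);                // truncate toward zero
  return std::min(std::max(integer, 0.0), 9007199254740991.0);  // 2^53 - 1
}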
TNode<Number> CodeStubAssembler::ToInteger(SloppyTNode<Context> context,
@@ -6244,8 +6369,9 @@ TNode<Number> CodeStubAssembler::ToInteger(SloppyTNode<Context> context,
}
BIND(&out);
- if (mode == kTruncateMinusZero)
- CSA_ASSERT(this, IsNumberNormalized(var_arg.value()));
+ if (mode == kTruncateMinusZero) {
+ CSA_ASSERT(this, IsNumberNormalized(CAST(var_arg.value())));
+ }
return CAST(var_arg.value());
}
@@ -6422,8 +6548,8 @@ TNode<IntPtrT> CodeStubAssembler::IntPtrMax(SloppyTNode<IntPtrT> left,
ToIntPtrConstant(right, right_constant)) {
return IntPtrConstant(std::max(left_constant, right_constant));
}
- return SelectConstant(IntPtrGreaterThanOrEqual(left, right), left, right,
- MachineType::PointerRepresentation());
+ return SelectConstant<IntPtrT>(IntPtrGreaterThanOrEqual(left, right), left,
+ right);
}
TNode<IntPtrT> CodeStubAssembler::IntPtrMin(SloppyTNode<IntPtrT> left,
@@ -6434,8 +6560,8 @@ TNode<IntPtrT> CodeStubAssembler::IntPtrMin(SloppyTNode<IntPtrT> left,
ToIntPtrConstant(right, right_constant)) {
return IntPtrConstant(std::min(left_constant, right_constant));
}
- return SelectConstant(IntPtrLessThanOrEqual(left, right), left, right,
- MachineType::PointerRepresentation());
+ return SelectConstant<IntPtrT>(IntPtrLessThanOrEqual(left, right), left,
+ right);
}
template <class Dictionary>
@@ -6725,109 +6851,148 @@ void CodeStubAssembler::Add(Node* dictionary, Node* key, Node* value,
template void CodeStubAssembler::Add<NameDictionary>(Node*, Node*, Node*,
Label*);
-void CodeStubAssembler::DescriptorLookupLinear(Node* unique_name,
- Node* descriptors, Node* nof,
- Label* if_found,
- Variable* var_name_index,
- Label* if_not_found) {
- Comment("DescriptorLookupLinear");
- Node* first_inclusive = IntPtrConstant(DescriptorArray::ToKeyIndex(0));
- Node* factor = IntPtrConstant(DescriptorArray::kEntrySize);
- Node* last_exclusive = IntPtrAdd(first_inclusive, IntPtrMul(nof, factor));
+template <typename Array>
+void CodeStubAssembler::LookupLinear(TNode<Name> unique_name,
+ TNode<Array> array,
+ TNode<Uint32T> number_of_valid_entries,
+ Label* if_found,
+ TVariable<IntPtrT>* var_name_index,
+ Label* if_not_found) {
+ Comment("LookupLinear");
+ TNode<IntPtrT> first_inclusive = IntPtrConstant(Array::ToKeyIndex(0));
+ TNode<IntPtrT> factor = IntPtrConstant(Array::kEntrySize);
+ TNode<IntPtrT> last_exclusive = IntPtrAdd(
+ first_inclusive,
+ IntPtrMul(ChangeInt32ToIntPtr(number_of_valid_entries), factor));
BuildFastLoop(last_exclusive, first_inclusive,
- [this, descriptors, unique_name, if_found,
- var_name_index](Node* name_index) {
- Node* candidate_name =
- LoadFixedArrayElement(descriptors, name_index);
- var_name_index->Bind(name_index);
+ [=](SloppyTNode<IntPtrT> name_index) {
+ TNode<Name> candidate_name =
+ CAST(LoadFixedArrayElement(array, name_index));
+ *var_name_index = name_index;
GotoIf(WordEqual(candidate_name, unique_name), if_found);
},
- -DescriptorArray::kEntrySize, INTPTR_PARAMETERS,
- IndexAdvanceMode::kPre);
+ -Array::kEntrySize, INTPTR_PARAMETERS, IndexAdvanceMode::kPre);
Goto(if_not_found);
}
-Node* CodeStubAssembler::DescriptorArrayNumberOfEntries(Node* descriptors) {
- return LoadAndUntagToWord32FixedArrayElement(
- descriptors, IntPtrConstant(DescriptorArray::kDescriptorLengthIndex));
+template <>
+TNode<Uint32T> CodeStubAssembler::NumberOfEntries<DescriptorArray>(
+ TNode<DescriptorArray> descriptors) {
+ return Unsigned(LoadAndUntagToWord32FixedArrayElement(
+ descriptors, IntPtrConstant(DescriptorArray::kDescriptorLengthIndex)));
+}
+
+template <>
+TNode<Uint32T> CodeStubAssembler::NumberOfEntries<TransitionArray>(
+ TNode<TransitionArray> transitions) {
+ TNode<IntPtrT> length = LoadAndUntagFixedArrayBaseLength(transitions);
+ return Select<Uint32T>(
+ UintPtrLessThan(length, IntPtrConstant(TransitionArray::kFirstIndex)),
+ [=] { return Unsigned(Int32Constant(0)); },
+ [=] {
+ return Unsigned(LoadAndUntagToWord32FixedArrayElement(
+ transitions,
+ IntPtrConstant(TransitionArray::kTransitionLengthIndex)));
+ });
}
-Node* CodeStubAssembler::DescriptorNumberToIndex(
- SloppyTNode<Uint32T> descriptor_number) {
- Node* descriptor_size = Int32Constant(DescriptorArray::kEntrySize);
- Node* index = Int32Mul(descriptor_number, descriptor_size);
+template <typename Array>
+TNode<IntPtrT> CodeStubAssembler::EntryIndexToIndex(
+ TNode<Uint32T> entry_index) {
+ TNode<Int32T> entry_size = Int32Constant(Array::kEntrySize);
+ TNode<Word32T> index = Int32Mul(entry_index, entry_size);
return ChangeInt32ToIntPtr(index);
}
-Node* CodeStubAssembler::DescriptorArrayToKeyIndex(Node* descriptor_number) {
- return IntPtrAdd(IntPtrConstant(DescriptorArray::ToKeyIndex(0)),
- DescriptorNumberToIndex(descriptor_number));
+template <typename Array>
+TNode<IntPtrT> CodeStubAssembler::ToKeyIndex(TNode<Uint32T> entry_index) {
+ return IntPtrAdd(IntPtrConstant(Array::ToKeyIndex(0)),
+ EntryIndexToIndex<Array>(entry_index));
}
-Node* CodeStubAssembler::DescriptorArrayGetSortedKeyIndex(
- Node* descriptors, Node* descriptor_number) {
- Node* details = DescriptorArrayGetDetails(
- TNode<DescriptorArray>::UncheckedCast(descriptors),
- TNode<Uint32T>::UncheckedCast(descriptor_number));
+template TNode<IntPtrT> CodeStubAssembler::ToKeyIndex<DescriptorArray>(
+ TNode<Uint32T>);
+template TNode<IntPtrT> CodeStubAssembler::ToKeyIndex<TransitionArray>(
+ TNode<Uint32T>);
+
+template <>
+TNode<Uint32T> CodeStubAssembler::GetSortedKeyIndex<DescriptorArray>(
+ TNode<DescriptorArray> descriptors, TNode<Uint32T> descriptor_number) {
+ TNode<Uint32T> details =
+ DescriptorArrayGetDetails(descriptors, descriptor_number);
return DecodeWord32<PropertyDetails::DescriptorPointer>(details);
}
-Node* CodeStubAssembler::DescriptorArrayGetKey(Node* descriptors,
- Node* descriptor_number) {
+template <>
+TNode<Uint32T> CodeStubAssembler::GetSortedKeyIndex<TransitionArray>(
+ TNode<TransitionArray> transitions, TNode<Uint32T> transition_number) {
+ return transition_number;
+}
+
+template <typename Array>
+TNode<Name> CodeStubAssembler::GetKey(TNode<Array> array,
+ TNode<Uint32T> entry_index) {
const int key_offset = DescriptorArray::ToKeyIndex(0) * kPointerSize;
- return LoadFixedArrayElement(
- descriptors, DescriptorNumberToIndex(descriptor_number), key_offset);
+ return CAST(LoadFixedArrayElement(
+ array, EntryIndexToIndex<Array>(entry_index), key_offset));
}
+template TNode<Name> CodeStubAssembler::GetKey<DescriptorArray>(
+ TNode<DescriptorArray>, TNode<Uint32T>);
+template TNode<Name> CodeStubAssembler::GetKey<TransitionArray>(
+ TNode<TransitionArray>, TNode<Uint32T>);
+
TNode<Uint32T> CodeStubAssembler::DescriptorArrayGetDetails(
TNode<DescriptorArray> descriptors, TNode<Uint32T> descriptor_number) {
const int details_offset = DescriptorArray::ToDetailsIndex(0) * kPointerSize;
- return TNode<Uint32T>::UncheckedCast(LoadAndUntagToWord32FixedArrayElement(
- descriptors, DescriptorNumberToIndex(descriptor_number), details_offset));
-}
-
-void CodeStubAssembler::DescriptorLookupBinary(Node* unique_name,
- Node* descriptors, Node* nof,
- Label* if_found,
- Variable* var_name_index,
- Label* if_not_found) {
- Comment("DescriptorLookupBinary");
- VARIABLE(var_low, MachineRepresentation::kWord32, Int32Constant(0));
- Node* limit =
- Int32Sub(DescriptorArrayNumberOfEntries(descriptors), Int32Constant(1));
- VARIABLE(var_high, MachineRepresentation::kWord32, limit);
- Node* hash = LoadNameHashField(unique_name);
+ return Unsigned(LoadAndUntagToWord32FixedArrayElement(
+ descriptors, EntryIndexToIndex<DescriptorArray>(descriptor_number),
+ details_offset));
+}
+
+template <typename Array>
+void CodeStubAssembler::LookupBinary(TNode<Name> unique_name,
+ TNode<Array> array,
+ TNode<Uint32T> number_of_valid_entries,
+ Label* if_found,
+ TVariable<IntPtrT>* var_name_index,
+ Label* if_not_found) {
+ Comment("LookupBinary");
+ TVARIABLE(Uint32T, var_low, Unsigned(Int32Constant(0)));
+ TNode<Uint32T> limit =
+ Unsigned(Int32Sub(NumberOfEntries<Array>(array), Int32Constant(1)));
+ TVARIABLE(Uint32T, var_high, limit);
+ TNode<Uint32T> hash = LoadNameHashField(unique_name);
CSA_ASSERT(this, Word32NotEqual(hash, Int32Constant(0)));
// Assume non-empty array.
CSA_ASSERT(this, Uint32LessThanOrEqual(var_low.value(), var_high.value()));
- Variable* loop_vars[] = {&var_high, &var_low};
- Label binary_loop(this, 2, loop_vars);
+ Label binary_loop(this, {&var_high, &var_low});
Goto(&binary_loop);
BIND(&binary_loop);
{
// mid = low + (high - low) / 2 (to avoid overflow in "(low + high) / 2").
- Node* mid =
+ TNode<Uint32T> mid = Unsigned(
Int32Add(var_low.value(),
- Word32Shr(Int32Sub(var_high.value(), var_low.value()), 1));
- // mid_name = descriptors->GetSortedKey(mid).
- Node* sorted_key_index = DescriptorArrayGetSortedKeyIndex(descriptors, mid);
- Node* mid_name = DescriptorArrayGetKey(descriptors, sorted_key_index);
+ Word32Shr(Int32Sub(var_high.value(), var_low.value()), 1)));
+ // mid_name = array->GetSortedKey(mid).
+ TNode<Uint32T> sorted_key_index = GetSortedKeyIndex<Array>(array, mid);
+ TNode<Name> mid_name = GetKey<Array>(array, sorted_key_index);
- Node* mid_hash = LoadNameHashField(mid_name);
+ TNode<Uint32T> mid_hash = LoadNameHashField(mid_name);
Label mid_greater(this), mid_less(this), merge(this);
Branch(Uint32GreaterThanOrEqual(mid_hash, hash), &mid_greater, &mid_less);
BIND(&mid_greater);
{
- var_high.Bind(mid);
+ var_high = mid;
Goto(&merge);
}
BIND(&mid_less);
{
- var_low.Bind(Int32Add(mid, Int32Constant(1)));
+ var_low = Unsigned(Int32Add(mid, Int32Constant(1)));
Goto(&merge);
}
BIND(&merge);
@@ -6840,85 +7005,121 @@ void CodeStubAssembler::DescriptorLookupBinary(Node* unique_name,
{
GotoIf(Int32GreaterThan(var_low.value(), limit), if_not_found);
- Node* sort_index =
- DescriptorArrayGetSortedKeyIndex(descriptors, var_low.value());
- Node* current_name = DescriptorArrayGetKey(descriptors, sort_index);
- Node* current_hash = LoadNameHashField(current_name);
+ TNode<Uint32T> sort_index =
+ GetSortedKeyIndex<Array>(array, var_low.value());
+ TNode<Name> current_name = GetKey<Array>(array, sort_index);
+ TNode<Uint32T> current_hash = LoadNameHashField(current_name);
GotoIf(Word32NotEqual(current_hash, hash), if_not_found);
Label next(this);
GotoIf(WordNotEqual(current_name, unique_name), &next);
- GotoIf(Int32GreaterThanOrEqual(sort_index, nof), if_not_found);
- var_name_index->Bind(DescriptorArrayToKeyIndex(sort_index));
+ GotoIf(Uint32GreaterThanOrEqual(sort_index, number_of_valid_entries),
+ if_not_found);
+ *var_name_index = ToKeyIndex<Array>(sort_index);
Goto(if_found);
BIND(&next);
- var_low.Bind(Int32Add(var_low.value(), Int32Constant(1)));
+ var_low = Unsigned(Int32Add(var_low.value(), Int32Constant(1)));
Goto(&scan_loop);
}
}
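LookupBinary searches an array whose keys are sorted by hash: the binary loop narrows [low, high] on the hash alone, and the trailing scan loop walks forward over equal hashes until the exact name matches or the valid range is exhausted. The midpoint is written as low + (high - low) / 2 precisely so the addition cannot overflow; the same shape as ordinary C++:

#include <cstdint>
#include <vector>

// Lower-bound binary search with an overflow-safe midpoint.
int LowerBound(const std::vector<uint32_t>& sorted, uint32_t key) {
  uint32_t low = 0, high = static_cast<uint32_t>(sorted.size());
  while (low < high) {
    uint32_t mid = low + (high - low) / 2;   // never overflows, unlike (low + high) / 2
    if (sorted[mid] < key) low = mid + 1; else high = mid;
  }
  return (low < sorted.size() && sorted[low] == key) ? static_cast<int>(low) : -1;
}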
-void CodeStubAssembler::DescriptorLookup(Node* unique_name, Node* descriptors,
- Node* bitfield3, Label* if_found,
- Variable* var_name_index,
- Label* if_not_found) {
+void CodeStubAssembler::DescriptorLookup(
+ SloppyTNode<Name> unique_name, SloppyTNode<DescriptorArray> descriptors,
+ SloppyTNode<Uint32T> bitfield3, Label* if_found,
+ TVariable<IntPtrT>* var_name_index, Label* if_not_found) {
Comment("DescriptorArrayLookup");
- Node* nof = DecodeWord32<Map::NumberOfOwnDescriptorsBits>(bitfield3);
- GotoIf(Word32Equal(nof, Int32Constant(0)), if_not_found);
+ TNode<Uint32T> nof = DecodeWord32<Map::NumberOfOwnDescriptorsBits>(bitfield3);
+ Lookup<DescriptorArray>(unique_name, descriptors, nof, if_found,
+ var_name_index, if_not_found);
+}
+
+void CodeStubAssembler::TransitionLookup(
+ SloppyTNode<Name> unique_name, SloppyTNode<TransitionArray> transitions,
+ Label* if_found, TVariable<IntPtrT>* var_name_index, Label* if_not_found) {
+ Comment("TransitionArrayLookup");
+ TNode<Uint32T> number_of_valid_transitions =
+ NumberOfEntries<TransitionArray>(transitions);
+ Lookup<TransitionArray>(unique_name, transitions, number_of_valid_transitions,
+ if_found, var_name_index, if_not_found);
+}
+
+template <typename Array>
+void CodeStubAssembler::Lookup(TNode<Name> unique_name, TNode<Array> array,
+ TNode<Uint32T> number_of_valid_entries,
+ Label* if_found,
+ TVariable<IntPtrT>* var_name_index,
+ Label* if_not_found) {
+ Comment("ArrayLookup");
+ if (!number_of_valid_entries) {
+ number_of_valid_entries = NumberOfEntries(array);
+ }
+ GotoIf(Word32Equal(number_of_valid_entries, Int32Constant(0)), if_not_found);
Label linear_search(this), binary_search(this);
const int kMaxElementsForLinearSearch = 32;
- Branch(Int32LessThanOrEqual(nof, Int32Constant(kMaxElementsForLinearSearch)),
+ Branch(Uint32LessThanOrEqual(number_of_valid_entries,
+ Int32Constant(kMaxElementsForLinearSearch)),
&linear_search, &binary_search);
BIND(&linear_search);
{
- DescriptorLookupLinear(unique_name, descriptors, ChangeInt32ToIntPtr(nof),
- if_found, var_name_index, if_not_found);
+ LookupLinear<Array>(unique_name, array, number_of_valid_entries, if_found,
+ var_name_index, if_not_found);
}
BIND(&binary_search);
{
- DescriptorLookupBinary(unique_name, descriptors, nof, if_found,
- var_name_index, if_not_found);
+ LookupBinary<Array>(unique_name, array, number_of_valid_entries, if_found,
+ var_name_index, if_not_found);
}
}
-void CodeStubAssembler::TryLookupProperty(
- Node* object, Node* map, Node* instance_type, Node* unique_name,
- Label* if_found_fast, Label* if_found_dict, Label* if_found_global,
- Variable* var_meta_storage, Variable* var_name_index, Label* if_not_found,
- Label* if_bailout) {
- DCHECK_EQ(MachineRepresentation::kTagged, var_meta_storage->rep());
- DCHECK_EQ(MachineType::PointerRepresentation(), var_name_index->rep());
-
- Label if_objectisspecial(this);
- STATIC_ASSERT(JS_GLOBAL_OBJECT_TYPE <= LAST_SPECIAL_RECEIVER_TYPE);
- GotoIf(Int32LessThanOrEqual(instance_type,
- Int32Constant(LAST_SPECIAL_RECEIVER_TYPE)),
- &if_objectisspecial);
+void CodeStubAssembler::TryLookupPropertyInSimpleObject(
+ TNode<JSObject> object, TNode<Map> map, TNode<Name> unique_name,
+ Label* if_found_fast, Label* if_found_dict,
+ TVariable<HeapObject>* var_meta_storage, TVariable<IntPtrT>* var_name_index,
+ Label* if_not_found) {
+ CSA_ASSERT(
+ this,
+ Word32BinaryNot(IsSpecialReceiverInstanceType(LoadMapInstanceType(map))));
uint32_t mask =
Map::HasNamedInterceptorBit::kMask | Map::IsAccessCheckNeededBit::kMask;
CSA_ASSERT(this, Word32BinaryNot(IsSetWord32(LoadMapBitField(map), mask)));
USE(mask);
- Node* bit_field3 = LoadMapBitField3(map);
+ TNode<Uint32T> bit_field3 = LoadMapBitField3(map);
Label if_isfastmap(this), if_isslowmap(this);
Branch(IsSetWord32<Map::IsDictionaryMapBit>(bit_field3), &if_isslowmap,
&if_isfastmap);
BIND(&if_isfastmap);
{
- Node* descriptors = LoadMapDescriptors(map);
- var_meta_storage->Bind(descriptors);
+ TNode<DescriptorArray> descriptors = LoadMapDescriptors(map);
+ *var_meta_storage = descriptors;
DescriptorLookup(unique_name, descriptors, bit_field3, if_found_fast,
var_name_index, if_not_found);
}
BIND(&if_isslowmap);
{
- Node* dictionary = LoadSlowProperties(object);
- var_meta_storage->Bind(dictionary);
+ TNode<NameDictionary> dictionary = CAST(LoadSlowProperties(object));
+ *var_meta_storage = dictionary;
NameDictionaryLookup<NameDictionary>(dictionary, unique_name, if_found_dict,
var_name_index, if_not_found);
}
+}
+
+void CodeStubAssembler::TryLookupProperty(
+ SloppyTNode<JSObject> object, SloppyTNode<Map> map,
+ SloppyTNode<Int32T> instance_type, SloppyTNode<Name> unique_name,
+ Label* if_found_fast, Label* if_found_dict, Label* if_found_global,
+ TVariable<HeapObject>* var_meta_storage, TVariable<IntPtrT>* var_name_index,
+ Label* if_not_found, Label* if_bailout) {
+ Label if_objectisspecial(this);
+ GotoIf(IsSpecialReceiverInstanceType(instance_type), &if_objectisspecial);
+
+ TryLookupPropertyInSimpleObject(object, map, unique_name, if_found_fast,
+ if_found_dict, var_meta_storage,
+ var_name_index, if_not_found);
+
BIND(&if_objectisspecial);
{
// Handle global object here and bailout for other special objects.
@@ -6926,13 +7127,13 @@ void CodeStubAssembler::TryLookupProperty(
if_bailout);
// Handle interceptors and access checks in runtime.
- Node* bit_field = LoadMapBitField(map);
+ TNode<Int32T> bit_field = LoadMapBitField(map);
int mask =
Map::HasNamedInterceptorBit::kMask | Map::IsAccessCheckNeededBit::kMask;
GotoIf(IsSetWord32(bit_field, mask), if_bailout);
- Node* dictionary = LoadSlowProperties(object);
- var_meta_storage->Bind(dictionary);
+ TNode<GlobalDictionary> dictionary = CAST(LoadSlowProperties(object));
+ *var_meta_storage = dictionary;
NameDictionaryLookup<GlobalDictionary>(
dictionary, unique_name, if_found_global, var_name_index, if_not_found);
@@ -6945,8 +7146,8 @@ void CodeStubAssembler::TryHasOwnProperty(Node* object, Node* map,
Label* if_not_found,
Label* if_bailout) {
Comment("TryHasOwnProperty");
- VARIABLE(var_meta_storage, MachineRepresentation::kTagged);
- VARIABLE(var_name_index, MachineType::PointerRepresentation());
+ TVARIABLE(HeapObject, var_meta_storage);
+ TVARIABLE(IntPtrT, var_name_index);
Label if_found_global(this);
TryLookupProperty(object, map, instance_type, unique_name, if_found, if_found,
@@ -7127,10 +7328,9 @@ void CodeStubAssembler::LoadPropertyFromGlobalDictionary(Node* dictionary,
// |value| is the property backing store's contents, which is either a value
// or an accessor pair, as specified by |details|.
// Returns either the original value, or the result of the getter call.
-Node* CodeStubAssembler::CallGetterIfAccessor(Node* value, Node* details,
- Node* context, Node* receiver,
- Label* if_bailout,
- GetOwnPropertyMode mode) {
+TNode<Object> CodeStubAssembler::CallGetterIfAccessor(
+ Node* value, Node* details, Node* context, Node* receiver,
+ Label* if_bailout, GetOwnPropertyMode mode) {
VARIABLE(var_value, MachineRepresentation::kTagged, value);
Label done(this), if_accessor_info(this, Label::kDeferred);
@@ -7228,7 +7428,7 @@ Node* CodeStubAssembler::CallGetterIfAccessor(Node* value, Node* details,
}
BIND(&done);
- return var_value.value();
+ return UncheckedCast<Object>(var_value.value());
}
void CodeStubAssembler::TryGetOwnProperty(
@@ -7248,8 +7448,8 @@ void CodeStubAssembler::TryGetOwnProperty(
DCHECK_EQ(MachineRepresentation::kTagged, var_value->rep());
Comment("TryGetOwnProperty");
- VARIABLE(var_meta_storage, MachineRepresentation::kTagged);
- VARIABLE(var_entry, MachineType::PointerRepresentation());
+ TVARIABLE(HeapObject, var_meta_storage);
+ TVARIABLE(IntPtrT, var_entry);
Label if_found_fast(this), if_found_dict(this), if_found_global(this);
@@ -7257,8 +7457,7 @@ void CodeStubAssembler::TryGetOwnProperty(
if (!var_details) {
var_details = &local_var_details;
}
- Variable* vars[] = {var_value, var_details};
- Label if_found(this, 2, vars);
+ Label if_found(this, {var_value, var_details});
TryLookupProperty(object, map, instance_type, unique_name, &if_found_fast,
&if_found_dict, &if_found_global, &var_meta_storage,
@@ -7308,9 +7507,7 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
Label* if_absent, Label* if_not_found,
Label* if_bailout) {
// Handle special objects in runtime.
- GotoIf(Int32LessThanOrEqual(instance_type,
- Int32Constant(LAST_SPECIAL_RECEIVER_TYPE)),
- if_bailout);
+ GotoIf(IsSpecialReceiverInstanceType(instance_type), if_bailout);
Node* elements_kind = LoadMapElementsKind(map);
@@ -7712,10 +7909,10 @@ Node* CodeStubAssembler::OrdinaryHasInstance(Node* context, Node* callable,
return var_result.value();
}
-Node* CodeStubAssembler::ElementOffsetFromIndex(Node* index_node,
- ElementsKind kind,
- ParameterMode mode,
- int base_size) {
+TNode<IntPtrT> CodeStubAssembler::ElementOffsetFromIndex(Node* index_node,
+ ElementsKind kind,
+ ParameterMode mode,
+ int base_size) {
int element_size_shift = ElementsKindToShiftSize(kind);
int element_size = 1 << element_size_shift;
int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize;
@@ -7735,13 +7932,13 @@ Node* CodeStubAssembler::ElementOffsetFromIndex(Node* index_node,
return IntPtrConstant(base_size + element_size * index);
}
- Node* shifted_index =
+ TNode<WordT> shifted_index =
(element_size_shift == 0)
- ? index_node
+ ? UncheckedCast<WordT>(index_node)
: ((element_size_shift > 0)
? WordShl(index_node, IntPtrConstant(element_size_shift))
: WordSar(index_node, IntPtrConstant(-element_size_shift)));
- return IntPtrAdd(IntPtrConstant(base_size), shifted_index);
+ return IntPtrAdd(IntPtrConstant(base_size), Signed(shifted_index));
}
Node* CodeStubAssembler::LoadFeedbackVector(Node* closure) {
@@ -7820,19 +8017,22 @@ void CodeStubAssembler::CheckForAssociatedProtector(Node* name,
if_protector);
GotoIf(WordEqual(name, LoadRoot(Heap::kiterator_symbolRootIndex)),
if_protector);
+ GotoIf(WordEqual(name, LoadRoot(Heap::knext_stringRootIndex)), if_protector);
GotoIf(WordEqual(name, LoadRoot(Heap::kspecies_symbolRootIndex)),
if_protector);
GotoIf(WordEqual(name, LoadRoot(Heap::kis_concat_spreadable_symbolRootIndex)),
if_protector);
+ GotoIf(WordEqual(name, LoadRoot(Heap::kresolve_stringRootIndex)),
+ if_protector);
GotoIf(WordEqual(name, LoadRoot(Heap::kthen_stringRootIndex)), if_protector);
// Fall through if no case matched.
}
-Node* CodeStubAssembler::LoadReceiverMap(Node* receiver) {
- return Select(TaggedIsSmi(receiver),
- [=] { return LoadRoot(Heap::kHeapNumberMapRootIndex); },
- [=] { return LoadMap(receiver); },
- MachineRepresentation::kTagged);
+TNode<Map> CodeStubAssembler::LoadReceiverMap(SloppyTNode<Object> receiver) {
+ return Select<Map>(
+ TaggedIsSmi(receiver),
+ [=] { return CAST(LoadRoot(Heap::kHeapNumberMapRootIndex)); },
+ [=] { return LoadMap(UncheckedCast<HeapObject>(receiver)); });
}
TNode<IntPtrT> CodeStubAssembler::TryToIntptr(Node* key, Label* miss) {
@@ -7961,10 +8161,10 @@ TNode<Context> CodeStubAssembler::LoadScriptContext(
TNode<ScriptContextTable> script_context_table = CAST(
LoadContextElement(native_context, Context::SCRIPT_CONTEXT_TABLE_INDEX));
- Node* script_context = LoadFixedArrayElement(
+ TNode<Context> script_context = CAST(LoadFixedArrayElement(
script_context_table, context_index,
- ScriptContextTable::kFirstContextSlotIndex * kPointerSize);
- return CAST(script_context);
+ ScriptContextTable::kFirstContextSlotIndex * kPointerSize));
+ return script_context;
}
namespace {
@@ -8178,12 +8378,23 @@ void CodeStubAssembler::EmitBigTypedArrayElementStore(
Node* offset = ElementOffsetFromIndex(intptr_key, BIGINT64_ELEMENTS,
INTPTR_PARAMETERS, 0);
MachineRepresentation rep = WordT::kMachineRepresentation;
+#if defined(V8_TARGET_BIG_ENDIAN)
+ if (!Is64()) {
+ StoreNoWriteBarrier(rep, backing_store, offset, var_high.value());
+ StoreNoWriteBarrier(rep, backing_store,
+ IntPtrAdd(offset, IntPtrConstant(kPointerSize)),
+ var_low.value());
+ } else {
+ StoreNoWriteBarrier(rep, backing_store, offset, var_low.value());
+ }
+#else
StoreNoWriteBarrier(rep, backing_store, offset, var_low.value());
if (!Is64()) {
StoreNoWriteBarrier(rep, backing_store,
IntPtrAdd(offset, IntPtrConstant(kPointerSize)),
var_high.value());
}
+#endif
}
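The #ifdef above exists so that, on 32-bit big-endian targets, the high half of a 64-bit BigInt digit lands at the lower address, keeping the digit's bytes in memory order. As a hedged, standalone sketch (not part of this patch; the function name and the memcpy-based store are illustrative only), the equivalent host-side logic looks roughly like this:

#include <cstdint>
#include <cstring>

// Illustrative only: stores one 64-bit BigInt digit as two 32-bit machine
// words, putting the high half first on big-endian targets, mirroring the
// CSA code above for the !Is64() case.
void StoreDigitAsTwoWords(uint8_t* backing_store, size_t offset,
                          uint64_t digit, bool big_endian) {
  uint32_t low = static_cast<uint32_t>(digit);
  uint32_t high = static_cast<uint32_t>(digit >> 32);
  uint32_t first = big_endian ? high : low;
  uint32_t second = big_endian ? low : high;
  std::memcpy(backing_store + offset, &first, sizeof(first));
  std::memcpy(backing_store + offset + sizeof(uint32_t), &second,
              sizeof(second));
}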
void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
@@ -10022,7 +10233,7 @@ void CodeStubAssembler::BranchIfSameValue(Node* lhs, Node* rhs, Label* if_true,
}
TNode<Oddball> CodeStubAssembler::HasProperty(SloppyTNode<HeapObject> object,
- SloppyTNode<Name> key,
+ SloppyTNode<Object> key,
SloppyTNode<Context> context,
HasPropertyLookupMode mode) {
Label call_runtime(this, Label::kDeferred), return_true(this),
@@ -10053,7 +10264,7 @@ TNode<Oddball> CodeStubAssembler::HasProperty(SloppyTNode<HeapObject> object,
BIND(&if_proxy);
{
- TNode<Name> name = CAST(ToName(context, key));
+ TNode<Name> name = ToName(context, key);
switch (mode) {
case kHasProperty:
GotoIf(IsPrivateSymbol(name), &return_false);
@@ -10187,19 +10398,17 @@ Node* CodeStubAssembler::Typeof(Node* value) {
return result_var.value();
}
-Node* CodeStubAssembler::GetSuperConstructor(Node* active_function,
- Node* context) {
- CSA_ASSERT(this, IsJSFunction(active_function));
-
+TNode<Object> CodeStubAssembler::GetSuperConstructor(
+ SloppyTNode<Context> context, SloppyTNode<JSFunction> active_function) {
Label is_not_constructor(this, Label::kDeferred), out(this);
- VARIABLE(result, MachineRepresentation::kTagged);
+ TVARIABLE(Object, result);
- Node* map = LoadMap(active_function);
- Node* prototype = LoadMapPrototype(map);
- Node* prototype_map = LoadMap(prototype);
+ TNode<Map> map = LoadMap(active_function);
+ TNode<Object> prototype = LoadMapPrototype(map);
+ TNode<Map> prototype_map = LoadMap(CAST(prototype));
GotoIfNot(IsConstructorMap(prototype_map), &is_not_constructor);
- result.Bind(prototype);
+ result = prototype;
Goto(&out);
BIND(&is_not_constructor);
@@ -10213,14 +10422,14 @@ Node* CodeStubAssembler::GetSuperConstructor(Node* active_function,
return result.value();
}
-Node* CodeStubAssembler::SpeciesConstructor(Node* context, Node* object,
- Node* default_constructor) {
+TNode<Object> CodeStubAssembler::SpeciesConstructor(
+ SloppyTNode<Context> context, SloppyTNode<Object> object,
+ SloppyTNode<Object> default_constructor) {
Isolate* isolate = this->isolate();
- VARIABLE(var_result, MachineRepresentation::kTagged);
- var_result.Bind(default_constructor);
+ TVARIABLE(Object, var_result, default_constructor);
// 2. Let C be ? Get(O, "constructor").
- Node* const constructor =
+ TNode<Object> constructor =
GetProperty(context, object, isolate->factory()->constructor_string());
// 3. If C is undefined, return defaultConstructor.
@@ -10232,7 +10441,7 @@ Node* CodeStubAssembler::SpeciesConstructor(Node* context, Node* object,
MessageTemplate::kConstructorNotReceiver);
// 5. Let S be ? Get(C, @@species).
- Node* const species =
+ TNode<Object> species =
GetProperty(context, constructor, isolate->factory()->species_symbol());
// 6. If S is either undefined or null, return defaultConstructor.
@@ -10241,8 +10450,8 @@ Node* CodeStubAssembler::SpeciesConstructor(Node* context, Node* object,
// 7. If IsConstructor(S) is true, return S.
Label throw_error(this);
GotoIf(TaggedIsSmi(species), &throw_error);
- GotoIfNot(IsConstructorMap(LoadMap(species)), &throw_error);
- var_result.Bind(species);
+ GotoIfNot(IsConstructorMap(LoadMap(CAST(species))), &throw_error);
+ var_result = species;
Goto(&out);
// 8. Throw a TypeError exception.
@@ -10546,201 +10755,25 @@ Node* CodeStubAssembler::BitwiseOp(Node* left32, Node* right32,
UNREACHABLE();
}
-Node* CodeStubAssembler::CreateArrayIterator(Node* array, Node* array_map,
- Node* array_type, Node* context,
- IterationKind mode) {
- int kBaseMapIndex = 0;
- switch (mode) {
- case IterationKind::kKeys:
- kBaseMapIndex = Context::TYPED_ARRAY_KEY_ITERATOR_MAP_INDEX;
- break;
- case IterationKind::kValues:
- kBaseMapIndex = Context::UINT8_ARRAY_VALUE_ITERATOR_MAP_INDEX;
- break;
- case IterationKind::kEntries:
- kBaseMapIndex = Context::UINT8_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX;
- break;
- }
-
- // Fast Array iterator map index:
- // (kBaseIndex + kFastIteratorOffset) + ElementsKind (for JSArrays)
- // kBaseIndex + (ElementsKind - UINT8_ELEMENTS) (for JSTypedArrays)
- const int kFastIteratorOffset =
- Context::FAST_SMI_ARRAY_VALUE_ITERATOR_MAP_INDEX -
- Context::UINT8_ARRAY_VALUE_ITERATOR_MAP_INDEX;
- STATIC_ASSERT(kFastIteratorOffset ==
- (Context::FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX -
- Context::UINT8_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX));
-
- // Slow Array iterator map index: (kBaseIndex + kSlowIteratorOffset)
- const int kSlowIteratorOffset =
- Context::GENERIC_ARRAY_VALUE_ITERATOR_MAP_INDEX -
- Context::UINT8_ARRAY_VALUE_ITERATOR_MAP_INDEX;
- STATIC_ASSERT(kSlowIteratorOffset ==
- (Context::GENERIC_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX -
- Context::UINT8_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX));
-
- // Assert: Type(array) is Object
- CSA_ASSERT(this, IsJSReceiverInstanceType(array_type));
-
- VARIABLE(var_result, MachineRepresentation::kTagged);
- VARIABLE(var_map_index, MachineType::PointerRepresentation());
- VARIABLE(var_array_map, MachineRepresentation::kTagged);
-
- Label return_result(this);
- Label allocate_iterator(this);
-
- if (mode == IterationKind::kKeys) {
- // There are only two key iterator maps, branch depending on whether or not
- // the receiver is a TypedArray or not.
-
- Label if_istypedarray(this), if_isgeneric(this);
-
- Branch(InstanceTypeEqual(array_type, JS_TYPED_ARRAY_TYPE), &if_istypedarray,
- &if_isgeneric);
-
- BIND(&if_isgeneric);
- {
- Label if_isfast(this), if_isslow(this);
- BranchIfFastJSArray(array, context, &if_isfast, &if_isslow);
-
- BIND(&if_isfast);
- {
- var_map_index.Bind(
- IntPtrConstant(Context::FAST_ARRAY_KEY_ITERATOR_MAP_INDEX));
- var_array_map.Bind(array_map);
- Goto(&allocate_iterator);
- }
-
- BIND(&if_isslow);
- {
- var_map_index.Bind(
- IntPtrConstant(Context::GENERIC_ARRAY_KEY_ITERATOR_MAP_INDEX));
- var_array_map.Bind(UndefinedConstant());
- Goto(&allocate_iterator);
- }
- }
-
- BIND(&if_istypedarray);
- {
- var_map_index.Bind(
- IntPtrConstant(Context::TYPED_ARRAY_KEY_ITERATOR_MAP_INDEX));
- var_array_map.Bind(UndefinedConstant());
- Goto(&allocate_iterator);
- }
- } else {
- Label if_istypedarray(this), if_isgeneric(this);
- Branch(InstanceTypeEqual(array_type, JS_TYPED_ARRAY_TYPE), &if_istypedarray,
- &if_isgeneric);
-
- BIND(&if_isgeneric);
- {
- Label if_isfast(this), if_isslow(this);
- BranchIfFastJSArray(array, context, &if_isfast, &if_isslow);
-
- BIND(&if_isfast);
- {
- Label if_ispacked(this), if_isholey(this);
- Node* elements_kind = LoadMapElementsKind(array_map);
- Branch(IsHoleyFastElementsKind(elements_kind), &if_isholey,
- &if_ispacked);
-
- BIND(&if_isholey);
- {
- // Fast holey JSArrays can treat the hole as undefined if the
- // protector cell is valid, and the prototype chain is unchanged from
- // its initial state (because the protector cell is only tracked for
- // initial the Array and Object prototypes). Check these conditions
- // here, and take the slow path if any fail.
- GotoIf(IsNoElementsProtectorCellInvalid(), &if_isslow);
-
- Node* native_context = LoadNativeContext(context);
-
- Node* prototype = LoadMapPrototype(array_map);
- Node* array_prototype = LoadContextElement(
- native_context, Context::INITIAL_ARRAY_PROTOTYPE_INDEX);
- GotoIfNot(WordEqual(prototype, array_prototype), &if_isslow);
-
- Node* map = LoadMap(prototype);
- prototype = LoadMapPrototype(map);
- Node* object_prototype = LoadContextElement(
- native_context, Context::INITIAL_OBJECT_PROTOTYPE_INDEX);
- GotoIfNot(WordEqual(prototype, object_prototype), &if_isslow);
-
- map = LoadMap(prototype);
- prototype = LoadMapPrototype(map);
- Branch(IsNull(prototype), &if_ispacked, &if_isslow);
- }
- BIND(&if_ispacked);
- {
- Node* map_index =
- IntPtrAdd(IntPtrConstant(kBaseMapIndex + kFastIteratorOffset),
- ChangeUint32ToWord(LoadMapElementsKind(array_map)));
- CSA_ASSERT(this, IntPtrGreaterThanOrEqual(
- map_index, IntPtrConstant(kBaseMapIndex +
- kFastIteratorOffset)));
- CSA_ASSERT(this, IntPtrLessThan(map_index,
- IntPtrConstant(kBaseMapIndex +
- kSlowIteratorOffset)));
-
- var_map_index.Bind(map_index);
- var_array_map.Bind(array_map);
- Goto(&allocate_iterator);
- }
- }
-
- BIND(&if_isslow);
- {
- Node* map_index = IntPtrAdd(IntPtrConstant(kBaseMapIndex),
- IntPtrConstant(kSlowIteratorOffset));
- var_map_index.Bind(map_index);
- var_array_map.Bind(UndefinedConstant());
- Goto(&allocate_iterator);
- }
- }
-
- BIND(&if_istypedarray);
- {
- Node* map_index =
- IntPtrAdd(IntPtrConstant(kBaseMapIndex - UINT8_ELEMENTS),
- ChangeUint32ToWord(LoadMapElementsKind(array_map)));
- CSA_ASSERT(
- this, IntPtrLessThan(map_index, IntPtrConstant(kBaseMapIndex +
- kFastIteratorOffset)));
- CSA_ASSERT(this, IntPtrGreaterThanOrEqual(map_index,
- IntPtrConstant(kBaseMapIndex)));
- var_map_index.Bind(map_index);
- var_array_map.Bind(UndefinedConstant());
- Goto(&allocate_iterator);
- }
- }
-
- BIND(&allocate_iterator);
- {
- Node* map = LoadFixedArrayElement(LoadNativeContext(context),
- var_map_index.value());
- var_result.Bind(AllocateJSArrayIterator(array, var_array_map.value(), map));
- Goto(&return_result);
- }
-
- BIND(&return_result);
- return var_result.value();
-}
-
-Node* CodeStubAssembler::AllocateJSArrayIterator(Node* array, Node* array_map,
- Node* map) {
+// ES #sec-createarrayiterator
+Node* CodeStubAssembler::CreateArrayIterator(Node* context, Node* object,
+ IterationKind kind) {
+ Node* native_context = LoadNativeContext(context);
+ Node* iterator_map = LoadContextElement(
+ native_context, Context::INITIAL_ARRAY_ITERATOR_MAP_INDEX);
Node* iterator = Allocate(JSArrayIterator::kSize);
- StoreMapNoWriteBarrier(iterator, map);
+ StoreMapNoWriteBarrier(iterator, iterator_map);
StoreObjectFieldRoot(iterator, JSArrayIterator::kPropertiesOrHashOffset,
Heap::kEmptyFixedArrayRootIndex);
StoreObjectFieldRoot(iterator, JSArrayIterator::kElementsOffset,
Heap::kEmptyFixedArrayRootIndex);
- StoreObjectFieldNoWriteBarrier(iterator,
- JSArrayIterator::kIteratedObjectOffset, array);
+ StoreObjectFieldNoWriteBarrier(
+ iterator, JSArrayIterator::kIteratedObjectOffset, object);
StoreObjectFieldNoWriteBarrier(iterator, JSArrayIterator::kNextIndexOffset,
SmiConstant(0));
StoreObjectFieldNoWriteBarrier(
- iterator, JSArrayIterator::kIteratedObjectMapOffset, array_map);
+ iterator, JSArrayIterator::kKindOffset,
+ SmiConstant(Smi::FromInt(static_cast<int>(kind))));
return iterator;
}
@@ -10796,6 +10829,15 @@ Node* CodeStubAssembler::AllocateJSIteratorResultForEntry(Node* context,
return result;
}
+Node* CodeStubAssembler::ArraySpeciesCreate(TNode<Context> context,
+ TNode<Object> o,
+ TNode<Number> len) {
+ Node* constructor =
+ CallRuntime(Runtime::kArraySpeciesConstructor, context, o);
+ return ConstructJS(CodeFactory::Construct(isolate()), context, constructor,
+ len);
+}
+
Node* CodeStubAssembler::IsDetachedBuffer(Node* buffer) {
CSA_ASSERT(this, HasInstanceType(buffer, JS_ARRAY_BUFFER_TYPE));
@@ -10852,7 +10894,7 @@ TNode<Object> CodeStubArguments::AtIndex(int index) const {
}
TNode<Object> CodeStubArguments::GetOptionalArgumentValue(
- int index, SloppyTNode<Object> default_value) {
+ int index, TNode<Object> default_value) {
CodeStubAssembler::TVariable<Object> result(assembler_);
CodeStubAssembler::Label argument_missing(assembler_),
argument_done(assembler_, &result);
@@ -10872,6 +10914,27 @@ TNode<Object> CodeStubArguments::GetOptionalArgumentValue(
return result.value();
}
+TNode<Object> CodeStubArguments::GetOptionalArgumentValue(
+ TNode<IntPtrT> index, TNode<Object> default_value) {
+ CodeStubAssembler::TVariable<Object> result(assembler_);
+ CodeStubAssembler::Label argument_missing(assembler_),
+ argument_done(assembler_, &result);
+
+ assembler_->GotoIf(
+ assembler_->UintPtrOrSmiGreaterThanOrEqual(
+ assembler_->IntPtrToParameter(index, argc_mode_), argc_, argc_mode_),
+ &argument_missing);
+ result = AtIndex(index);
+ assembler_->Goto(&argument_done);
+
+ assembler_->BIND(&argument_missing);
+ result = default_value;
+ assembler_->Goto(&argument_done);
+
+ assembler_->BIND(&argument_done);
+ return result.value();
+}
+
void CodeStubArguments::ForEach(
const CodeStubAssembler::VariableList& vars,
const CodeStubArguments::ForEachBodyFunction& body, Node* first, Node* last,
@@ -10923,6 +10986,11 @@ Node* CodeStubAssembler::IsFastSmiOrTaggedElementsKind(Node* elements_kind) {
Int32Constant(TERMINAL_FAST_ELEMENTS_KIND));
}
+Node* CodeStubAssembler::IsFastSmiElementsKind(Node* elements_kind) {
+ return Uint32LessThanOrEqual(elements_kind,
+ Int32Constant(HOLEY_SMI_ELEMENTS));
+}
+
Node* CodeStubAssembler::IsHoleyFastElementsKind(Node* elements_kind) {
CSA_ASSERT(this, IsFastElementsKind(elements_kind));
@@ -10953,13 +11021,105 @@ Node* CodeStubAssembler::IsPromiseHookEnabledOrDebugIsActive() {
return Word32NotEqual(promise_hook_or_debug_is_active, Int32Constant(0));
}
+TNode<Code> CodeStubAssembler::LoadBuiltin(TNode<Smi> builtin_id) {
+ CSA_ASSERT(this, SmiGreaterThanOrEqual(builtin_id, SmiConstant(0)));
+ CSA_ASSERT(this,
+ SmiLessThan(builtin_id, SmiConstant(Builtins::builtin_count)));
+
+ int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize;
+ int index_shift = kPointerSizeLog2 - kSmiShiftBits;
+ TNode<WordT> table_index =
+ index_shift >= 0 ? WordShl(BitcastTaggedToWord(builtin_id), index_shift)
+ : WordSar(BitcastTaggedToWord(builtin_id), -index_shift);
+
+ return CAST(
+ Load(MachineType::TaggedPointer(),
+ ExternalConstant(ExternalReference::builtins_address(isolate())),
+ table_index));
+}
+
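As a hedged aside (not part of the patch), LoadBuiltin's index computation can be checked with plain integer arithmetic. Assuming a 64-bit build where a Smi keeps its payload in the upper 32 bits (kSmiShiftSize + kSmiTagSize == 32) and kPointerSizeLog2 == 3, index_shift is -29, so the raw Smi word is shifted right arithmetically by 29, which yields builtin_id * kPointerSize, the byte offset into the builtins table:

#include <cstdint>

// Illustrative only: mirrors the shift above under the assumed 64-bit
// constants; a Smi holding |id| is encoded as id << 32.
int64_t BuiltinTableByteOffset(int64_t raw_smi_word) {
  const int kSmiShiftBits = 32;    // assumed: kSmiShiftSize + kSmiTagSize
  const int kPointerSizeLog2 = 3;  // assumed: 8-byte pointers
  const int index_shift = kPointerSizeLog2 - kSmiShiftBits;  // == -29
  return index_shift >= 0 ? raw_smi_word << index_shift
                          : raw_smi_word >> -index_shift;
}
// For builtin id 5 the raw word is (int64_t{5} << 32) and the result is 40,
// i.e. 5 * kPointerSize.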
+TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
+ SloppyTNode<SharedFunctionInfo> shared_info) {
+ TNode<Object> sfi_data =
+ LoadObjectField(shared_info, SharedFunctionInfo::kFunctionDataOffset);
+
+ TYPED_VARIABLE_DEF(Code, sfi_code, this);
+
+ Label done(this);
+ Label check_instance_type(this);
+
+ // IsSmi: Is builtin
+ GotoIf(TaggedIsNotSmi(sfi_data), &check_instance_type);
+ sfi_code = LoadBuiltin(CAST(sfi_data));
+ Goto(&done);
+
+ // Switch on data's instance type.
+ BIND(&check_instance_type);
+ TNode<Int32T> data_type = LoadInstanceType(CAST(sfi_data));
+
+ int32_t case_values[] = {BYTECODE_ARRAY_TYPE, CODE_TYPE, FIXED_ARRAY_TYPE,
+ TUPLE2_TYPE, FUNCTION_TEMPLATE_INFO_TYPE};
+ Label check_is_bytecode_array(this);
+ Label check_is_code(this);
+ Label check_is_fixed_array(this);
+ Label check_is_pre_parsed_scope_data(this);
+ Label check_is_function_template_info(this);
+ Label check_is_interpreter_data(this);
+ Label* case_labels[] = {
+ &check_is_bytecode_array, &check_is_code, &check_is_fixed_array,
+ &check_is_pre_parsed_scope_data, &check_is_function_template_info};
+ STATIC_ASSERT(arraysize(case_values) == arraysize(case_labels));
+ Switch(data_type, &check_is_interpreter_data, case_values, case_labels,
+ arraysize(case_labels));
+
+ // IsBytecodeArray: Interpret bytecode
+ BIND(&check_is_bytecode_array);
+ DCHECK(!Builtins::IsLazy(Builtins::kInterpreterEntryTrampoline));
+ sfi_code = HeapConstant(BUILTIN_CODE(isolate(), InterpreterEntryTrampoline));
+ Goto(&done);
+
+ // IsCode: Run code
+ BIND(&check_is_code);
+ sfi_code = CAST(sfi_data);
+ Goto(&done);
+
+  // IsFixedArray: Instantiate using AsmWasmData.
+ BIND(&check_is_fixed_array);
+ DCHECK(!Builtins::IsLazy(Builtins::kInstantiateAsmJs));
+ sfi_code = HeapConstant(BUILTIN_CODE(isolate(), InstantiateAsmJs));
+ Goto(&done);
+
+ // IsPreParsedScopeData: Compile lazy
+ BIND(&check_is_pre_parsed_scope_data);
+ DCHECK(!Builtins::IsLazy(Builtins::kCompileLazy));
+ sfi_code = HeapConstant(BUILTIN_CODE(isolate(), CompileLazy));
+ Goto(&done);
+
+ // IsFunctionTemplateInfo: API call
+ BIND(&check_is_function_template_info);
+ DCHECK(!Builtins::IsLazy(Builtins::kHandleApiCall));
+ sfi_code = HeapConstant(BUILTIN_CODE(isolate(), HandleApiCall));
+ Goto(&done);
+
+ // IsInterpreterData: Interpret bytecode
+ BIND(&check_is_interpreter_data);
+ // This is the default branch, so assert that we have the expected data type.
+ CSA_ASSERT(this,
+ Word32Equal(data_type, Int32Constant(INTERPRETER_DATA_TYPE)));
+ sfi_code = CAST(LoadObjectField(
+ CAST(sfi_data), InterpreterData::kInterpreterTrampolineOffset));
+ Goto(&done);
+
+ BIND(&done);
+ return sfi_code.value();
+}
+
Node* CodeStubAssembler::AllocateFunctionWithMapAndContext(Node* map,
Node* shared_info,
Node* context) {
CSA_SLOW_ASSERT(this, IsMap(map));
- Node* const code =
- LoadObjectField(shared_info, SharedFunctionInfo::kCodeOffset);
+ Node* const code = GetSharedFunctionInfoCode(shared_info);
// TODO(ishell): All the callers of this function pass map loaded from
// Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX. So we can remove
@@ -11073,6 +11233,20 @@ Node* CodeStubAssembler::CheckEnumCache(Node* receiver, Label* if_empty,
return receiver_map;
}
+TNode<IntPtrT> CodeStubAssembler::GetArgumentsLength(CodeStubArguments* args) {
+ return args->GetLength();
+}
+
+TNode<Object> CodeStubAssembler::GetArgumentValue(CodeStubArguments* args,
+ TNode<IntPtrT> index) {
+ return args->GetOptionalArgumentValue(index);
+}
+
+TNode<Object> CodeStubAssembler::GetArgumentValue(CodeStubArguments* args,
+ TNode<Smi> index) {
+ return args->GetOptionalArgumentValue(SmiUntag(index));
+}
+
void CodeStubAssembler::Print(const char* s) {
std::string formatted(s);
formatted += "\n";
@@ -11130,5 +11304,12 @@ void CodeStubAssembler::InitializeFunctionContext(Node* native_context,
native_context);
}
+void CodeStubAssembler::AssertIsStrongHeapObject(
+ SloppyTNode<HeapObject> object) {
+ CSA_SLOW_ASSERT(this, WordEqual(WordAnd(BitcastTaggedToWord(object),
+ IntPtrConstant(kHeapObjectTagMask)),
+ IntPtrConstant(kHeapObjectTag)));
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/code-stub-assembler.h b/deps/v8/src/code-stub-assembler.h
index 8fca0b667f..6c027f4a92 100644
--- a/deps/v8/src/code-stub-assembler.h
+++ b/deps/v8/src/code-stub-assembler.h
@@ -22,48 +22,54 @@ class StubCache;
enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
-#define HEAP_CONSTANT_LIST(V) \
- V(AccessorInfoMap, accessor_info_map, AccessorInfoMap) \
- V(AccessorPairMap, accessor_pair_map, AccessorPairMap) \
- V(AllocationSiteMap, allocation_site_map, AllocationSiteMap) \
- V(BooleanMap, boolean_map, BooleanMap) \
- V(CodeMap, code_map, CodeMap) \
- V(EmptyPropertyDictionary, empty_property_dictionary, \
- EmptyPropertyDictionary) \
- V(EmptyFixedArray, empty_fixed_array, EmptyFixedArray) \
- V(EmptySlowElementDictionary, empty_slow_element_dictionary, \
- EmptySlowElementDictionary) \
- V(empty_string, empty_string, EmptyString) \
- V(EmptyWeakCell, empty_weak_cell, EmptyWeakCell) \
- V(FalseValue, false_value, False) \
- V(FeedbackVectorMap, feedback_vector_map, FeedbackVectorMap) \
- V(FixedArrayMap, fixed_array_map, FixedArrayMap) \
- V(FixedCOWArrayMap, fixed_cow_array_map, FixedCOWArrayMap) \
- V(FixedDoubleArrayMap, fixed_double_array_map, FixedDoubleArrayMap) \
- V(FunctionTemplateInfoMap, function_template_info_map, \
- FunctionTemplateInfoMap) \
- V(GlobalPropertyCellMap, global_property_cell_map, PropertyCellMap) \
- V(has_instance_symbol, has_instance_symbol, HasInstanceSymbol) \
- V(HeapNumberMap, heap_number_map, HeapNumberMap) \
- V(length_string, length_string, LengthString) \
- V(ManyClosuresCellMap, many_closures_cell_map, ManyClosuresCellMap) \
- V(MetaMap, meta_map, MetaMap) \
- V(MinusZeroValue, minus_zero_value, MinusZero) \
- V(MutableHeapNumberMap, mutable_heap_number_map, MutableHeapNumberMap) \
- V(NanValue, nan_value, Nan) \
- V(NoClosuresCellMap, no_closures_cell_map, NoClosuresCellMap) \
- V(NullValue, null_value, Null) \
- V(OneClosureCellMap, one_closure_cell_map, OneClosureCellMap) \
- V(prototype_string, prototype_string, PrototypeString) \
- V(SpeciesProtector, species_protector, SpeciesProtector) \
- V(StoreHandler0Map, store_handler0_map, StoreHandler0Map) \
- V(SymbolMap, symbol_map, SymbolMap) \
- V(TheHoleValue, the_hole_value, TheHole) \
- V(TrueValue, true_value, True) \
- V(Tuple2Map, tuple2_map, Tuple2Map) \
- V(Tuple3Map, tuple3_map, Tuple3Map) \
- V(UndefinedValue, undefined_value, Undefined) \
- V(WeakCellMap, weak_cell_map, WeakCellMap) \
+#define HEAP_CONSTANT_LIST(V) \
+ V(AccessorInfoMap, accessor_info_map, AccessorInfoMap) \
+ V(AccessorPairMap, accessor_pair_map, AccessorPairMap) \
+ V(AllocationSiteMap, allocation_site_map, AllocationSiteMap) \
+ V(BooleanMap, boolean_map, BooleanMap) \
+ V(CodeMap, code_map, CodeMap) \
+ V(EmptyPropertyDictionary, empty_property_dictionary, \
+ EmptyPropertyDictionary) \
+ V(EmptyFixedArray, empty_fixed_array, EmptyFixedArray) \
+ V(EmptySlowElementDictionary, empty_slow_element_dictionary, \
+ EmptySlowElementDictionary) \
+ V(empty_string, empty_string, EmptyString) \
+ V(EmptyWeakCell, empty_weak_cell, EmptyWeakCell) \
+ V(FalseValue, false_value, False) \
+ V(FeedbackVectorMap, feedback_vector_map, FeedbackVectorMap) \
+ V(FixedArrayMap, fixed_array_map, FixedArrayMap) \
+ V(FixedCOWArrayMap, fixed_cow_array_map, FixedCOWArrayMap) \
+ V(FixedDoubleArrayMap, fixed_double_array_map, FixedDoubleArrayMap) \
+ V(FunctionTemplateInfoMap, function_template_info_map, \
+ FunctionTemplateInfoMap) \
+ V(GlobalPropertyCellMap, global_property_cell_map, PropertyCellMap) \
+ V(has_instance_symbol, has_instance_symbol, HasInstanceSymbol) \
+ V(HeapNumberMap, heap_number_map, HeapNumberMap) \
+ V(iterator_symbol, iterator_symbol, IteratorSymbol) \
+ V(length_string, length_string, LengthString) \
+ V(ManyClosuresCellMap, many_closures_cell_map, ManyClosuresCellMap) \
+ V(MetaMap, meta_map, MetaMap) \
+ V(MinusZeroValue, minus_zero_value, MinusZero) \
+ V(MutableHeapNumberMap, mutable_heap_number_map, MutableHeapNumberMap) \
+ V(NanValue, nan_value, Nan) \
+ V(NoClosuresCellMap, no_closures_cell_map, NoClosuresCellMap) \
+ V(NullValue, null_value, Null) \
+ V(OneClosureCellMap, one_closure_cell_map, OneClosureCellMap) \
+ V(prototype_string, prototype_string, PrototypeString) \
+ V(ArraySpeciesProtector, array_species_protector, ArraySpeciesProtector) \
+ V(TypedArraySpeciesProtector, typed_array_species_protector, \
+ TypedArraySpeciesProtector) \
+ V(PromiseSpeciesProtector, promise_species_protector, \
+ PromiseSpeciesProtector) \
+ V(StoreHandler0Map, store_handler0_map, StoreHandler0Map) \
+ V(SymbolMap, symbol_map, SymbolMap) \
+ V(TheHoleValue, the_hole_value, TheHole) \
+ V(TransitionArrayMap, transition_array_map, TransitionArrayMap) \
+ V(TrueValue, true_value, True) \
+ V(Tuple2Map, tuple2_map, Tuple2Map) \
+ V(Tuple3Map, tuple3_map, Tuple3Map) \
+ V(UndefinedValue, undefined_value, Undefined) \
+ V(WeakCellMap, weak_cell_map, WeakCellMap) \
V(SharedFunctionInfoMap, shared_function_info_map, SharedFunctionInfoMap)
// Returned from IteratorBuiltinsAssembler::GetIterator(). Struct is declared
@@ -126,9 +132,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
return ParameterRepresentation(OptimalParameterMode());
}
- Node* ParameterToIntPtr(Node* value, ParameterMode mode) {
+ TNode<IntPtrT> ParameterToIntPtr(Node* value, ParameterMode mode) {
if (mode == SMI_PARAMETERS) value = SmiUntag(value);
- return value;
+ return UncheckedCast<IntPtrT>(value);
}
Node* IntPtrToParameter(SloppyTNode<IntPtrT> value, ParameterMode mode) {
@@ -150,6 +156,28 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
return value;
}
+ TNode<Smi> TaggedToSmi(TNode<Object> value, Label* fail) {
+ GotoIf(TaggedIsNotSmi(value), fail);
+ return UncheckedCast<Smi>(value);
+ }
+
+ TNode<HeapObject> TaggedToHeapObject(TNode<Object> value, Label* fail) {
+ GotoIf(TaggedIsSmi(value), fail);
+ return UncheckedCast<HeapObject>(value);
+ }
+
+ TNode<JSArray> TaggedToJSArray(TNode<Object> value, Label* fail) {
+ GotoIfNot(IsJSArray(value), fail);
+ return UncheckedCast<JSArray>(value);
+ }
+
+ TNode<HeapObject> TaggedToCallable(TNode<Object> value, Label* fail) {
+ GotoIf(TaggedIsSmi(value), fail);
+ TNode<HeapObject> result = UncheckedCast<HeapObject>(value);
+ GotoIfNot(IsCallableMap(LoadMap(result)), fail);
+ return result;
+ }
+
Node* MatchesParameterMode(Node* value, ParameterMode mode);
#define PARAMETER_BINOP(OpName, IntPtrOpName, SmiOpName) \
@@ -277,7 +305,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
}
#define SMI_COMPARISON_OP(SmiOpName, IntPtrOpName) \
- Node* SmiOpName(Node* a, Node* b) { \
+ TNode<BoolT> SmiOpName(Node* a, Node* b) { \
return IntPtrOpName(BitcastTaggedToWord(a), BitcastTaggedToWord(b)); \
}
SMI_COMPARISON_OP(SmiEqual, WordEqual)
@@ -293,7 +321,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<Smi> SmiMax(SloppyTNode<Smi> a, SloppyTNode<Smi> b);
TNode<Smi> SmiMin(SloppyTNode<Smi> a, SloppyTNode<Smi> b);
// Computes a % b for Smi inputs a and b; result is not necessarily a Smi.
- Node* SmiMod(Node* a, Node* b);
+ TNode<Number> SmiMod(SloppyTNode<Smi> a, SloppyTNode<Smi> b);
// Computes a * b for Smi inputs a and b; result is not necessarily a Smi.
TNode<Number> SmiMul(SloppyTNode<Smi> a, SloppyTNode<Smi> b);
// Tries to computes dividend / divisor for Smi inputs; branching to bailout
@@ -307,6 +335,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<Number> NumberSub(SloppyTNode<Number> a, SloppyTNode<Number> b);
void GotoIfNotNumber(Node* value, Label* is_not_number);
void GotoIfNumber(Node* value, Label* is_number);
+ TNode<Number> SmiToNumber(TNode<Smi> v) { return v; }
Node* BitwiseOp(Node* left32, Node* right32, Operation bitwise_op);
@@ -352,47 +381,57 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* extra_node4 = nullptr, const char* extra_node4_name = "",
Node* extra_node5 = nullptr, const char* extra_node5_name = "");
- Node* Select(SloppyTNode<BoolT> condition, const NodeGenerator& true_body,
- const NodeGenerator& false_body, MachineRepresentation rep);
+ // The following Call wrappers call an object according to the semantics that
+  // one finds in the ECMAScript spec, operating on a Callable (e.g. a
+ // JSFunction or proxy) rather than a Code object.
+ template <class... TArgs>
+ TNode<Object> Call(TNode<Context> context, TNode<Object> callable,
+ TNode<JSReceiver> receiver, TArgs... args) {
+ return UncheckedCast<Object>(CallJS(
+ CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
+ context, callable, receiver, args...));
+ }
+ template <class... TArgs>
+ TNode<Object> Call(TNode<Context> context, TNode<Object> callable,
+ TNode<Object> receiver, TArgs... args) {
+ return UncheckedCast<Object>(CallJS(CodeFactory::Call(isolate()), context,
+ callable, receiver, args...));
+ }
+
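A hedged usage sketch of the typed Call wrappers above (not from this patch; the helper and argument names are hypothetical, and it is assumed to live in a CodeStubAssembler-derived builtin). Because |holder| is statically typed as a JSReceiver, overload resolution picks the kNotNullOrUndefined variant and no receiver conversion is emitted:

// Hypothetical helper inside a CodeStubAssembler subclass.
TNode<Object> InvokeGetter(TNode<Context> context, TNode<Object> getter,
                           TNode<JSReceiver> holder) {
  return Call(context, getter, holder);
}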
template <class A, class F, class G>
TNode<A> Select(SloppyTNode<BoolT> condition, const F& true_body,
- const G& false_body, MachineRepresentation rep) {
- return UncheckedCast<A>(Select(
+ const G& false_body) {
+ return UncheckedCast<A>(SelectImpl(
condition,
[&]() -> Node* { return base::implicit_cast<TNode<A>>(true_body()); },
[&]() -> Node* { return base::implicit_cast<TNode<A>>(false_body()); },
- rep));
+ MachineRepresentationOf<A>::value));
}
- Node* SelectConstant(Node* condition, Node* true_value, Node* false_value,
- MachineRepresentation rep);
template <class A>
TNode<A> SelectConstant(TNode<BoolT> condition, TNode<A> true_value,
- TNode<A> false_value, MachineRepresentation rep) {
- return UncheckedCast<A>(
- SelectConstant(condition, static_cast<Node*>(true_value),
- static_cast<Node*>(false_value), rep));
- }
-
- Node* SelectInt32Constant(Node* condition, int true_value, int false_value);
- Node* SelectIntPtrConstant(Node* condition, int true_value, int false_value);
- Node* SelectBooleanConstant(Node* condition);
- template <class A>
- TNode<A> SelectTaggedConstant(SloppyTNode<BoolT> condition,
- TNode<A> true_value,
- SloppyTNode<A> false_value) {
- static_assert(std::is_base_of<Object, A>::value, "not a tagged type");
- return SelectConstant(condition, true_value, false_value,
- MachineRepresentation::kTagged);
- }
- Node* SelectSmiConstant(Node* condition, Smi* true_value, Smi* false_value);
- Node* SelectSmiConstant(Node* condition, int true_value, Smi* false_value) {
+ TNode<A> false_value) {
+ return Select<A>(condition, [=] { return true_value; },
+ [=] { return false_value; });
+ }
+
+ TNode<Int32T> SelectInt32Constant(SloppyTNode<BoolT> condition,
+ int true_value, int false_value);
+ TNode<IntPtrT> SelectIntPtrConstant(SloppyTNode<BoolT> condition,
+ int true_value, int false_value);
+ TNode<Oddball> SelectBooleanConstant(SloppyTNode<BoolT> condition);
+ TNode<Smi> SelectSmiConstant(SloppyTNode<BoolT> condition, Smi* true_value,
+ Smi* false_value);
+ TNode<Smi> SelectSmiConstant(SloppyTNode<BoolT> condition, int true_value,
+ Smi* false_value) {
return SelectSmiConstant(condition, Smi::FromInt(true_value), false_value);
}
- Node* SelectSmiConstant(Node* condition, Smi* true_value, int false_value) {
+ TNode<Smi> SelectSmiConstant(SloppyTNode<BoolT> condition, Smi* true_value,
+ int false_value) {
return SelectSmiConstant(condition, true_value, Smi::FromInt(false_value));
}
- Node* SelectSmiConstant(Node* condition, int true_value, int false_value) {
+ TNode<Smi> SelectSmiConstant(SloppyTNode<BoolT> condition, int true_value,
+ int false_value) {
return SelectSmiConstant(condition, Smi::FromInt(true_value),
Smi::FromInt(false_value));
}
@@ -442,6 +481,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
void BranchIfFastJSArray(Node* object, Node* context, Label* if_true,
Label* if_false);
+ void BranchIfNotFastJSArray(Node* object, Node* context, Label* if_true,
+ Label* if_false) {
+ BranchIfFastJSArray(object, context, if_false, if_true);
+ }
void BranchIfFastJSArrayForCopy(Node* object, Node* context, Label* if_true,
Label* if_false);
@@ -508,25 +551,25 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
 // Load the instance type of a HeapObject.
 TNode<Int32T> LoadInstanceType(SloppyTNode<HeapObject> object);
 // Compare the instance type of the object against the provided one.
- Node* HasInstanceType(Node* object, InstanceType type);
- // Determines whether Array Iterator's prototype has changed.
- TNode<BoolT> HasInitialArrayIteratorPrototypeMap(
- TNode<Context> native_context);
+ TNode<BoolT> HasInstanceType(SloppyTNode<HeapObject> object,
+ InstanceType type);
// Determines whether Array's prototype has changed.
TNode<BoolT> InitialArrayPrototypeHasInitialArrayPrototypeMap(
TNode<Context> native_context);
// Determines whether an array's elements map has changed.
TNode<BoolT> HasInitialFastElementsKindMap(TNode<Context> native_context,
TNode<JSArray> jsarray);
- Node* DoesntHaveInstanceType(Node* object, InstanceType type);
- Node* TaggedDoesntHaveInstanceType(Node* any_tagged, InstanceType type);
+ TNode<BoolT> DoesntHaveInstanceType(SloppyTNode<HeapObject> object,
+ InstanceType type);
+ TNode<BoolT> TaggedDoesntHaveInstanceType(SloppyTNode<HeapObject> any_tagged,
+ InstanceType type);
// Load the properties backing store of a JSObject.
TNode<HeapObject> LoadSlowProperties(SloppyTNode<JSObject> object);
TNode<HeapObject> LoadFastProperties(SloppyTNode<JSObject> object);
// Load the elements backing store of a JSObject.
TNode<FixedArrayBase> LoadElements(SloppyTNode<JSObject> object);
// Load the length of a JSArray instance.
- TNode<Object> LoadJSArrayLength(SloppyTNode<JSArray> array);
+ TNode<Number> LoadJSArrayLength(SloppyTNode<JSArray> array);
// Load the length of a fast JSArray instance. Returns a positive Smi.
TNode<Smi> LoadFastJSArrayLength(SloppyTNode<JSArray> array);
// Load the length of a fixed array base instance.
@@ -563,7 +606,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Load the EnumLength of a Map.
Node* LoadMapEnumLength(SloppyTNode<Map> map);
// Load the back-pointer of a Map.
- Node* LoadMapBackPointer(SloppyTNode<Map> map);
+ TNode<Object> LoadMapBackPointer(SloppyTNode<Map> map);
 // Load the identity hash of a JSReceiver.
TNode<IntPtrT> LoadJSReceiverIdentityHash(SloppyTNode<Object> receiver,
Label* if_no_hash = nullptr);
@@ -597,19 +640,60 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<Object> LoadWeakCellValue(SloppyTNode<WeakCell> weak_cell,
Label* if_cleared = nullptr);
+ // Figures out whether the value of maybe_object is:
+ // - a SMI (jump to "if_smi", "extracted" will be the SMI value)
+ // - a cleared weak reference (jump to "if_cleared", "extracted" will be
+ // untouched)
+ // - a weak reference (jump to "if_weak", "extracted" will be the object
+ // pointed to)
+ // - a strong reference (jump to "if_strong", "extracted" will be the object
+ // pointed to)
+ void DispatchMaybeObject(Node* maybe_object, Label* if_smi, Label* if_cleared,
+ Label* if_weak, Label* if_strong,
+ Variable* extracted);
+
// Load an array element from a FixedArray.
- Node* LoadFixedArrayElement(Node* object, Node* index,
- int additional_offset = 0,
- ParameterMode parameter_mode = INTPTR_PARAMETERS);
- Node* LoadFixedArrayElement(Node* object, int index,
- int additional_offset = 0) {
+ TNode<Object> LoadFixedArrayElement(
+ SloppyTNode<Object> object, Node* index, int additional_offset = 0,
+ ParameterMode parameter_mode = INTPTR_PARAMETERS,
+ LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
+
+ TNode<Object> LoadFixedArrayElement(SloppyTNode<Object> object,
+ TNode<IntPtrT> index,
+ LoadSensitivity needs_poisoning) {
+ return LoadFixedArrayElement(object, index, 0, INTPTR_PARAMETERS,
+ needs_poisoning);
+ }
+
+ TNode<Object> LoadFixedArrayElement(
+ SloppyTNode<Object> object, TNode<IntPtrT> index,
+ int additional_offset = 0,
+ LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
+ return LoadFixedArrayElement(object, index, additional_offset,
+ INTPTR_PARAMETERS, needs_poisoning);
+ }
+
+ TNode<Object> LoadFixedArrayElement(
+ SloppyTNode<Object> object, int index, int additional_offset = 0,
+ LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
return LoadFixedArrayElement(object, IntPtrConstant(index),
- additional_offset);
+ additional_offset, INTPTR_PARAMETERS,
+ needs_poisoning);
+ }
+ TNode<Object> LoadFixedArrayElement(TNode<Object> object, TNode<Smi> index) {
+ return LoadFixedArrayElement(object, index, 0, SMI_PARAMETERS);
}
// Load an array element from a FixedArray, untag it and return it as Word32.
- Node* LoadAndUntagToWord32FixedArrayElement(
- Node* object, Node* index, int additional_offset = 0,
+ TNode<Int32T> LoadAndUntagToWord32FixedArrayElement(
+ SloppyTNode<Object> object, Node* index, int additional_offset = 0,
ParameterMode parameter_mode = INTPTR_PARAMETERS);
+
+ TNode<Int32T> LoadAndUntagToWord32FixedArrayElement(
+ SloppyTNode<Object> object, int index, int additional_offset = 0) {
+ return LoadAndUntagToWord32FixedArrayElement(
+ object, IntPtrConstant(index), additional_offset, INTPTR_PARAMETERS);
+ }
+
// Load an array element from a FixedDoubleArray.
Node* LoadFixedDoubleArrayElement(
Node* object, Node* index, MachineType machine_type,
@@ -669,6 +753,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Load the "prototype" property of a JSFunction.
Node* LoadJSFunctionPrototype(Node* function, Label* if_bailout);
+ Node* LoadSharedFunctionInfoBytecodeArray(Node* shared);
+
// Store the floating point value of a HeapNumber.
void StoreHeapNumberValue(SloppyTNode<HeapNumber> object,
SloppyTNode<Float64T> value);
@@ -696,12 +782,22 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
barrier_mode);
}
+ Node* StoreJSArrayLength(TNode<JSArray> array, TNode<Smi> length);
+ Node* StoreElements(TNode<Object> object, TNode<FixedArrayBase> elements);
+
Node* StoreFixedArrayElement(
Node* object, Node* index, Node* value,
WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
int additional_offset = 0,
ParameterMode parameter_mode = INTPTR_PARAMETERS);
+ Node* StoreFixedArrayElementSmi(
+ TNode<FixedArray> object, TNode<Smi> index, TNode<Object> value,
+ WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER) {
+ return StoreFixedArrayElement(object, index, value, barrier_mode, 0,
+ SMI_PARAMETERS);
+ }
+
Node* StoreFixedDoubleArrayElement(
Node* object, Node* index, Node* value,
ParameterMode parameter_mode = INTPTR_PARAMETERS);
@@ -712,14 +808,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
int additional_offset = 0,
ParameterMode parameter_mode = INTPTR_PARAMETERS);
- void EnsureArrayLengthWritable(Node* map, Label* bailout);
+ void EnsureArrayLengthWritable(TNode<Map> map, Label* bailout);
- // EnsureArrayPushable verifies that receiver is:
+  // EnsureArrayPushable verifies that the receiver with this map:
// 1. Is not a prototype.
// 2. Is not a dictionary.
// 3. Has a writeable length property.
// It returns ElementsKind as a node for further division into cases.
- Node* EnsureArrayPushable(Node* receiver, Label* bailout);
+ TNode<Int32T> EnsureArrayPushable(TNode<Map> map, Label* bailout);
void TryStoreArrayElement(ElementsKind kind, ParameterMode mode,
Label* bailout, Node* elements, Node* index,
@@ -751,6 +847,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Allocate a HeapNumber with a specific value.
TNode<HeapNumber> AllocateHeapNumberWithValue(SloppyTNode<Float64T> value,
MutableMode mode = IMMUTABLE);
+
// Allocate a BigInt with {length} digits. Sets the sign bit to {false}.
// Does not initialize the digits.
TNode<BigInt> AllocateBigInt(TNode<IntPtrT> length);
@@ -761,6 +858,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<UintPtrT> digit);
TNode<WordT> LoadBigIntBitfield(TNode<BigInt> bigint);
TNode<UintPtrT> LoadBigIntDigit(TNode<BigInt> bigint, int digit_index);
+
+ TNode<HeapNumber> AllocateHeapNumberWithValue(double value,
+ MutableMode mode = IMMUTABLE) {
+ return AllocateHeapNumberWithValue(Float64Constant(value), mode);
+ }
+
// Allocate a SeqOneByteString with the given length.
TNode<String> AllocateSeqOneByteString(int length,
AllocationFlags flags = kNone);
@@ -845,6 +948,18 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* length, Node* allocation_site = nullptr,
ParameterMode capacity_mode = INTPTR_PARAMETERS);
+ Node* AllocateJSArray(ElementsKind kind, TNode<Map> array_map,
+ TNode<Smi> capacity, TNode<Smi> length) {
+ return AllocateJSArray(kind, array_map, capacity, length, nullptr,
+ SMI_PARAMETERS);
+ }
+
+ Node* AllocateJSArray(ElementsKind kind, TNode<Map> array_map,
+ TNode<IntPtrT> capacity, TNode<Smi> length) {
+ return AllocateJSArray(kind, array_map, capacity, length, nullptr,
+ INTPTR_PARAMETERS);
+ }
+
Node* CloneFastJSArray(Node* context, Node* array,
ParameterMode mode = INTPTR_PARAMETERS,
Node* allocation_site = nullptr);
@@ -859,17 +974,29 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
AllocationFlags flags = kNone,
Node* fixed_array_map = nullptr);
+ Node* AllocateFixedArray(ElementsKind kind, TNode<Smi> capacity,
+ AllocationFlags flags = kNone) {
+ return AllocateFixedArray(kind, capacity, SMI_PARAMETERS, flags);
+ }
+
+ Node* AllocateFixedArray(ElementsKind kind, TNode<Smi> capacity,
+ TNode<Map> map, AllocationFlags flags = kNone) {
+ return AllocateFixedArray(kind, capacity, SMI_PARAMETERS, flags, map);
+ }
+
Node* AllocatePropertyArray(Node* capacity,
ParameterMode mode = INTPTR_PARAMETERS,
AllocationFlags flags = kNone);
- // Perform CreateArrayIterator (ES6 #sec-createarrayiterator).
- Node* CreateArrayIterator(Node* array, Node* array_map, Node* array_type,
- Node* context, IterationKind mode);
- Node* AllocateJSArrayIterator(Node* array, Node* array_map, Node* map);
+ // Perform CreateArrayIterator (ES #sec-createarrayiterator).
+ Node* CreateArrayIterator(Node* context, Node* object, IterationKind mode);
+
Node* AllocateJSIteratorResult(Node* context, Node* value, Node* done);
Node* AllocateJSIteratorResultForEntry(Node* context, Node* key, Node* value);
+ Node* ArraySpeciesCreate(TNode<Context> context, TNode<Object> originalArray,
+ TNode<Number> len);
+
void FillFixedArrayWithValue(ElementsKind kind, Node* array, Node* from_index,
Node* to_index,
Heap::RootListIndex value_root_index,
@@ -917,6 +1044,27 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
ParameterMode mode = INTPTR_PARAMETERS);
+ void CopyFixedArrayElements(
+ ElementsKind from_kind, TNode<FixedArrayBase> from_array,
+ ElementsKind to_kind, TNode<FixedArrayBase> to_array,
+ TNode<Smi> first_element, TNode<Smi> element_count, TNode<Smi> capacity,
+ WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER) {
+ CopyFixedArrayElements(from_kind, from_array, to_kind, to_array,
+ first_element, element_count, capacity, barrier_mode,
+ SMI_PARAMETERS);
+ }
+
+ TNode<FixedArray> ConvertFixedArrayBaseToFixedArray(
+ TNode<FixedArrayBase> base, Label* cast_fail);
+
+ TNode<FixedDoubleArray> ConvertFixedArrayBaseToFixedDoubleArray(
+ TNode<FixedArrayBase> base, Label* cast_fail) {
+ GotoIf(WordNotEqual(LoadMap(base),
+ LoadRoot(Heap::kFixedDoubleArrayMapRootIndex)),
+ cast_fail);
+ return UncheckedCast<FixedDoubleArray>(base);
+ }
+
enum class ExtractFixedArrayFlag {
kFixedArrays = 1,
kFixedDoubleArrays = 2,
@@ -950,11 +1098,21 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// passed as the |source| parameter.
// * |parameter_mode| determines the parameter mode of |first|, |count| and
// |capacity|.
- Node* ExtractFixedArray(Node* source, Node* first, Node* count = nullptr,
- Node* capacity = nullptr,
- ExtractFixedArrayFlags extract_flags =
- ExtractFixedArrayFlag::kAllFixedArrays,
- ParameterMode parameter_mode = INTPTR_PARAMETERS);
+ TNode<FixedArray> ExtractFixedArray(
+ Node* source, Node* first, Node* count = nullptr,
+ Node* capacity = nullptr,
+ ExtractFixedArrayFlags extract_flags =
+ ExtractFixedArrayFlag::kAllFixedArrays,
+ ParameterMode parameter_mode = INTPTR_PARAMETERS);
+
+ TNode<FixedArray> ExtractFixedArray(
+ TNode<FixedArray> source, TNode<Smi> first, TNode<Smi> count,
+ TNode<Smi> capacity,
+ ExtractFixedArrayFlags extract_flags =
+ ExtractFixedArrayFlag::kAllFixedArrays) {
+ return ExtractFixedArray(source, first, count, capacity, extract_flags,
+ SMI_PARAMETERS);
+ }
// Copy the entire contents of a FixedArray or FixedDoubleArray to a new
// array, including special appropriate handling for empty arrays and COW
@@ -1091,12 +1249,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Type checks.
// Check whether the map is for an object with special properties, such as a
// JSProxy or an object with interceptors.
- Node* InstanceTypeEqual(Node* instance_type, int type);
+ TNode<BoolT> InstanceTypeEqual(SloppyTNode<Int32T> instance_type, int type);
Node* IsAccessorInfo(Node* object);
Node* IsAccessorPair(Node* object);
Node* IsAllocationSite(Node* object);
Node* IsAnyHeapNumber(Node* object);
- Node* IsArrayIteratorInstanceType(Node* instance_type);
Node* IsNoElementsProtectorCellInvalid();
Node* IsBigIntInstanceType(Node* instance_type);
Node* IsBigInt(Node* object);
@@ -1119,6 +1276,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<Context> native_context);
Node* IsFeedbackCell(Node* object);
Node* IsFeedbackVector(Node* object);
+ Node* IsContext(Node* object);
Node* IsFixedArray(Node* object);
Node* IsFixedArraySubclass(Node* object);
Node* IsFixedArrayWithKind(Node* object, ElementsKind kind);
@@ -1127,12 +1285,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* IsFixedTypedArray(Node* object);
Node* IsFunctionWithPrototypeSlotMap(Node* map);
Node* IsHashTable(Node* object);
- Node* IsHeapNumber(Node* object);
+ TNode<BoolT> IsHeapNumber(Node* object);
Node* IsIndirectStringInstanceType(Node* instance_type);
Node* IsJSArrayBuffer(Node* object);
Node* IsJSArrayInstanceType(Node* instance_type);
Node* IsJSArrayMap(Node* object);
Node* IsJSArray(Node* object);
+ Node* IsJSArrayIterator(Node* object);
Node* IsJSAsyncGeneratorObject(Node* object);
Node* IsJSFunctionInstanceType(Node* instance_type);
Node* IsJSFunctionMap(Node* object);
@@ -1163,7 +1322,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* IsNumberDictionary(Node* object);
Node* IsOneByteStringInstanceType(Node* instance_type);
Node* IsPrimitiveInstanceType(Node* instance_type);
- Node* IsPrivateSymbol(Node* object);
+ TNode<BoolT> IsPrivateSymbol(Node* object);
Node* IsPromiseCapability(Node* object);
Node* IsPropertyArray(Node* object);
Node* IsPropertyCell(Node* object);
@@ -1172,7 +1331,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
SloppyTNode<Map> map);
Node* IsSequentialStringInstanceType(Node* instance_type);
Node* IsShortExternalStringInstanceType(Node* instance_type);
- Node* IsSpecialReceiverInstanceType(Node* instance_type);
+ TNode<BoolT> IsSpecialReceiverInstanceType(Node* instance_type);
Node* IsSpecialReceiverMap(Node* map);
Node* IsStringInstanceType(Node* instance_type);
Node* IsString(Node* object);
@@ -1180,45 +1339,49 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* IsSymbol(Node* object);
Node* IsUndetectableMap(Node* map);
Node* IsWeakCell(Node* object);
- Node* IsZeroOrFixedArray(Node* object);
+ Node* IsZeroOrContext(Node* object);
inline Node* IsSharedFunctionInfo(Node* object) {
return IsSharedFunctionInfoMap(LoadMap(object));
}
+ Node* IsPromiseResolveProtectorCellInvalid();
Node* IsPromiseThenProtectorCellInvalid();
- Node* IsSpeciesProtectorCellInvalid();
+ Node* IsArraySpeciesProtectorCellInvalid();
+ Node* IsTypedArraySpeciesProtectorCellInvalid();
+ Node* IsPromiseSpeciesProtectorCellInvalid();
// True iff |object| is a Smi or a HeapNumber.
- Node* IsNumber(Node* object);
+ TNode<BoolT> IsNumber(SloppyTNode<Object> object);
// True iff |object| is a Smi or a HeapNumber or a BigInt.
- Node* IsNumeric(Node* object);
+ TNode<BoolT> IsNumeric(SloppyTNode<Object> object);
// True iff |number| is either a Smi, or a HeapNumber whose value is not
// within Smi range.
- Node* IsNumberNormalized(Node* number);
- Node* IsNumberPositive(Node* number);
+ TNode<BoolT> IsNumberNormalized(SloppyTNode<Number> number);
+ TNode<BoolT> IsNumberPositive(SloppyTNode<Number> number);
// True iff {number} is a positive number and a valid array index in the range
// [0, 2^32-1).
- Node* IsNumberArrayIndex(Node* number);
+ TNode<BoolT> IsNumberArrayIndex(SloppyTNode<Number> number);
+
+ Node* FixedArraySizeDoesntFitInNewSpace(
+ Node* element_count, int base_size = FixedArray::kHeaderSize,
+ ParameterMode mode = INTPTR_PARAMETERS);
// ElementsKind helpers:
Node* IsFastElementsKind(Node* elements_kind);
Node* IsFastSmiOrTaggedElementsKind(Node* elements_kind);
+ Node* IsFastSmiElementsKind(Node* elements_kind);
Node* IsHoleyFastElementsKind(Node* elements_kind);
Node* IsElementsKindGreaterThan(Node* target_kind,
ElementsKind reference_kind);
- Node* FixedArraySizeDoesntFitInNewSpace(
- Node* element_count, int base_size = FixedArray::kHeaderSize,
- ParameterMode mode = INTPTR_PARAMETERS);
-
// String helpers.
// Load a character from a String (might flatten a ConsString).
TNode<Int32T> StringCharCodeAt(SloppyTNode<String> string,
SloppyTNode<IntPtrT> index);
// Return the single character string with only {code}.
- TNode<String> StringFromCharCode(TNode<Int32T> code);
+ TNode<String> StringFromSingleCharCode(TNode<Int32T> code);
// Return a new string object which holds a substring containing the range
// [from,to[ of string. |from| and |to| are expected to be tagged.
@@ -1244,9 +1407,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
void MaybeDerefIndirectStrings(Variable* var_left, Node* left_instance_type,
Variable* var_right, Node* right_instance_type,
Label* did_something);
+ Node* DerefIndirectString(TNode<String> string, TNode<Int32T> instance_type,
+ Label* cannot_deref);
- TNode<String> StringFromCodePoint(TNode<Int32T> codepoint,
- UnicodeEncoding encoding);
+ TNode<String> StringFromSingleCodePoint(TNode<Int32T> codepoint,
+ UnicodeEncoding encoding);
// Type conversion helpers.
enum class BigIntHandling { kConvertToNumber, kThrow };
@@ -1255,7 +1420,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Convert a Number to a String.
TNode<String> NumberToString(TNode<Number> input);
// Convert an object to a name.
- Node* ToName(Node* context, Node* input);
+ TNode<Name> ToName(SloppyTNode<Context> context, SloppyTNode<Object> value);
// Convert a Non-Number object to a Number.
TNode<Number> NonNumberToNumber(
SloppyTNode<Context> context, SloppyTNode<HeapObject> input,
@@ -1310,10 +1475,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Label* range_error);
// ES6 7.1.15 ToLength, but with inlined fast path.
- Node* ToLength_Inline(Node* const context, Node* const input);
+ TNode<Number> ToLength_Inline(SloppyTNode<Context> context,
+ SloppyTNode<Object> input);
// ES6 7.1.4 ToInteger ( argument )
- TNode<Number> ToInteger_Inline(TNode<Context> context, TNode<Object> input,
+ TNode<Number> ToInteger_Inline(SloppyTNode<Context> context,
+ SloppyTNode<Object> input,
ToIntegerTruncationMode mode = kNoTruncation);
TNode<Number> ToInteger(SloppyTNode<Context> context,
SloppyTNode<Object> input,
@@ -1475,22 +1642,23 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Loads the details for the entry with the given key_index.
// Returns an untagged int32.
template <class ContainerType>
- Node* LoadDetailsByKeyIndex(Node* container, Node* key_index) {
+ TNode<Uint32T> LoadDetailsByKeyIndex(Node* container, Node* key_index) {
const int kKeyToDetailsOffset =
(ContainerType::kEntryDetailsIndex - ContainerType::kEntryKeyIndex) *
kPointerSize;
- return LoadAndUntagToWord32FixedArrayElement(container, key_index,
- kKeyToDetailsOffset);
+ return Unsigned(LoadAndUntagToWord32FixedArrayElement(container, key_index,
+ kKeyToDetailsOffset));
}
// Loads the value for the entry with the given key_index.
// Returns a tagged value.
template <class ContainerType>
- Node* LoadValueByKeyIndex(Node* container, Node* key_index) {
+ TNode<Object> LoadValueByKeyIndex(Node* container, Node* key_index) {
const int kKeyToValueOffset =
(ContainerType::kEntryValueIndex - ContainerType::kEntryKeyIndex) *
kPointerSize;
- return LoadFixedArrayElement(container, key_index, kKeyToValueOffset);
+ return UncheckedCast<Object>(
+ LoadFixedArrayElement(container, key_index, kKeyToValueOffset));
}
// Stores the details for the entry with the given key_index.
@@ -1673,12 +1841,24 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
//
// Note: this code does not check if the global dictionary points to deleted
// entry! This has to be done by the caller.
- void TryLookupProperty(Node* object, Node* map, Node* instance_type,
- Node* unique_name, Label* if_found_fast,
+ void TryLookupProperty(SloppyTNode<JSObject> object, SloppyTNode<Map> map,
+ SloppyTNode<Int32T> instance_type,
+ SloppyTNode<Name> unique_name, Label* if_found_fast,
Label* if_found_dict, Label* if_found_global,
- Variable* var_meta_storage, Variable* var_name_index,
+ TVariable<HeapObject>* var_meta_storage,
+ TVariable<IntPtrT>* var_name_index,
Label* if_not_found, Label* if_bailout);
+ // This is a building block for TryLookupProperty() above. Supports only
+ // non-special fast and dictionary objects.
+ void TryLookupPropertyInSimpleObject(TNode<JSObject> object, TNode<Map> map,
+ TNode<Name> unique_name,
+ Label* if_found_fast,
+ Label* if_found_dict,
+ TVariable<HeapObject>* var_meta_storage,
+ TVariable<IntPtrT>* var_name_index,
+ Label* if_not_found);
+
// This method jumps to if_found if the element is known to exist. To
// if_absent if it's known to not exist. To if_not_found if the prototype
// chain needs to be checked. And if_bailout if the lookup is unsupported.
@@ -1751,7 +1931,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// used for a property store or deletion.
void CheckForAssociatedProtector(Node* name, Label* if_protector);
- Node* LoadReceiverMap(Node* receiver);
+ TNode<Map> LoadReceiverMap(SloppyTNode<Object> receiver);
// Emits keyed sloppy arguments load. Returns either the loaded value.
Node* LoadKeyedSloppyArguments(Node* receiver, Node* key, Label* bailout) {
@@ -1855,19 +2035,20 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
last_element_exclusive, body, mode, direction);
}
- Node* GetArrayAllocationSize(Node* element_count, ElementsKind kind,
- ParameterMode mode, int header_size) {
+ TNode<IntPtrT> GetArrayAllocationSize(Node* element_count, ElementsKind kind,
+ ParameterMode mode, int header_size) {
return ElementOffsetFromIndex(element_count, kind, mode, header_size);
}
- Node* GetFixedArrayAllocationSize(Node* element_count, ElementsKind kind,
- ParameterMode mode) {
+ TNode<IntPtrT> GetFixedArrayAllocationSize(Node* element_count,
+ ElementsKind kind,
+ ParameterMode mode) {
return GetArrayAllocationSize(element_count, kind, mode,
FixedArray::kHeaderSize);
}
- Node* GetPropertyArrayAllocationSize(Node* element_count,
- ParameterMode mode) {
+ TNode<IntPtrT> GetPropertyArrayAllocationSize(Node* element_count,
+ ParameterMode mode) {
return GetArrayAllocationSize(element_count, PACKED_ELEMENTS, mode,
PropertyArray::kHeaderSize);
}
@@ -1886,6 +2067,30 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
void BranchIfNumberRelationalComparison(Operation op, Node* left, Node* right,
Label* if_true, Label* if_false);
+ void BranchIfNumberLessThan(Node* left, Node* right, Label* if_true,
+ Label* if_false) {
+ BranchIfNumberRelationalComparison(Operation::kLessThan, left, right,
+ if_true, if_false);
+ }
+
+ void BranchIfNumberLessThanOrEqual(Node* left, Node* right, Label* if_true,
+ Label* if_false) {
+ BranchIfNumberRelationalComparison(Operation::kLessThanOrEqual, left, right,
+ if_true, if_false);
+ }
+
+ void BranchIfNumberGreaterThan(Node* left, Node* right, Label* if_true,
+ Label* if_false) {
+ BranchIfNumberRelationalComparison(Operation::kGreaterThan, left, right,
+ if_true, if_false);
+ }
+
+ void BranchIfNumberGreaterThanOrEqual(Node* left, Node* right, Label* if_true,
+ Label* if_false) {
+ BranchIfNumberRelationalComparison(Operation::kGreaterThanOrEqual, left,
+ right, if_true, if_false);
+ }
+
void BranchIfAccessorPair(Node* value, Label* if_accessor_pair,
Label* if_not_accessor_pair) {
GotoIf(TaggedIsSmi(value), if_not_accessor_pair);
@@ -1908,16 +2113,18 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
enum HasPropertyLookupMode { kHasProperty, kForInHasProperty };
TNode<Oddball> HasProperty(SloppyTNode<HeapObject> object,
- SloppyTNode<Name> key,
+ SloppyTNode<Object> key,
SloppyTNode<Context> context,
HasPropertyLookupMode mode);
Node* Typeof(Node* value);
- Node* GetSuperConstructor(Node* value, Node* context);
+ TNode<Object> GetSuperConstructor(SloppyTNode<Context> context,
+ SloppyTNode<JSFunction> active_function);
- Node* SpeciesConstructor(Node* context, Node* object,
- Node* default_constructor);
+ TNode<Object> SpeciesConstructor(SloppyTNode<Context> context,
+ SloppyTNode<Object> object,
+ SloppyTNode<Object> default_constructor);
Node* InstanceOf(Node* object, Node* callable, Node* context);
@@ -1927,8 +2134,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// TypedArray/ArrayBuffer helpers
Node* IsDetachedBuffer(Node* buffer);
- Node* ElementOffsetFromIndex(Node* index, ElementsKind kind,
- ParameterMode mode, int base_size = 0);
+ TNode<IntPtrT> ElementOffsetFromIndex(Node* index, ElementsKind kind,
+ ParameterMode mode, int base_size = 0);
+
+ // Load a builtin's code from the builtin array in the isolate.
+ TNode<Code> LoadBuiltin(TNode<Smi> builtin_id);
+
+ // Figure out the SFI's code object using its data field.
+ TNode<Code> GetSharedFunctionInfoCode(
+ SloppyTNode<SharedFunctionInfo> shared_info);
Node* AllocateFunctionWithMapAndContext(Node* map, Node* shared_info,
Node* context);
@@ -1947,6 +2161,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Label* if_fast, Label* if_slow);
Node* CheckEnumCache(Node* receiver, Label* if_empty, Label* if_runtime);
+ TNode<IntPtrT> GetArgumentsLength(CodeStubArguments* args);
+ TNode<Object> GetArgumentValue(CodeStubArguments* args, TNode<IntPtrT> index);
+ TNode<Object> GetArgumentValue(CodeStubArguments* args, TNode<Smi> index);
+
// Support for printf-style debugging
void Print(const char* s);
void Print(const char* prefix, Node* tagged_value);
@@ -1972,28 +2190,58 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
void PerformStackCheck(Node* context);
protected:
- void DescriptorLookup(Node* unique_name, Node* descriptors, Node* bitfield3,
- Label* if_found, Variable* var_name_index,
+ // Implements DescriptorArray::Search().
+ void DescriptorLookup(SloppyTNode<Name> unique_name,
+ SloppyTNode<DescriptorArray> descriptors,
+ SloppyTNode<Uint32T> bitfield3, Label* if_found,
+ TVariable<IntPtrT>* var_name_index,
Label* if_not_found);
- void DescriptorLookupLinear(Node* unique_name, Node* descriptors, Node* nof,
- Label* if_found, Variable* var_name_index,
- Label* if_not_found);
- void DescriptorLookupBinary(Node* unique_name, Node* descriptors, Node* nof,
- Label* if_found, Variable* var_name_index,
- Label* if_not_found);
- Node* DescriptorNumberToIndex(SloppyTNode<Uint32T> descriptor_number);
- // Implements DescriptorArray::ToKeyIndex.
- // Returns an untagged IntPtr.
- Node* DescriptorArrayToKeyIndex(Node* descriptor_number);
- // Implements DescriptorArray::GetKey.
- Node* DescriptorArrayGetKey(Node* descriptors, Node* descriptor_number);
- // Implements DescriptorArray::GetKey.
+
+ // Implements TransitionArray::SearchName() - searches for first transition
+ // entry with given name (note that there could be multiple entries with
+ // the same name).
+ void TransitionLookup(SloppyTNode<Name> unique_name,
+ SloppyTNode<TransitionArray> transitions,
+ Label* if_found, TVariable<IntPtrT>* var_name_index,
+ Label* if_not_found);
+
+ // Implements generic search procedure like i::Search<Array>().
+ template <typename Array>
+ void Lookup(TNode<Name> unique_name, TNode<Array> array,
+ TNode<Uint32T> number_of_valid_entries, Label* if_found,
+ TVariable<IntPtrT>* var_name_index, Label* if_not_found);
+
+ // Implements generic linear search procedure like i::LinearSearch<Array>().
+ template <typename Array>
+ void LookupLinear(TNode<Name> unique_name, TNode<Array> array,
+ TNode<Uint32T> number_of_valid_entries, Label* if_found,
+ TVariable<IntPtrT>* var_name_index, Label* if_not_found);
+
+ // Implements generic binary search procedure like i::BinarySearch<Array>().
+ template <typename Array>
+ void LookupBinary(TNode<Name> unique_name, TNode<Array> array,
+ TNode<Uint32T> number_of_valid_entries, Label* if_found,
+ TVariable<IntPtrT>* var_name_index, Label* if_not_found);
+
+ // Converts [Descriptor/Transition]Array entry number to a fixed array index.
+ template <typename Array>
+ TNode<IntPtrT> EntryIndexToIndex(TNode<Uint32T> entry_index);
+
+ // Implements [Descriptor/Transition]Array::ToKeyIndex.
+ template <typename Array>
+ TNode<IntPtrT> ToKeyIndex(TNode<Uint32T> entry_index);
+
+ // Implements [Descriptor/Transition]Array::GetKey.
+ template <typename Array>
+ TNode<Name> GetKey(TNode<Array> array, TNode<Uint32T> entry_index);
+
+ // Implements DescriptorArray::GetDetails.
TNode<Uint32T> DescriptorArrayGetDetails(TNode<DescriptorArray> descriptors,
TNode<Uint32T> descriptor_number);
- Node* CallGetterIfAccessor(Node* value, Node* details, Node* context,
- Node* receiver, Label* if_bailout,
- GetOwnPropertyMode mode = kCallJSGetter);
+ TNode<Object> CallGetterIfAccessor(Node* value, Node* details, Node* context,
+ Node* receiver, Label* if_bailout,
+ GetOwnPropertyMode mode = kCallJSGetter);
TNode<IntPtrT> TryToIntptr(Node* key, Label* miss);
@@ -2004,6 +2252,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
void InitializeFunctionContext(Node* native_context, Node* context,
int slots);
+ void AssertIsStrongHeapObject(SloppyTNode<HeapObject> object);
+
private:
friend class CodeStubArguments;
@@ -2036,13 +2286,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
TNode<Smi> length, TNode<String> first,
TNode<String> second, AllocationFlags flags);
- // Implements DescriptorArray::number_of_entries.
- // Returns an untagged int32.
- Node* DescriptorArrayNumberOfEntries(Node* descriptors);
- // Implements DescriptorArray::GetSortedKeyIndex.
- // Returns an untagged int32.
- Node* DescriptorArrayGetSortedKeyIndex(Node* descriptors,
- Node* descriptor_number);
+ Node* SelectImpl(TNode<BoolT> condition, const NodeGenerator& true_body,
+ const NodeGenerator& false_body, MachineRepresentation rep);
+
+ // Implements [Descriptor/Transition]Array::number_of_entries.
+ template <typename Array>
+ TNode<Uint32T> NumberOfEntries(TNode<Array> array);
+
+ // Implements [Descriptor/Transition]Array::GetSortedKeyIndex.
+ template <typename Array>
+ TNode<Uint32T> GetSortedKeyIndex(TNode<Array> descriptors,
+ TNode<Uint32T> entry_index);
Node* CollectFeedbackForString(Node* instance_type);
void GenerateEqual_Same(Node* value, Label* if_equal, Label* if_notequal,
@@ -2111,13 +2365,23 @@ class CodeStubArguments {
return GetOptionalArgumentValue(index, assembler_->UndefinedConstant());
}
TNode<Object> GetOptionalArgumentValue(int index,
- SloppyTNode<Object> default_value);
+ TNode<Object> default_value);
Node* GetLength(CodeStubAssembler::ParameterMode mode) const {
DCHECK_EQ(mode, argc_mode_);
return argc_;
}
+ TNode<Object> GetOptionalArgumentValue(TNode<IntPtrT> index) {
+ return GetOptionalArgumentValue(index, assembler_->UndefinedConstant());
+ }
+ TNode<Object> GetOptionalArgumentValue(TNode<IntPtrT> index,
+ TNode<Object> default_value);
+ TNode<IntPtrT> GetLength() const {
+ DCHECK_EQ(argc_mode_, CodeStubAssembler::INTPTR_PARAMETERS);
+ return assembler_->UncheckedCast<IntPtrT>(argc_);
+ }
+
typedef std::function<void(Node* arg)> ForEachBodyFunction;
// Iteration doesn't include the receiver. |first| and |last| are zero-based.
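
The code-stub-assembler.h hunks above migrate many predicates and conversion helpers from untyped compiler::Node* to TNode<T>/SloppyTNode<T> signatures, so results such as IsNumber() or IsNumberPositive() can feed Branch()/GotoIfNot() without casts, and add relational convenience wrappers like BranchIfNumberLessThan(). The following sketch is illustrative only and not part of this commit: the builtin name and the Descriptor::kValue field are hypothetical, and it assumes the usual TF_BUILTIN/CAST/BIND macros from the CSA infrastructure.

// Hypothetical example (not in this change): exercises the typed helpers
// IsNumber(), IsNumberPositive() and BranchIfNumberLessThan() declared above.
TF_BUILTIN(NumberIsSmallPositiveExample, CodeStubAssembler) {
  TNode<Object> input = CAST(Parameter(Descriptor::kValue));

  Label if_number(this), if_not_number(this), if_yes(this), if_no(this);
  // IsNumber() now returns TNode<BoolT>, so it can be branched on directly.
  Branch(IsNumber(input), &if_number, &if_not_number);

  BIND(&if_number);
  {
    TNode<Number> number = CAST(input);
    GotoIfNot(IsNumberPositive(number), &if_no);
    // One of the new relational convenience wrappers.
    BranchIfNumberLessThan(number, NumberConstant(100), &if_yes, &if_no);
  }

  BIND(&if_not_number);
  Goto(&if_no);

  BIND(&if_yes);
  Return(TrueConstant());

  BIND(&if_no);
  Return(FalseConstant());
}
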
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index cfe16d268c..1503703a6e 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -14,7 +14,6 @@
#include "src/code-stub-assembler.h"
#include "src/code-stubs-utils.h"
#include "src/counters.h"
-#include "src/factory.h"
#include "src/gdb-jit.h"
#include "src/heap/heap-inl.h"
#include "src/ic/ic-stats.h"
@@ -88,7 +87,7 @@ void CodeStub::RecordCodeGeneration(Handle<Code> code) {
CodeCreateEvent(CodeEventListener::STUB_TAG,
AbstractCode::cast(*code), os.str().c_str()));
Counters* counters = isolate()->counters();
- counters->total_stubs_code_size()->Increment(code->instruction_size());
+ counters->total_stubs_code_size()->Increment(code->raw_instruction_size());
#ifdef DEBUG
code->VerifyEmbeddedObjects();
#endif
@@ -300,7 +299,8 @@ Handle<Code> TurboFanCodeStub::GenerateCode() {
Zone zone(isolate()->allocator(), ZONE_NAME);
CallInterfaceDescriptor descriptor(GetCallInterfaceDescriptor());
compiler::CodeAssemblerState state(isolate(), &zone, descriptor, Code::STUB,
- name, 1, GetKey());
+ name, PoisoningMitigationLevel::kOff, 1,
+ GetKey());
GenerateAssembly(&state);
return compiler::CodeAssembler::GenerateCode(&state);
}
@@ -506,6 +506,15 @@ TF_STUB(StoreSlowElementStub, CodeStubAssembler) {
receiver, name);
}
+TF_STUB(StoreInArrayLiteralSlowStub, CodeStubAssembler) {
+ Node* array = Parameter(Descriptor::kReceiver);
+ Node* index = Parameter(Descriptor::kName);
+ Node* value = Parameter(Descriptor::kValue);
+ Node* context = Parameter(Descriptor::kContext);
+ TailCallRuntime(Runtime::kStoreInArrayLiteralIC_Slow, context, value, array,
+ index);
+}
+
TF_STUB(StoreFastElementStub, CodeStubAssembler) {
Comment("StoreFastElementStub: js_array=%d, elements_kind=%s, store_mode=%d",
stub->is_js_array(), ElementsKindToString(stub->elements_kind()),
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index 6d35af1100..96d2ad51cf 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -7,8 +7,8 @@
#include "src/allocation.h"
#include "src/assembler.h"
-#include "src/factory.h"
#include "src/globals.h"
+#include "src/heap/factory.h"
#include "src/interface-descriptors.h"
#include "src/macro-assembler.h"
#include "src/ostreams.h"
@@ -37,8 +37,9 @@ class Node;
V(JSEntry) \
V(MathPow) \
V(ProfileEntryHook) \
- V(StoreSlowElement) \
/* --- TurboFanCodeStubs --- */ \
+ V(StoreSlowElement) \
+ V(StoreInArrayLiteralSlow) \
V(ArrayNoArgumentConstructor) \
V(ArraySingleArgumentConstructor) \
V(ArrayNArgumentsConstructor) \
@@ -525,32 +526,14 @@ class InternalArrayConstructorStub: public PlatformCodeStub {
class MathPowStub: public PlatformCodeStub {
public:
- enum ExponentType { INTEGER, DOUBLE, TAGGED };
-
- MathPowStub(Isolate* isolate, ExponentType exponent_type)
- : PlatformCodeStub(isolate) {
- minor_key_ = ExponentTypeBits::encode(exponent_type);
- }
+ MathPowStub() : PlatformCodeStub(nullptr) {}
CallInterfaceDescriptor GetCallInterfaceDescriptor() const override {
- if (exponent_type() == TAGGED) {
- return MathPowTaggedDescriptor(isolate());
- } else if (exponent_type() == INTEGER) {
- return MathPowIntegerDescriptor(isolate());
- } else {
- // A CallInterfaceDescriptor doesn't specify double registers (yet).
- DCHECK_EQ(DOUBLE, exponent_type());
- return ContextOnlyDescriptor(isolate());
- }
+ // A CallInterfaceDescriptor doesn't specify double registers (yet).
+ return ContextOnlyDescriptor(isolate());
}
private:
- ExponentType exponent_type() const {
- return ExponentTypeBits::decode(minor_key_);
- }
-
- class ExponentTypeBits : public BitField<ExponentType, 0, 2> {};
-
DEFINE_PLATFORM_CODE_STUB(MathPow, PlatformCodeStub);
};
@@ -929,6 +912,18 @@ class StoreSlowElementStub : public TurboFanCodeStub {
DEFINE_TURBOFAN_CODE_STUB(StoreSlowElement, TurboFanCodeStub);
};
+class StoreInArrayLiteralSlowStub : public TurboFanCodeStub {
+ public:
+ StoreInArrayLiteralSlowStub(Isolate* isolate, KeyedAccessStoreMode mode)
+ : TurboFanCodeStub(isolate) {
+ minor_key_ = CommonStoreModeBits::encode(mode);
+ }
+
+ private:
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
+ DEFINE_TURBOFAN_CODE_STUB(StoreInArrayLiteralSlow, TurboFanCodeStub);
+};
+
class ElementsTransitionAndStoreStub : public TurboFanCodeStub {
public:
ElementsTransitionAndStoreStub(Isolate* isolate, ElementsKind from_kind,
diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc
index 046f692c07..ba9a071c33 100644
--- a/deps/v8/src/compilation-cache.cc
+++ b/deps/v8/src/compilation-cache.cc
@@ -5,8 +5,8 @@
#include "src/compilation-cache.h"
#include "src/counters.h"
-#include "src/factory.h"
#include "src/globals.h"
+#include "src/heap/factory.h"
#include "src/objects-inl.h"
#include "src/objects/compilation-cache-inl.h"
#include "src/visitors.h"
diff --git a/deps/v8/src/compilation-dependencies.cc b/deps/v8/src/compilation-dependencies.cc
index 5913f3a5a6..8be814025a 100644
--- a/deps/v8/src/compilation-dependencies.cc
+++ b/deps/v8/src/compilation-dependencies.cc
@@ -4,8 +4,8 @@
#include "src/compilation-dependencies.h"
-#include "src/factory.h"
#include "src/handles-inl.h"
+#include "src/heap/factory.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/zone/zone.h"
diff --git a/deps/v8/src/compilation-statistics.h b/deps/v8/src/compilation-statistics.h
index 1f70336fcc..cb66f86532 100644
--- a/deps/v8/src/compilation-statistics.h
+++ b/deps/v8/src/compilation-statistics.h
@@ -14,7 +14,7 @@
namespace v8 {
namespace internal {
-class CompilationInfo;
+class OptimizedCompilationInfo;
class CompilationStatistics;
struct AsPrintableStatistics {
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
index 27af96c85e..56620268ce 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
@@ -7,8 +7,8 @@
#include "include/v8-platform.h"
#include "include/v8.h"
#include "src/base/platform/time.h"
+#include "src/base/template-utils.h"
#include "src/cancelable-task.h"
-#include "src/compilation-info.h"
#include "src/compiler-dispatcher/compiler-dispatcher-job.h"
#include "src/compiler-dispatcher/compiler-dispatcher-tracer.h"
#include "src/compiler-dispatcher/unoptimized-compile-job.h"
@@ -123,11 +123,11 @@ void CompilerDispatcher::AbortTask::RunInternal() {
dispatcher_->AbortInactiveJobs();
}
-class CompilerDispatcher::BackgroundTask : public CancelableTask {
+class CompilerDispatcher::WorkerTask : public CancelableTask {
public:
- BackgroundTask(Isolate* isolate, CancelableTaskManager* task_manager,
- CompilerDispatcher* dispatcher);
- ~BackgroundTask() override;
+ WorkerTask(Isolate* isolate, CancelableTaskManager* task_manager,
+ CompilerDispatcher* dispatcher);
+ ~WorkerTask() override;
// CancelableTask implementation.
void RunInternal() override;
@@ -135,17 +135,17 @@ class CompilerDispatcher::BackgroundTask : public CancelableTask {
private:
CompilerDispatcher* dispatcher_;
- DISALLOW_COPY_AND_ASSIGN(BackgroundTask);
+ DISALLOW_COPY_AND_ASSIGN(WorkerTask);
};
-CompilerDispatcher::BackgroundTask::BackgroundTask(
- Isolate* isolate, CancelableTaskManager* task_manager,
- CompilerDispatcher* dispatcher)
+CompilerDispatcher::WorkerTask::WorkerTask(Isolate* isolate,
+ CancelableTaskManager* task_manager,
+ CompilerDispatcher* dispatcher)
: CancelableTask(task_manager), dispatcher_(dispatcher) {}
-CompilerDispatcher::BackgroundTask::~BackgroundTask() {}
+CompilerDispatcher::WorkerTask::~WorkerTask() {}
-void CompilerDispatcher::BackgroundTask::RunInternal() {
+void CompilerDispatcher::WorkerTask::RunInternal() {
dispatcher_->DoBackgroundWork();
}
@@ -188,7 +188,7 @@ CompilerDispatcher::CompilerDispatcher(Isolate* isolate, Platform* platform,
memory_pressure_level_(MemoryPressureLevel::kNone),
abort_(false),
idle_task_scheduled_(false),
- num_background_tasks_(0),
+ num_worker_tasks_(0),
main_thread_blocking_on_job_(nullptr),
block_for_testing_(false),
semaphore_for_testing_(0) {
@@ -434,7 +434,7 @@ void CompilerDispatcher::AbortInactiveJobs() {
}
if (jobs_.empty()) {
base::LockGuard<base::Mutex> lock(&mutex_);
- if (num_background_tasks_ == 0) abort_ = false;
+ if (num_worker_tasks_ == 0) abort_ = false;
}
}
@@ -511,24 +511,22 @@ void CompilerDispatcher::ConsiderJobForBackgroundProcessing(
base::LockGuard<base::Mutex> lock(&mutex_);
pending_background_jobs_.insert(job);
}
- ScheduleMoreBackgroundTasksIfNeeded();
+ ScheduleMoreWorkerTasksIfNeeded();
}
-void CompilerDispatcher::ScheduleMoreBackgroundTasksIfNeeded() {
+void CompilerDispatcher::ScheduleMoreWorkerTasksIfNeeded() {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
- "V8.CompilerDispatcherScheduleMoreBackgroundTasksIfNeeded");
+ "V8.CompilerDispatcherScheduleMoreWorkerTasksIfNeeded");
{
base::LockGuard<base::Mutex> lock(&mutex_);
if (pending_background_jobs_.empty()) return;
- if (platform_->NumberOfAvailableBackgroundThreads() <=
- num_background_tasks_) {
+ if (platform_->NumberOfWorkerThreads() <= num_worker_tasks_) {
return;
}
- ++num_background_tasks_;
+ ++num_worker_tasks_;
}
- platform_->CallOnBackgroundThread(
- new BackgroundTask(isolate_, task_manager_.get(), this),
- v8::Platform::kShortRunningTask);
+ platform_->CallOnWorkerThread(
+ base::make_unique<WorkerTask>(isolate_, task_manager_.get(), this));
}
void CompilerDispatcher::DoBackgroundWork() {
@@ -572,7 +570,7 @@ void CompilerDispatcher::DoBackgroundWork() {
{
base::LockGuard<base::Mutex> lock(&mutex_);
- --num_background_tasks_;
+ --num_worker_tasks_;
if (running_background_jobs_.empty() && abort_) {
// This is the last background job that finished. The abort task
@@ -720,7 +718,7 @@ CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::RemoveJob(
it = jobs_.erase(it);
if (jobs_.empty()) {
base::LockGuard<base::Mutex> lock(&mutex_);
- if (num_background_tasks_ == 0) abort_ = false;
+ if (num_worker_tasks_ == 0) abort_ = false;
}
return it;
}
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h
index 240f025c1e..d7b2dc802f 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h
@@ -117,16 +117,16 @@ class V8_EXPORT_PRIVATE CompilerDispatcher {
FRIEND_TEST(CompilerDispatcherTest, EnqueueAndStepParsed);
FRIEND_TEST(CompilerDispatcherTest, IdleTaskSmallIdleTime);
FRIEND_TEST(CompilerDispatcherTest, CompileOnBackgroundThread);
- FRIEND_TEST(CompilerDispatcherTest, FinishNowWithBackgroundTask);
- FRIEND_TEST(CompilerDispatcherTest, AsyncAbortAllPendingBackgroundTask);
- FRIEND_TEST(CompilerDispatcherTest, AsyncAbortAllRunningBackgroundTask);
+ FRIEND_TEST(CompilerDispatcherTest, FinishNowWithWorkerTask);
+ FRIEND_TEST(CompilerDispatcherTest, AsyncAbortAllPendingWorkerTask);
+ FRIEND_TEST(CompilerDispatcherTest, AsyncAbortAllRunningWorkerTask);
FRIEND_TEST(CompilerDispatcherTest, FinishNowDuringAbortAll);
FRIEND_TEST(CompilerDispatcherTest, CompileMultipleOnBackgroundThread);
typedef std::map<JobId, std::unique_ptr<CompilerDispatcherJob>> JobMap;
typedef IdentityMap<JobId, FreeStoreAllocationPolicy> SharedToJobIdMap;
class AbortTask;
- class BackgroundTask;
+ class WorkerTask;
class IdleTask;
void WaitForJobIfRunningOnBackground(CompilerDispatcherJob* job);
@@ -135,7 +135,7 @@ class V8_EXPORT_PRIVATE CompilerDispatcher {
bool CanEnqueue(Handle<SharedFunctionInfo> function);
JobMap::const_iterator GetJobFor(Handle<SharedFunctionInfo> shared) const;
void ConsiderJobForBackgroundProcessing(CompilerDispatcherJob* job);
- void ScheduleMoreBackgroundTasksIfNeeded();
+ void ScheduleMoreWorkerTasksIfNeeded();
void ScheduleIdleTaskFromAnyThread();
void ScheduleIdleTaskIfNeeded();
void ScheduleAbortTask();
@@ -183,8 +183,8 @@ class V8_EXPORT_PRIVATE CompilerDispatcher {
bool idle_task_scheduled_;
- // Number of scheduled or running BackgroundTask objects.
- size_t num_background_tasks_;
+ // Number of scheduled or running WorkerTask objects.
+ int num_worker_tasks_;
// The set of CompilerDispatcherJobs that can be advanced on any thread.
std::unordered_set<CompilerDispatcherJob*> pending_background_jobs_;
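
The dispatcher hunks above (and the optimizing-compile-dispatcher changes that follow) switch from Platform::CallOnBackgroundThread(Task*, ExpectedRuntime) to Platform::CallOnWorkerThread(std::unique_ptr<Task>), which transfers task ownership explicitly; base::make_unique comes from src/base/template-utils.h, included by these hunks. A reduced sketch of the posting pattern, with a hypothetical task class, assuming only the v8::Task interface and the v8::internal/base namespaces used in the files above:

// Hypothetical task, not part of this commit; illustrates the ownership-passing
// worker-thread API that the dispatcher code above migrates to.
class ExampleTask : public v8::Task {
 public:
  void Run() override {
    // ... background work ...
  }
};

void PostExample(v8::Platform* platform) {
  // Ownership of the task moves into the platform; no ExpectedRuntime hint
  // is passed any more.
  platform->CallOnWorkerThread(base::make_unique<ExampleTask>());
}
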
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
index 7dc73b146c..dd7527dfbf 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
@@ -5,11 +5,12 @@
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/base/atomicops.h"
+#include "src/base/template-utils.h"
#include "src/cancelable-task.h"
-#include "src/compilation-info.h"
#include "src/compiler.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
+#include "src/optimized-compilation-info.h"
#include "src/tracing/trace-event.h"
#include "src/v8.h"
@@ -18,10 +19,11 @@ namespace internal {
namespace {
-void DisposeCompilationJob(CompilationJob* job, bool restore_function_code) {
+void DisposeCompilationJob(OptimizedCompilationJob* job,
+ bool restore_function_code) {
if (restore_function_code) {
Handle<JSFunction> function = job->compilation_info()->closure();
- function->set_code(function->shared()->code());
+ function->set_code(function->shared()->GetCode());
if (function->IsInOptimizationQueue()) {
function->ClearOptimizationMarker();
}
@@ -91,10 +93,11 @@ OptimizingCompileDispatcher::~OptimizingCompileDispatcher() {
DeleteArray(input_queue_);
}
-CompilationJob* OptimizingCompileDispatcher::NextInput(bool check_if_flushing) {
+OptimizedCompilationJob* OptimizingCompileDispatcher::NextInput(
+ bool check_if_flushing) {
base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_);
if (input_queue_length_ == 0) return nullptr;
- CompilationJob* job = input_queue_[InputQueueIndex(0)];
+ OptimizedCompilationJob* job = input_queue_[InputQueueIndex(0)];
DCHECK_NOT_NULL(job);
input_queue_shift_ = InputQueueIndex(1);
input_queue_length_--;
@@ -108,7 +111,7 @@ CompilationJob* OptimizingCompileDispatcher::NextInput(bool check_if_flushing) {
return job;
}
-void OptimizingCompileDispatcher::CompileNext(CompilationJob* job) {
+void OptimizingCompileDispatcher::CompileNext(OptimizedCompilationJob* job) {
if (!job) return;
// The function may have already been optimized by OSR. Simply continue.
@@ -125,7 +128,7 @@ void OptimizingCompileDispatcher::CompileNext(CompilationJob* job) {
void OptimizingCompileDispatcher::FlushOutputQueue(bool restore_function_code) {
for (;;) {
- CompilationJob* job = nullptr;
+ OptimizedCompilationJob* job = nullptr;
{
base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
if (output_queue_.empty()) return;
@@ -142,7 +145,7 @@ void OptimizingCompileDispatcher::Flush(BlockingBehavior blocking_behavior) {
if (FLAG_block_concurrent_recompilation) Unblock();
base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_);
while (input_queue_length_ > 0) {
- CompilationJob* job = input_queue_[InputQueueIndex(0)];
+ OptimizedCompilationJob* job = input_queue_[InputQueueIndex(0)];
DCHECK_NOT_NULL(job);
input_queue_shift_ = InputQueueIndex(1);
input_queue_length_--;
@@ -190,14 +193,14 @@ void OptimizingCompileDispatcher::InstallOptimizedFunctions() {
HandleScope handle_scope(isolate_);
for (;;) {
- CompilationJob* job = nullptr;
+ OptimizedCompilationJob* job = nullptr;
{
base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
if (output_queue_.empty()) return;
job = output_queue_.front();
output_queue_.pop();
}
- CompilationInfo* info = job->compilation_info();
+ OptimizedCompilationInfo* info = job->compilation_info();
Handle<JSFunction> function(*info->closure());
if (function->HasOptimizedCode()) {
if (FLAG_trace_concurrent_recompilation) {
@@ -212,7 +215,8 @@ void OptimizingCompileDispatcher::InstallOptimizedFunctions() {
}
}
-void OptimizingCompileDispatcher::QueueForOptimization(CompilationJob* job) {
+void OptimizingCompileDispatcher::QueueForOptimization(
+ OptimizedCompilationJob* job) {
DCHECK(IsQueueAvailable());
{
// Add job to the back of the input queue.
@@ -224,15 +228,15 @@ void OptimizingCompileDispatcher::QueueForOptimization(CompilationJob* job) {
if (FLAG_block_concurrent_recompilation) {
blocked_jobs_++;
} else {
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- new CompileTask(isolate_, this), v8::Platform::kShortRunningTask);
+ V8::GetCurrentPlatform()->CallOnWorkerThread(
+ base::make_unique<CompileTask>(isolate_, this));
}
}
void OptimizingCompileDispatcher::Unblock() {
while (blocked_jobs_ > 0) {
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- new CompileTask(isolate_, this), v8::Platform::kShortRunningTask);
+ V8::GetCurrentPlatform()->CallOnWorkerThread(
+ base::make_unique<CompileTask>(isolate_, this));
blocked_jobs_--;
}
}
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
index 551b7c3563..deb7af99a4 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
@@ -18,7 +18,7 @@
namespace v8 {
namespace internal {
-class CompilationJob;
+class OptimizedCompilationJob;
class SharedFunctionInfo;
class V8_EXPORT_PRIVATE OptimizingCompileDispatcher {
@@ -32,7 +32,7 @@ class V8_EXPORT_PRIVATE OptimizingCompileDispatcher {
ref_count_(0),
recompilation_delay_(FLAG_concurrent_recompilation_delay) {
base::Relaxed_Store(&mode_, static_cast<base::AtomicWord>(COMPILE));
- input_queue_ = NewArray<CompilationJob*>(input_queue_capacity_);
+ input_queue_ = NewArray<OptimizedCompilationJob*>(input_queue_capacity_);
}
~OptimizingCompileDispatcher();
@@ -40,7 +40,7 @@ class V8_EXPORT_PRIVATE OptimizingCompileDispatcher {
void Stop();
void Flush(BlockingBehavior blocking_behavior);
// Takes ownership of |job|.
- void QueueForOptimization(CompilationJob* job);
+ void QueueForOptimization(OptimizedCompilationJob* job);
void Unblock();
void InstallOptimizedFunctions();
@@ -57,8 +57,8 @@ class V8_EXPORT_PRIVATE OptimizingCompileDispatcher {
enum ModeFlag { COMPILE, FLUSH };
void FlushOutputQueue(bool restore_function_code);
- void CompileNext(CompilationJob* job);
- CompilationJob* NextInput(bool check_if_flushing = false);
+ void CompileNext(OptimizedCompilationJob* job);
+ OptimizedCompilationJob* NextInput(bool check_if_flushing = false);
inline int InputQueueIndex(int i) {
int result = (i + input_queue_shift_) % input_queue_capacity_;
@@ -70,14 +70,14 @@ class V8_EXPORT_PRIVATE OptimizingCompileDispatcher {
Isolate* isolate_;
// Circular queue of incoming recompilation tasks (including OSR).
- CompilationJob** input_queue_;
+ OptimizedCompilationJob** input_queue_;
int input_queue_capacity_;
int input_queue_length_;
int input_queue_shift_;
base::Mutex input_queue_mutex_;
// Queue of recompilation tasks ready to be installed (excluding OSR).
- std::queue<CompilationJob*> output_queue_;
+ std::queue<OptimizedCompilationJob*> output_queue_;
// Used for job based recompilation which has multiple producers on
// different threads.
base::Mutex output_queue_mutex_;
diff --git a/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.cc b/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.cc
index 23a607a093..3e90ccfa40 100644
--- a/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.cc
+++ b/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.cc
@@ -6,7 +6,6 @@
#include "src/assert-scope.h"
#include "src/base/optional.h"
-#include "src/compilation-info.h"
#include "src/compiler-dispatcher/compiler-dispatcher-tracer.h"
#include "src/compiler.h"
#include "src/flags.h"
@@ -18,6 +17,7 @@
#include "src/parsing/parser.h"
#include "src/parsing/scanner-character-streams.h"
#include "src/unicode-cache.h"
+#include "src/unoptimized-compilation-info.h"
#include "src/utils.h"
namespace v8 {
@@ -77,8 +77,7 @@ UnoptimizedCompileJob::UnoptimizedCompileJob(Isolate* isolate,
trace_compiler_dispatcher_jobs_(FLAG_trace_compiler_dispatcher_jobs) {
DCHECK(!shared_->is_toplevel());
// TODO(rmcilroy): Handle functions with non-empty outer scope info.
- DCHECK(shared_->outer_scope_info()->IsTheHole(isolate) ||
- ScopeInfo::cast(shared_->outer_scope_info())->length() == 0);
+ DCHECK(!shared_->HasOuterScopeInfo());
HandleScope scope(isolate);
Handle<Script> script(Script::cast(shared_->script()), isolate);
Handle<String> source(String::cast(script->source()), isolate);
@@ -127,7 +126,7 @@ void UnoptimizedCompileJob::PrepareOnMainThread(Isolate* isolate) {
parse_info_->InitFromIsolate(isolate);
if (source->IsExternalTwoByteString() || source->IsExternalOneByteString()) {
std::unique_ptr<Utf16CharacterStream> stream(ScannerStream::For(
- source, shared_->start_position(), shared_->end_position()));
+ source, shared_->StartPosition(), shared_->EndPosition()));
parse_info_->set_character_stream(std::move(stream));
} else {
source = String::Flatten(source);
@@ -151,8 +150,8 @@ void UnoptimizedCompileJob::PrepareOnMainThread(Isolate* isolate) {
} else {
// Otherwise, create a copy of the part of the string we'll parse in the
// zone.
- length = (shared_->end_position() - shared_->start_position());
- offset = shared_->start_position();
+ length = (shared_->EndPosition() - shared_->StartPosition());
+ offset = shared_->StartPosition();
int byte_len = length * (source->IsOneByteRepresentation() ? 1 : 2);
data = parse_info_->zone()->New(byte_len);
@@ -162,12 +161,11 @@ void UnoptimizedCompileJob::PrepareOnMainThread(Isolate* isolate) {
DCHECK(content.IsFlat());
if (content.IsOneByte()) {
MemCopy(const_cast<void*>(data),
- &content.ToOneByteVector().at(shared_->start_position()),
+ &content.ToOneByteVector().at(shared_->StartPosition()),
byte_len);
} else {
MemCopy(const_cast<void*>(data),
- &content.ToUC16Vector().at(shared_->start_position()),
- byte_len);
+ &content.ToUC16Vector().at(shared_->StartPosition()), byte_len);
}
}
Handle<String> wrapper;
@@ -188,15 +186,15 @@ void UnoptimizedCompileJob::PrepareOnMainThread(Isolate* isolate) {
}
wrapper_ = isolate->global_handles()->Create(*wrapper);
std::unique_ptr<Utf16CharacterStream> stream(
- ScannerStream::For(wrapper_, shared_->start_position() - offset,
- shared_->end_position() - offset));
+ ScannerStream::For(wrapper_, shared_->StartPosition() - offset,
+ shared_->EndPosition() - offset));
parse_info_->set_character_stream(std::move(stream));
}
parse_info_->set_hash_seed(isolate->heap()->HashSeed());
parse_info_->set_is_named_expression(shared_->is_named_expression());
- parse_info_->set_compiler_hints(shared_->compiler_hints());
- parse_info_->set_start_position(shared_->start_position());
- parse_info_->set_end_position(shared_->end_position());
+ parse_info_->set_function_flags(shared_->flags());
+ parse_info_->set_start_position(shared_->StartPosition());
+ parse_info_->set_end_position(shared_->EndPosition());
parse_info_->set_unicode_cache(unicode_cache_.get());
parse_info_->set_language_mode(shared_->language_mode());
parse_info_->set_function_literal_id(shared_->function_literal_id());
@@ -207,13 +205,12 @@ void UnoptimizedCompileJob::PrepareOnMainThread(Isolate* isolate) {
parser_.reset(new Parser(parse_info_.get()));
MaybeHandle<ScopeInfo> outer_scope_info;
- if (!shared_->outer_scope_info()->IsTheHole(isolate) &&
- ScopeInfo::cast(shared_->outer_scope_info())->length() > 0) {
- outer_scope_info = handle(ScopeInfo::cast(shared_->outer_scope_info()));
+ if (shared_->HasOuterScopeInfo()) {
+ outer_scope_info = handle(shared_->GetOuterScopeInfo());
}
parser_->DeserializeScopeChain(parse_info_.get(), outer_scope_info);
- Handle<String> name(shared_->name());
+ Handle<String> name(shared_->Name());
parse_info_->set_function_name(
parse_info_->ast_value_factory()->GetString(name));
set_status(Status::kPrepared);
@@ -293,9 +290,8 @@ void UnoptimizedCompileJob::FinalizeOnMainThread(Isolate* isolate) {
// Allocate scope infos for the literal.
DeclarationScope::AllocateScopeInfos(parse_info_.get(), isolate,
AnalyzeMode::kRegular);
- compilation_job_->compilation_info()->set_shared_info(shared_);
if (compilation_job_->state() == CompilationJob::State::kFailed ||
- !Compiler::FinalizeCompilationJob(compilation_job_.release(),
+ !Compiler::FinalizeCompilationJob(compilation_job_.release(), shared_,
isolate)) {
if (!isolate->has_pending_exception()) isolate->StackOverflow();
set_status(Status::kFailed);
diff --git a/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.h b/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.h
index 153c8ba26d..8352e4e795 100644
--- a/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.h
+++ b/deps/v8/src/compiler-dispatcher/unoptimized-compile-job.h
@@ -18,8 +18,6 @@ namespace internal {
class AstValueFactory;
class AstStringConstants;
class CompilerDispatcherTracer;
-class CompilationInfo;
-class CompilationJob;
class DeferredHandles;
class FunctionLiteral;
class Isolate;
@@ -28,6 +26,7 @@ class Parser;
class SharedFunctionInfo;
class String;
class UnicodeCache;
+class UnoptimizedCompilationJob;
class Utf16CharacterStream;
class V8_EXPORT_PRIVATE UnoptimizedCompileJob : public CompilerDispatcherJob {
@@ -77,7 +76,7 @@ class V8_EXPORT_PRIVATE UnoptimizedCompileJob : public CompilerDispatcherJob {
std::unique_ptr<Parser> parser_;
// Members required for compiling.
- std::unique_ptr<CompilationJob> compilation_job_;
+ std::unique_ptr<UnoptimizedCompilationJob> compilation_job_;
bool trace_compiler_dispatcher_jobs_;
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index 9b25832668..1b3a154f52 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -15,7 +15,6 @@
#include "src/base/optional.h"
#include "src/bootstrapper.h"
#include "src/compilation-cache.h"
-#include "src/compilation-info.h"
#include "src/compiler-dispatcher/compiler-dispatcher.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/compiler/pipeline.h"
@@ -29,6 +28,7 @@
#include "src/log-inl.h"
#include "src/messages.h"
#include "src/objects/map.h"
+#include "src/optimized-compilation-info.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parser.h"
#include "src/parsing/parsing.h"
@@ -37,23 +37,25 @@
#include "src/runtime-profiler.h"
#include "src/snapshot/code-serializer.h"
#include "src/unicode-cache.h"
+#include "src/unoptimized-compilation-info.h"
#include "src/vm-state-inl.h"
namespace v8 {
namespace internal {
-// A wrapper around a CompilationInfo that detaches the Handles from
+// A wrapper around a OptimizedCompilationInfo that detaches the Handles from
// the underlying DeferredHandleScope and stores them in info_ on
// destruction.
class CompilationHandleScope final {
public:
- explicit CompilationHandleScope(Isolate* isolate, CompilationInfo* info)
+ explicit CompilationHandleScope(Isolate* isolate,
+ OptimizedCompilationInfo* info)
: deferred_(isolate), info_(info) {}
~CompilationHandleScope() { info_->set_deferred_handles(deferred_.Detach()); }
private:
DeferredHandleScope deferred_;
- CompilationInfo* info_;
+ OptimizedCompilationInfo* info_;
};
// Helper that times a scoped region and records the elapsed time.
@@ -69,19 +71,128 @@ struct ScopedTimer {
base::TimeDelta* location_;
};
+namespace {
+
+void LogFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
+ Handle<SharedFunctionInfo> shared,
+ Handle<Script> script,
+ Handle<AbstractCode> abstract_code, bool optimizing,
+ double time_taken_ms, Isolate* isolate) {
+ DCHECK(!abstract_code.is_null());
+ DCHECK(!abstract_code.is_identical_to(BUILTIN_CODE(isolate, CompileLazy)));
+
+ // Log the code generation. If source information is available include
+ // script name and line number. Check explicitly whether logging is
+ // enabled as finding the line number is not free.
+ if (!isolate->logger()->is_logging_code_events() &&
+ !isolate->is_profiling() && !FLAG_log_function_events) {
+ return;
+ }
+
+ int line_num = Script::GetLineNumber(script, shared->StartPosition()) + 1;
+ int column_num = Script::GetColumnNumber(script, shared->StartPosition()) + 1;
+ String* script_name = script->name()->IsString()
+ ? String::cast(script->name())
+ : isolate->heap()->empty_string();
+ CodeEventListener::LogEventsAndTags log_tag =
+ Logger::ToNativeByScript(tag, *script);
+ PROFILE(isolate, CodeCreateEvent(log_tag, *abstract_code, *shared,
+ script_name, line_num, column_num));
+ if (!FLAG_log_function_events) return;
+
+ DisallowHeapAllocation no_gc;
+
+ std::string name = optimizing ? "optimize" : "compile";
+ switch (tag) {
+ case CodeEventListener::EVAL_TAG:
+ name += "-eval";
+ break;
+ case CodeEventListener::SCRIPT_TAG:
+ break;
+ case CodeEventListener::LAZY_COMPILE_TAG:
+ name += "-lazy";
+ break;
+ case CodeEventListener::FUNCTION_TAG:
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ LOG(isolate, FunctionEvent(name.c_str(), nullptr, script->id(), time_taken_ms,
+ shared->StartPosition(), shared->EndPosition(),
+ shared->DebugName()));
+}
+
+} // namespace
+
+// ----------------------------------------------------------------------------
+// Implementation of UnoptimizedCompilationJob
+
+CompilationJob::Status UnoptimizedCompilationJob::ExecuteJob() {
+ DisallowHeapAllocation no_allocation;
+ DisallowHandleAllocation no_handles;
+ DisallowHandleDereference no_deref;
+ DisallowCodeDependencyChange no_dependency_change;
+
+ // Delegate to the underlying implementation.
+ DCHECK_EQ(state(), State::kReadyToExecute);
+ ScopedTimer t(&time_taken_to_execute_);
+ return UpdateState(ExecuteJobImpl(), State::kReadyToFinalize);
+}
+
+CompilationJob::Status UnoptimizedCompilationJob::FinalizeJob(
+ Handle<SharedFunctionInfo> shared_info, Isolate* isolate) {
+ DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
+ DisallowCodeDependencyChange no_dependency_change;
+ DisallowJavascriptExecution no_js(isolate);
+
+ // Delegate to the underlying implementation.
+ DCHECK_EQ(state(), State::kReadyToFinalize);
+ ScopedTimer t(&time_taken_to_finalize_);
+ return UpdateState(FinalizeJobImpl(shared_info, isolate), State::kSucceeded);
+}
+
+void UnoptimizedCompilationJob::RecordCompilationStats(Isolate* isolate) const {
+ int code_size;
+ if (compilation_info()->has_bytecode_array()) {
+ code_size = compilation_info()->bytecode_array()->SizeIncludingMetadata();
+ } else {
+ DCHECK(compilation_info()->has_asm_wasm_data());
+ code_size = compilation_info()->asm_wasm_data()->Size();
+ }
+
+ Counters* counters = isolate->counters();
+ // TODO(4280): Rename counters from "baseline" to "unoptimized" eventually.
+ counters->total_baseline_code_size()->Increment(code_size);
+ counters->total_baseline_compile_count()->Increment(1);
+
+ // TODO(5203): Add timers for each phase of compilation.
+}
+
+void UnoptimizedCompilationJob::RecordFunctionCompilation(
+ CodeEventListener::LogEventsAndTags tag, Handle<SharedFunctionInfo> shared,
+ Isolate* isolate) const {
+ Handle<AbstractCode> abstract_code;
+ if (compilation_info()->has_bytecode_array()) {
+ abstract_code =
+ Handle<AbstractCode>::cast(compilation_info()->bytecode_array());
+ } else {
+ DCHECK(compilation_info()->has_asm_wasm_data());
+ abstract_code =
+ Handle<AbstractCode>::cast(BUILTIN_CODE(isolate, InstantiateAsmJs));
+ }
+
+ double time_taken_ms = time_taken_to_execute_.InMillisecondsF() +
+ time_taken_to_finalize_.InMillisecondsF();
+
+ LogFunctionCompilation(tag, shared, parse_info()->script(), abstract_code,
+ false, time_taken_ms, isolate);
+}
+
// ----------------------------------------------------------------------------
-// Implementation of CompilationJob
-
-CompilationJob::CompilationJob(uintptr_t stack_limit, ParseInfo* parse_info,
- CompilationInfo* compilation_info,
- const char* compiler_name, State initial_state)
- : parse_info_(parse_info),
- compilation_info_(compilation_info),
- compiler_name_(compiler_name),
- state_(initial_state),
- stack_limit_(stack_limit) {}
-
-CompilationJob::Status CompilationJob::PrepareJob(Isolate* isolate) {
+// Implementation of OptimizedCompilationJob
+
+CompilationJob::Status OptimizedCompilationJob::PrepareJob(Isolate* isolate) {
DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
DisallowJavascriptExecution no_js(isolate);
@@ -99,7 +210,7 @@ CompilationJob::Status CompilationJob::PrepareJob(Isolate* isolate) {
return UpdateState(PrepareJobImpl(isolate), State::kReadyToExecute);
}
-CompilationJob::Status CompilationJob::ExecuteJob() {
+CompilationJob::Status OptimizedCompilationJob::ExecuteJob() {
DisallowHeapAllocation no_allocation;
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
@@ -111,7 +222,7 @@ CompilationJob::Status CompilationJob::ExecuteJob() {
return UpdateState(ExecuteJobImpl(), State::kReadyToFinalize);
}
-CompilationJob::Status CompilationJob::FinalizeJob(Isolate* isolate) {
+CompilationJob::Status OptimizedCompilationJob::FinalizeJob(Isolate* isolate) {
DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
DisallowCodeDependencyChange no_dependency_change;
DisallowJavascriptExecution no_js(isolate);
@@ -124,37 +235,21 @@ CompilationJob::Status CompilationJob::FinalizeJob(Isolate* isolate) {
return UpdateState(FinalizeJobImpl(isolate), State::kSucceeded);
}
-CompilationJob::Status CompilationJob::RetryOptimization(BailoutReason reason) {
+CompilationJob::Status OptimizedCompilationJob::RetryOptimization(
+ BailoutReason reason) {
DCHECK(compilation_info_->IsOptimizing());
compilation_info_->RetryOptimization(reason);
- state_ = State::kFailed;
- return FAILED;
+ return UpdateState(FAILED, State::kFailed);
}
-CompilationJob::Status CompilationJob::AbortOptimization(BailoutReason reason) {
+CompilationJob::Status OptimizedCompilationJob::AbortOptimization(
+ BailoutReason reason) {
DCHECK(compilation_info_->IsOptimizing());
compilation_info_->AbortOptimization(reason);
- state_ = State::kFailed;
- return FAILED;
+ return UpdateState(FAILED, State::kFailed);
}
-void CompilationJob::RecordUnoptimizedCompilationStats(Isolate* isolate) const {
- int code_size;
- if (compilation_info()->has_bytecode_array()) {
- code_size = compilation_info()->bytecode_array()->SizeIncludingMetadata();
- } else {
- code_size = compilation_info()->code()->SizeIncludingMetadata();
- }
-
- Counters* counters = isolate->counters();
- // TODO(4280): Rename counters from "baseline" to "unoptimized" eventually.
- counters->total_baseline_code_size()->Increment(code_size);
- counters->total_baseline_compile_count()->Increment(1);
-
- // TODO(5203): Add timers for each phase of compilation.
-}
-
-void CompilationJob::RecordOptimizedCompilationStats() const {
+void OptimizedCompilationJob::RecordCompilationStats() const {
DCHECK(compilation_info()->IsOptimizing());
Handle<JSFunction> function = compilation_info()->closure();
double ms_creategraph = time_taken_to_prepare_.InMillisecondsF();
@@ -179,65 +274,19 @@ void CompilationJob::RecordOptimizedCompilationStats() const {
}
}
-void CompilationJob::RecordFunctionCompilation(
+void OptimizedCompilationJob::RecordFunctionCompilation(
CodeEventListener::LogEventsAndTags tag, Isolate* isolate) const {
- // Log the code generation. If source information is available include
- // script name and line number. Check explicitly whether logging is
- // enabled as finding the line number is not free.
- CompilationInfo* compilation_info = this->compilation_info();
- if (!isolate->logger()->is_logging_code_events() &&
- !isolate->is_profiling() && !FLAG_log_function_events) {
- return;
- }
-
- Handle<SharedFunctionInfo> shared = compilation_info->shared_info();
- Handle<Script> script = parse_info()->script();
Handle<AbstractCode> abstract_code =
- compilation_info->has_bytecode_array()
- ? Handle<AbstractCode>::cast(compilation_info->bytecode_array())
- : Handle<AbstractCode>::cast(compilation_info->code());
-
- if (abstract_code.is_identical_to(BUILTIN_CODE(isolate, CompileLazy))) {
- return;
- }
-
- int line_num = Script::GetLineNumber(script, shared->start_position()) + 1;
- int column_num =
- Script::GetColumnNumber(script, shared->start_position()) + 1;
- String* script_name = script->name()->IsString()
- ? String::cast(script->name())
- : isolate->heap()->empty_string();
- CodeEventListener::LogEventsAndTags log_tag =
- Logger::ToNativeByScript(tag, *script);
- PROFILE(isolate, CodeCreateEvent(log_tag, *abstract_code, *shared,
- script_name, line_num, column_num));
- if (!FLAG_log_function_events) return;
+ Handle<AbstractCode>::cast(compilation_info()->code());
- DisallowHeapAllocation no_gc;
-
- double ms = time_taken_to_prepare_.InMillisecondsF();
- ms += time_taken_to_execute_.InMillisecondsF();
- ms += time_taken_to_finalize_.InMillisecondsF();
+ double time_taken_ms = time_taken_to_prepare_.InMillisecondsF() +
+ time_taken_to_execute_.InMillisecondsF() +
+ time_taken_to_finalize_.InMillisecondsF();
- std::string name = compilation_info->IsOptimizing() ? "optimize" : "compile";
- switch (tag) {
- case CodeEventListener::EVAL_TAG:
- name += "-eval";
- break;
- case CodeEventListener::SCRIPT_TAG:
- break;
- case CodeEventListener::LAZY_COMPILE_TAG:
- name += "-lazy";
- break;
- case CodeEventListener::FUNCTION_TAG:
- break;
- default:
- UNREACHABLE();
- }
-
- LOG(isolate, FunctionEvent(name.c_str(), nullptr, script->id(), ms,
- shared->start_position(), shared->end_position(),
- shared->DebugName()));
+ Handle<Script> script(
+ Script::cast(compilation_info()->shared_info()->script()));
+ LogFunctionCompilation(tag, compilation_info()->shared_info(), script,
+ abstract_code, true, time_taken_ms, isolate);
}
// ----------------------------------------------------------------------------
@@ -245,29 +294,6 @@ void CompilationJob::RecordFunctionCompilation(
namespace {
-void EnsureFeedbackMetadata(CompilationInfo* compilation_info,
- Isolate* isolate) {
- DCHECK(compilation_info->has_shared_info());
-
- // If no type feedback metadata exists, create it. At this point the
- // AstNumbering pass has already run. Note the snapshot can contain outdated
- // vectors for a different configuration, hence we also recreate a new vector
- // when the function is not compiled (i.e. no code was serialized).
-
- // TODO(mvstanton): reintroduce is_empty() predicate to feedback_metadata().
- if (compilation_info->shared_info()->feedback_metadata()->length() == 0 ||
- !compilation_info->shared_info()->is_compiled()) {
- Handle<FeedbackMetadata> feedback_metadata = FeedbackMetadata::New(
- isolate, compilation_info->feedback_vector_spec());
- compilation_info->shared_info()->set_feedback_metadata(*feedback_metadata);
- }
-
- // It's very important that recompiles do not alter the structure of the type
- // feedback vector. Verify that the structure fits the function literal.
- CHECK(!compilation_info->shared_info()->feedback_metadata()->SpecDiffersFrom(
- compilation_info->feedback_vector_spec()));
-}
-
bool UseAsmWasm(FunctionLiteral* literal, bool asm_wasm_broken) {
// Check whether asm.js validation is enabled.
if (!FLAG_validate_asm) return false;
@@ -283,37 +309,77 @@ bool UseAsmWasm(FunctionLiteral* literal, bool asm_wasm_broken) {
return literal->scope()->IsAsmModule();
}
-void InstallUnoptimizedCode(CompilationInfo* compilation_info,
- Isolate* isolate) {
- Handle<SharedFunctionInfo> shared = compilation_info->shared_info();
- DCHECK_EQ(compilation_info->shared_info()->language_mode(),
- compilation_info->literal()->language_mode());
+void InstallBytecodeArray(Handle<BytecodeArray> bytecode_array,
+ Handle<SharedFunctionInfo> shared_info,
+ ParseInfo* parse_info, Isolate* isolate) {
+ if (!FLAG_interpreted_frames_native_stack) {
+ shared_info->set_bytecode_array(*bytecode_array);
+ return;
+ }
+
+ Handle<Code> code;
+ {
+ CodeSpaceMemoryModificationScope code_allocation(isolate->heap());
+
+ code = isolate->factory()->CopyCode(
+ BUILTIN_CODE(isolate, InterpreterEntryTrampoline));
+ }
+
+ Handle<InterpreterData> interpreter_data = Handle<InterpreterData>::cast(
+ isolate->factory()->NewStruct(INTERPRETER_DATA_TYPE, TENURED));
- // Ensure feedback metadata is installed.
- EnsureFeedbackMetadata(compilation_info, isolate);
+ interpreter_data->set_bytecode_array(*bytecode_array);
+ interpreter_data->set_interpreter_trampoline(*code);
+
+ shared_info->set_interpreter_data(*interpreter_data);
+
+ Handle<Script> script = parse_info->script();
+ Handle<AbstractCode> abstract_code = Handle<AbstractCode>::cast(code);
+ int line_num =
+ Script::GetLineNumber(script, shared_info->StartPosition()) + 1;
+ int column_num =
+ Script::GetColumnNumber(script, shared_info->StartPosition()) + 1;
+ String* script_name = script->name()->IsString()
+ ? String::cast(script->name())
+ : isolate->heap()->empty_string();
+ CodeEventListener::LogEventsAndTags log_tag = Logger::ToNativeByScript(
+ CodeEventListener::INTERPRETED_FUNCTION_TAG, *script);
+ PROFILE(isolate, CodeCreateEvent(log_tag, *abstract_code, *shared_info,
+ script_name, line_num, column_num));
+}
+
+void InstallUnoptimizedCode(UnoptimizedCompilationInfo* compilation_info,
+ Handle<SharedFunctionInfo> shared_info,
+ ParseInfo* parse_info, Isolate* isolate) {
+ DCHECK_EQ(shared_info->language_mode(),
+ compilation_info->literal()->language_mode());
// Update the shared function info with the scope info.
Handle<ScopeInfo> scope_info = compilation_info->scope()->scope_info();
- shared->set_scope_info(*scope_info);
- Scope* outer_scope = compilation_info->scope()->GetOuterScopeWithContext();
- if (outer_scope) {
- shared->set_outer_scope_info(*outer_scope->scope_info());
- }
+ shared_info->set_scope_info(*scope_info);
- DCHECK(!compilation_info->code().is_null());
- shared->set_code(*compilation_info->code());
if (compilation_info->has_bytecode_array()) {
- DCHECK(!shared->HasBytecodeArray()); // Only compiled once.
+ DCHECK(!shared_info->HasBytecodeArray()); // Only compiled once.
DCHECK(!compilation_info->has_asm_wasm_data());
- shared->set_bytecode_array(*compilation_info->bytecode_array());
- } else if (compilation_info->has_asm_wasm_data()) {
- shared->set_asm_wasm_data(*compilation_info->asm_wasm_data());
+ DCHECK(!shared_info->HasFeedbackMetadata());
+
+ Handle<FeedbackMetadata> feedback_metadata = FeedbackMetadata::New(
+ isolate, compilation_info->feedback_vector_spec());
+
+ InstallBytecodeArray(compilation_info->bytecode_array(), shared_info,
+ parse_info, isolate);
+ shared_info->set_feedback_metadata(*feedback_metadata);
+ } else {
+ DCHECK(compilation_info->has_asm_wasm_data());
+ shared_info->set_asm_wasm_data(*compilation_info->asm_wasm_data());
+ shared_info->set_feedback_metadata(
+ isolate->heap()->empty_feedback_metadata());
}
// Install coverage info on the shared function info.
if (compilation_info->has_coverage_info()) {
DCHECK(isolate->is_block_code_coverage());
- isolate->debug()->InstallCoverageInfo(compilation_info->shared_info(),
+ isolate->debug()->InstallCoverageInfo(shared_info,
compilation_info->coverage_info());
}
}
@@ -327,7 +393,7 @@ void EnsureSharedFunctionInfosArrayOnScript(ParseInfo* parse_info,
parse_info->max_function_literal_id() + 1);
return;
}
- Handle<FixedArray> infos(isolate->factory()->NewFixedArray(
+ Handle<WeakFixedArray> infos(isolate->factory()->NewWeakFixedArray(
parse_info->max_function_literal_id() + 1));
parse_info->script()->set_shared_function_infos(*infos);
}
@@ -346,17 +412,17 @@ void SetSharedFunctionFlagsFromLiteral(FunctionLiteral* literal,
}
}
-CompilationJob::Status FinalizeUnoptimizedCompilationJob(CompilationJob* job,
- Isolate* isolate) {
- CompilationInfo* compilation_info = job->compilation_info();
+CompilationJob::Status FinalizeUnoptimizedCompilationJob(
+ UnoptimizedCompilationJob* job, Handle<SharedFunctionInfo> shared_info,
+ Isolate* isolate) {
+ UnoptimizedCompilationInfo* compilation_info = job->compilation_info();
ParseInfo* parse_info = job->parse_info();
- SetSharedFunctionFlagsFromLiteral(compilation_info->literal(),
- compilation_info->shared_info());
+ SetSharedFunctionFlagsFromLiteral(compilation_info->literal(), shared_info);
- CompilationJob::Status status = job->FinalizeJob(isolate);
+ CompilationJob::Status status = job->FinalizeJob(shared_info, isolate);
if (status == CompilationJob::SUCCEEDED) {
- InstallUnoptimizedCode(compilation_info, isolate);
+ InstallUnoptimizedCode(compilation_info, shared_info, parse_info, isolate);
CodeEventListener::LogEventsAndTags log_tag;
if (parse_info->is_toplevel()) {
log_tag = compilation_info->is_eval() ? CodeEventListener::EVAL_TAG
@@ -365,17 +431,18 @@ CompilationJob::Status FinalizeUnoptimizedCompilationJob(CompilationJob* job,
log_tag = parse_info->lazy_compile() ? CodeEventListener::LAZY_COMPILE_TAG
: CodeEventListener::FUNCTION_TAG;
}
- job->RecordFunctionCompilation(log_tag, isolate);
- job->RecordUnoptimizedCompilationStats(isolate);
+ job->RecordFunctionCompilation(log_tag, shared_info, isolate);
+ job->RecordCompilationStats(isolate);
}
return status;
}
-std::unique_ptr<CompilationJob> PrepareAndExecuteUnoptimizedCompileJobs(
+std::unique_ptr<UnoptimizedCompilationJob> ExecuteUnoptimizedCompileJobs(
ParseInfo* parse_info, FunctionLiteral* literal,
- AccountingAllocator* allocator, CompilationJobList* inner_function_jobs) {
+ AccountingAllocator* allocator,
+ UnoptimizedCompilationJobList* inner_function_jobs) {
if (UseAsmWasm(literal, parse_info->is_asm_wasm_broken())) {
- std::unique_ptr<CompilationJob> asm_job(
+ std::unique_ptr<UnoptimizedCompilationJob> asm_job(
AsmJs::NewCompilationJob(parse_info, literal, allocator));
if (asm_job->ExecuteJob() == CompilationJob::SUCCEEDED) {
return asm_job;
@@ -387,45 +454,45 @@ std::unique_ptr<CompilationJob> PrepareAndExecuteUnoptimizedCompileJobs(
// through to standard unoptimized compile.
}
ZoneVector<FunctionLiteral*> eager_inner_literals(0, parse_info->zone());
- std::unique_ptr<CompilationJob> job(
+ std::unique_ptr<UnoptimizedCompilationJob> job(
interpreter::Interpreter::NewCompilationJob(
parse_info, literal, allocator, &eager_inner_literals));
if (job->ExecuteJob() != CompilationJob::SUCCEEDED) {
// Compilation failed, return null.
- return std::unique_ptr<CompilationJob>();
+ return std::unique_ptr<UnoptimizedCompilationJob>();
}
// Recursively compile eager inner literals.
for (FunctionLiteral* inner_literal : eager_inner_literals) {
- std::unique_ptr<CompilationJob> inner_job(
- PrepareAndExecuteUnoptimizedCompileJobs(
- parse_info, inner_literal, allocator, inner_function_jobs));
+ std::unique_ptr<UnoptimizedCompilationJob> inner_job(
+ ExecuteUnoptimizedCompileJobs(parse_info, inner_literal, allocator,
+ inner_function_jobs));
// Compilation failed, return null.
- if (!inner_job) return std::unique_ptr<CompilationJob>();
+ if (!inner_job) return std::unique_ptr<UnoptimizedCompilationJob>();
inner_function_jobs->emplace_front(std::move(inner_job));
}
return job;
}
-std::unique_ptr<CompilationJob> GenerateUnoptimizedCode(
+std::unique_ptr<UnoptimizedCompilationJob> GenerateUnoptimizedCode(
ParseInfo* parse_info, AccountingAllocator* allocator,
- CompilationJobList* inner_function_jobs) {
+ UnoptimizedCompilationJobList* inner_function_jobs) {
DisallowHeapAllocation no_allocation;
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
DCHECK(inner_function_jobs->empty());
if (!Compiler::Analyze(parse_info)) {
- return std::unique_ptr<CompilationJob>();
+ return std::unique_ptr<UnoptimizedCompilationJob>();
}
// Prepare and execute compilation of the outer-most function.
- std::unique_ptr<CompilationJob> outer_function_job(
- PrepareAndExecuteUnoptimizedCompileJobs(parse_info, parse_info->literal(),
- allocator, inner_function_jobs));
- if (!outer_function_job) return std::unique_ptr<CompilationJob>();
+ std::unique_ptr<UnoptimizedCompilationJob> outer_function_job(
+ ExecuteUnoptimizedCompileJobs(parse_info, parse_info->literal(),
+ allocator, inner_function_jobs));
+ if (!outer_function_job) return std::unique_ptr<UnoptimizedCompilationJob>();
// Character stream shouldn't be used again.
parse_info->ResetCharacterStream();
@@ -433,10 +500,11 @@ std::unique_ptr<CompilationJob> GenerateUnoptimizedCode(
return outer_function_job;
}
-bool FinalizeUnoptimizedCode(ParseInfo* parse_info, Isolate* isolate,
- Handle<SharedFunctionInfo> shared_info,
- CompilationJob* outer_function_job,
- CompilationJobList* inner_function_jobs) {
+bool FinalizeUnoptimizedCode(
+ ParseInfo* parse_info, Isolate* isolate,
+ Handle<SharedFunctionInfo> shared_info,
+ UnoptimizedCompilationJob* outer_function_job,
+ UnoptimizedCompilationJobList* inner_function_jobs) {
DCHECK(AllowCompilation::IsAllowed(isolate));
// Allocate scope infos for the literal.
@@ -444,9 +512,8 @@ bool FinalizeUnoptimizedCode(ParseInfo* parse_info, Isolate* isolate,
AnalyzeMode::kRegular);
// Finalize the outer-most function's compilation job.
- outer_function_job->compilation_info()->set_shared_info(shared_info);
- if (FinalizeUnoptimizedCompilationJob(outer_function_job, isolate) !=
- CompilationJob::SUCCEEDED) {
+ if (FinalizeUnoptimizedCompilationJob(outer_function_job, shared_info,
+ isolate) != CompilationJob::SUCCEEDED) {
return false;
}
@@ -459,8 +526,8 @@ bool FinalizeUnoptimizedCode(ParseInfo* parse_info, Isolate* isolate,
// The inner function might be compiled already if compiling for debug.
// TODO(rmcilroy): Fix this and DCHECK !is_compiled() once Full-Codegen dies
if (inner_shared_info->is_compiled()) continue;
- inner_job->compilation_info()->set_shared_info(inner_shared_info);
- if (FinalizeUnoptimizedCompilationJob(inner_job.get(), isolate) !=
+ if (FinalizeUnoptimizedCompilationJob(inner_job.get(), inner_shared_info,
+ isolate) !=
CompilationJob::SUCCEEDED) {
return false;
}
@@ -475,7 +542,7 @@ bool FinalizeUnoptimizedCode(ParseInfo* parse_info, Isolate* isolate,
return true;
}
-MUST_USE_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeCache(
+V8_WARN_UNUSED_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeCache(
Handle<JSFunction> function, BailoutId osr_offset) {
RuntimeCallTimerScope runtimeTimer(
function->GetIsolate(),
@@ -500,7 +567,7 @@ MUST_USE_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeCache(
return MaybeHandle<Code>();
}
-void ClearOptimizedCodeCache(CompilationInfo* compilation_info) {
+void ClearOptimizedCodeCache(OptimizedCompilationInfo* compilation_info) {
Handle<JSFunction> function = compilation_info->closure();
if (compilation_info->osr_offset().IsNone()) {
Handle<FeedbackVector> vector =
@@ -509,7 +576,8 @@ void ClearOptimizedCodeCache(CompilationInfo* compilation_info) {
}
}
-void InsertCodeIntoOptimizedCodeCache(CompilationInfo* compilation_info) {
+void InsertCodeIntoOptimizedCodeCache(
+ OptimizedCompilationInfo* compilation_info) {
Handle<Code> code = compilation_info->code();
if (code->kind() != Code::OPTIMIZED_FUNCTION) return; // Nothing to do.
@@ -533,11 +601,11 @@ void InsertCodeIntoOptimizedCodeCache(CompilationInfo* compilation_info) {
}
}
-bool GetOptimizedCodeNow(CompilationJob* job, Isolate* isolate) {
+bool GetOptimizedCodeNow(OptimizedCompilationJob* job, Isolate* isolate) {
TimerEventScope<TimerEventRecompileSynchronous> timer(isolate);
RuntimeCallTimerScope runtimeTimer(
isolate, RuntimeCallCounterId::kRecompileSynchronous);
- CompilationInfo* compilation_info = job->compilation_info();
+ OptimizedCompilationInfo* compilation_info = job->compilation_info();
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.RecompileSynchronous");
@@ -554,15 +622,15 @@ bool GetOptimizedCodeNow(CompilationJob* job, Isolate* isolate) {
}
// Success!
- job->RecordOptimizedCompilationStats();
+ job->RecordCompilationStats();
DCHECK(!isolate->has_pending_exception());
InsertCodeIntoOptimizedCodeCache(compilation_info);
job->RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG, isolate);
return true;
}
-bool GetOptimizedCodeLater(CompilationJob* job, Isolate* isolate) {
- CompilationInfo* compilation_info = job->compilation_info();
+bool GetOptimizedCodeLater(OptimizedCompilationJob* job, Isolate* isolate) {
+ OptimizedCompilationInfo* compilation_info = job->compilation_info();
if (!isolate->optimizing_compile_dispatcher()->IsQueueAvailable()) {
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Compilation queue full, will retry optimizing ");
@@ -641,10 +709,9 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
// BUG(5946): This DCHECK is necessary to make certain that we won't
// tolerate the lack of a script without bytecode.
DCHECK_IMPLIES(!has_script, shared->HasBytecodeArray());
- std::unique_ptr<CompilationJob> job(
+ std::unique_ptr<OptimizedCompilationJob> job(
compiler::Pipeline::NewCompilationJob(function, has_script));
- CompilationInfo* compilation_info = job->compilation_info();
- ParseInfo* parse_info = job->parse_info();
+ OptimizedCompilationInfo* compilation_info = job->compilation_info();
compilation_info->SetOptimizingForOsr(osr_offset, osr_frame);
@@ -688,7 +755,6 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
// Reopen handles in the new CompilationHandleScope.
compilation_info->ReopenHandlesInNewHandleScope();
- parse_info->ReopenHandlesInNewHandleScope();
if (mode == ConcurrencyMode::kConcurrent) {
if (GetOptimizedCodeLater(job.get(), isolate)) {
@@ -696,11 +762,10 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
// Set the optimization marker and return a code object which checks it.
function->SetOptimizationMarker(OptimizationMarker::kInOptimizationQueue);
- if (function->IsInterpreted()) {
- return BUILTIN_CODE(isolate, InterpreterEntryTrampoline);
- } else {
- return BUILTIN_CODE(isolate, CheckOptimizationMarker);
- }
+ DCHECK(function->IsInterpreted() ||
+ (!function->is_compiled() && function->shared()->IsInterpreted()));
+ DCHECK(function->shared()->HasBytecodeArray());
+ return BUILTIN_CODE(isolate, InterpreterEntryTrampoline);
}
} else {
if (GetOptimizedCodeNow(job.get(), isolate))
@@ -711,9 +776,9 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
return MaybeHandle<Code>();
}
-CompilationJob::Status FinalizeOptimizedCompilationJob(CompilationJob* job,
- Isolate* isolate) {
- CompilationInfo* compilation_info = job->compilation_info();
+CompilationJob::Status FinalizeOptimizedCompilationJob(
+ OptimizedCompilationJob* job, Isolate* isolate) {
+ OptimizedCompilationInfo* compilation_info = job->compilation_info();
TimerEventScope<TimerEventRecompileSynchronous> timer(isolate);
RuntimeCallTimerScope runtimeTimer(
@@ -739,7 +804,7 @@ CompilationJob::Status FinalizeOptimizedCompilationJob(CompilationJob* job,
} else if (compilation_info->dependencies()->HasAborted()) {
job->RetryOptimization(BailoutReason::kBailedOutDueToDependencyChange);
} else if (job->FinalizeJob(isolate) == CompilationJob::SUCCEEDED) {
- job->RecordOptimizedCompilationStats();
+ job->RecordCompilationStats();
job->RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG,
isolate);
InsertCodeIntoOptimizedCodeCache(compilation_info);
@@ -760,7 +825,7 @@ CompilationJob::Status FinalizeOptimizedCompilationJob(CompilationJob* job,
PrintF(" because: %s]\n",
GetBailoutReason(compilation_info->bailout_reason()));
}
- compilation_info->closure()->set_code(shared->code());
+ compilation_info->closure()->set_code(shared->GetCode());
// Clear the InOptimizationQueue marker, if it exists.
if (compilation_info->closure()->IsInOptimizationQueue()) {
compilation_info->closure()->ClearOptimizationMarker();
@@ -784,8 +849,9 @@ bool FailWithPendingException(Isolate* isolate, ParseInfo* parse_info,
}
MaybeHandle<SharedFunctionInfo> FinalizeTopLevel(
- ParseInfo* parse_info, Isolate* isolate, CompilationJob* outer_function_job,
- CompilationJobList* inner_function_jobs) {
+ ParseInfo* parse_info, Isolate* isolate,
+ UnoptimizedCompilationJob* outer_function_job,
+ UnoptimizedCompilationJobList* inner_function_jobs) {
Handle<Script> script = parse_info->script();
// Internalize ast values onto the heap.
@@ -797,9 +863,8 @@ MaybeHandle<SharedFunctionInfo> FinalizeTopLevel(
DCHECK_EQ(kNoSourcePosition,
parse_info->literal()->function_token_position());
Handle<SharedFunctionInfo> shared_info =
- isolate->factory()->NewSharedFunctionInfoForLiteral(parse_info->literal(),
- parse_info->script());
- shared_info->set_is_toplevel(true);
+ isolate->factory()->NewSharedFunctionInfoForLiteral(
+ parse_info->literal(), parse_info->script(), true);
// Finalize compilation of the unoptimized bytecode or asm-js data.
if (!FinalizeUnoptimizedCode(parse_info, isolate, shared_info,
@@ -843,9 +908,10 @@ MaybeHandle<SharedFunctionInfo> CompileToplevel(ParseInfo* parse_info,
parse_info->is_eval() ? "V8.CompileEval" : "V8.Compile");
// Generate the unoptimized bytecode or asm-js data.
- CompilationJobList inner_function_jobs;
- std::unique_ptr<CompilationJob> outer_function_job(GenerateUnoptimizedCode(
- parse_info, isolate->allocator(), &inner_function_jobs));
+ UnoptimizedCompilationJobList inner_function_jobs;
+ std::unique_ptr<UnoptimizedCompilationJob> outer_function_job(
+ GenerateUnoptimizedCode(parse_info, isolate->allocator(),
+ &inner_function_jobs));
if (!outer_function_job) {
FailWithPendingException(isolate, parse_info,
Compiler::ClearExceptionFlag::KEEP_EXCEPTION);
@@ -856,9 +922,9 @@ MaybeHandle<SharedFunctionInfo> CompileToplevel(ParseInfo* parse_info,
&inner_function_jobs);
}
-std::unique_ptr<CompilationJob> CompileTopLevelOnBackgroundThread(
+std::unique_ptr<UnoptimizedCompilationJob> CompileTopLevelOnBackgroundThread(
ParseInfo* parse_info, AccountingAllocator* allocator,
- CompilationJobList* inner_function_jobs) {
+ UnoptimizedCompilationJobList* inner_function_jobs) {
DisallowHeapAllocation no_allocation;
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
@@ -877,7 +943,7 @@ std::unique_ptr<CompilationJob> CompileTopLevelOnBackgroundThread(
DCHECK(!parse_info->consumed_preparsed_scope_data()->HasData());
// Generate the unoptimized bytecode or asm-js data.
- std::unique_ptr<CompilationJob> outer_function_job(
+ std::unique_ptr<UnoptimizedCompilationJob> outer_function_job(
GenerateUnoptimizedCode(parse_info, allocator, inner_function_jobs));
return outer_function_job;
}
@@ -891,7 +957,6 @@ class BackgroundCompileTask : public ScriptCompiler::ScriptStreamingTask {
private:
ScriptStreamingData* source_; // Not owned.
int stack_size_;
- ScriptData* script_data_;
AccountingAllocator* allocator_;
TimedHistogram* timer_;
@@ -902,7 +967,6 @@ BackgroundCompileTask::BackgroundCompileTask(ScriptStreamingData* source,
Isolate* isolate)
: source_(source),
stack_size_(i::FLAG_stack_size),
- script_data_(nullptr),
timer_(isolate->counters()->compile_script_on_background()) {
VMState<PARSER> state(isolate);
@@ -955,21 +1019,12 @@ void BackgroundCompileTask::Run() {
source_->parser->set_stack_limit(stack_limit);
source_->parser->ParseOnBackground(source_->info.get());
- if (FLAG_background_compile && source_->info->literal() != nullptr) {
+ if (source_->info->literal() != nullptr) {
// Parsing has succeeded, compile.
source_->outer_function_job = CompileTopLevelOnBackgroundThread(
source_->info.get(), allocator_, &source_->inner_function_jobs);
}
- if (script_data_ != nullptr) {
- source_->cached_data.reset(new ScriptCompiler::CachedData(
- script_data_->data(), script_data_->length(),
- ScriptCompiler::CachedData::BufferOwned));
- script_data_->ReleaseDataOwnership();
- delete script_data_;
- script_data_ = nullptr;
- }
-
source_->info->EmitBackgroundParseStatisticsOnBackgroundThread();
source_->info->set_on_background_thread(false);
@@ -1008,6 +1063,7 @@ bool Compiler::Compile(Handle<SharedFunctionInfo> shared_info,
DCHECK(!shared_info->is_compiled());
Isolate* isolate = shared_info->GetIsolate();
+ DCHECK(AllowCompilation::IsAllowed(isolate));
DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
DCHECK(!isolate->has_pending_exception());
DCHECK(!shared_info->HasBytecodeArray());
@@ -1049,9 +1105,10 @@ bool Compiler::Compile(Handle<SharedFunctionInfo> shared_info,
}
// Generate the unoptimized bytecode or asm-js data.
- CompilationJobList inner_function_jobs;
- std::unique_ptr<CompilationJob> outer_function_job(GenerateUnoptimizedCode(
- &parse_info, isolate->allocator(), &inner_function_jobs));
+ UnoptimizedCompilationJobList inner_function_jobs;
+ std::unique_ptr<UnoptimizedCompilationJob> outer_function_job(
+ GenerateUnoptimizedCode(&parse_info, isolate->allocator(),
+ &inner_function_jobs));
if (!outer_function_job) {
return FailWithPendingException(isolate, &parse_info, flag);
}
@@ -1079,11 +1136,10 @@ bool Compiler::Compile(Handle<JSFunction> function, ClearExceptionFlag flag) {
Isolate* isolate = function->GetIsolate();
Handle<SharedFunctionInfo> shared_info = handle(function->shared());
- DCHECK(AllowCompilation::IsAllowed(isolate));
// Ensure shared function info is compiled.
if (!shared_info->is_compiled() && !Compile(shared_info, flag)) return false;
- Handle<Code> code = handle(shared_info->code(), isolate);
+ Handle<Code> code = handle(shared_info->GetCode(), isolate);
// Allocate FeedbackVector for the JSFunction.
JSFunction::EnsureFeedbackVector(function);
@@ -1125,7 +1181,8 @@ bool Compiler::CompileOptimized(Handle<JSFunction> function,
// already if we are optimizing.
DCHECK(!isolate->has_pending_exception());
DCHECK(function->shared()->is_compiled());
- code = handle(function->shared()->code(), isolate);
+ DCHECK(function->shared()->IsInterpreted());
+ code = BUILTIN_CODE(isolate, InterpreterEntryTrampoline);
}
// Install code on closure.
@@ -1151,9 +1208,9 @@ MaybeHandle<JSArray> Compiler::CompileForLiveEdit(Handle<Script> script) {
// In order to ensure that live edit function info collection finds the newly
// generated shared function infos, clear the script's list temporarily
// and restore it at the end of this method.
- Handle<FixedArray> old_function_infos(script->shared_function_infos(),
- isolate);
- script->set_shared_function_infos(isolate->heap()->empty_fixed_array());
+ Handle<WeakFixedArray> old_function_infos(script->shared_function_infos(),
+ isolate);
+ script->set_shared_function_infos(isolate->heap()->empty_weak_fixed_array());
// Start a compilation.
ParseInfo parse_info(script);
@@ -1307,56 +1364,6 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
return result;
}
-MaybeHandle<JSFunction> Compiler::GetWrappedFunction(
- Handle<String> source, Handle<FixedArray> arguments,
- Handle<Context> context, int line_offset, int column_offset,
- Handle<Object> script_name, ScriptOriginOptions options) {
- Isolate* isolate = source->GetIsolate();
- int source_length = source->length();
- isolate->counters()->total_compile_size()->Increment(source_length);
-
- Handle<Script> script = isolate->factory()->NewScript(source);
- if (isolate->NeedsSourcePositionsForProfiling()) {
- Script::InitLineEnds(script);
- }
- if (!script_name.is_null()) {
- script->set_name(*script_name);
- script->set_line_offset(line_offset);
- script->set_column_offset(column_offset);
- }
- script->set_wrapped_arguments(*arguments);
- script->set_origin_options(options);
-
- ParseInfo parse_info(script);
- parse_info.set_eval(); // Use an eval scope as declaration scope.
- parse_info.set_wrapped_as_function();
- if (!context->IsNativeContext()) {
- parse_info.set_outer_scope_info(handle(context->scope_info()));
- }
-
- Handle<SharedFunctionInfo> top_level;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, top_level,
- CompileToplevel(&parse_info, isolate), JSFunction);
-
- Handle<JSFunction> top_level_fun =
- isolate->factory()->NewFunctionFromSharedFunctionInfo(top_level, context,
- NOT_TENURED);
-
- // TODO(yangguo): consider not having to call the top-level function, and
- // instead instantiate the wrapper function directly.
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result,
- Execution::Call(isolate, top_level_fun, isolate->global_proxy(), 0,
- nullptr),
- JSFunction);
-
- // OnAfterCompile has to be called after we create the JSFunction, which we
- // may require to recompile the eval for debugging, if we find a function
- // that contains break points in the eval script.
- isolate->debug()->OnAfterCompile(script);
- return Handle<JSFunction>::cast(result);
-}
bool Compiler::CodeGenerationFromStringsAllowed(Isolate* isolate,
Handle<Context> context,
@@ -1637,17 +1644,17 @@ Handle<Script> NewScript(Isolate* isolate, Handle<String> source,
MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
Handle<String> source, const Compiler::ScriptDetails& script_details,
ScriptOriginOptions origin_options, v8::Extension* extension,
- ScriptData** cached_data, ScriptCompiler::CompileOptions compile_options,
+ ScriptData* cached_data, ScriptCompiler::CompileOptions compile_options,
ScriptCompiler::NoCacheReason no_cache_reason, NativesFlag natives) {
Isolate* isolate = source->GetIsolate();
ScriptCompileTimerScope compile_timer(isolate, no_cache_reason);
if (compile_options == ScriptCompiler::kNoCompileOptions ||
compile_options == ScriptCompiler::kEagerCompile) {
- cached_data = nullptr;
+ DCHECK_NULL(cached_data);
} else {
DCHECK(compile_options == ScriptCompiler::kConsumeCodeCache);
- DCHECK(cached_data && *cached_data);
+ DCHECK(cached_data);
DCHECK_NULL(extension);
}
int source_length = source->length();
@@ -1683,16 +1690,13 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompileDeserialize");
Handle<SharedFunctionInfo> inner_result;
- if (CodeSerializer::Deserialize(isolate, *cached_data, source)
+ if (CodeSerializer::Deserialize(isolate, cached_data, source)
.ToHandle(&inner_result)) {
// Promote to per-isolate compilation cache.
DCHECK(inner_result->is_compiled());
compilation_cache->PutScript(source, isolate->native_context(),
language_mode, inner_result);
Handle<Script> script(Script::cast(inner_result->script()), isolate);
- if (isolate->NeedsSourcePositionsForProfiling()) {
- Script::InitLineEnds(script);
- }
maybe_result = inner_result;
} else {
// Deserializer failed. Fall through to compile.
@@ -1736,6 +1740,91 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
return maybe_result;
}
+MaybeHandle<JSFunction> Compiler::GetWrappedFunction(
+ Handle<String> source, Handle<FixedArray> arguments,
+ Handle<Context> context, const Compiler::ScriptDetails& script_details,
+ ScriptOriginOptions origin_options, ScriptData* cached_data,
+ v8::ScriptCompiler::CompileOptions compile_options,
+ v8::ScriptCompiler::NoCacheReason no_cache_reason) {
+ Isolate* isolate = source->GetIsolate();
+ ScriptCompileTimerScope compile_timer(isolate, no_cache_reason);
+
+ if (compile_options == ScriptCompiler::kNoCompileOptions ||
+ compile_options == ScriptCompiler::kEagerCompile) {
+ DCHECK_NULL(cached_data);
+ } else {
+ DCHECK(compile_options == ScriptCompiler::kConsumeCodeCache);
+ DCHECK(cached_data);
+ }
+
+ int source_length = source->length();
+ isolate->counters()->total_compile_size()->Increment(source_length);
+
+ LanguageMode language_mode = construct_language_mode(FLAG_use_strict);
+
+ MaybeHandle<SharedFunctionInfo> maybe_result;
+ bool can_consume_code_cache =
+ compile_options == ScriptCompiler::kConsumeCodeCache &&
+ !isolate->debug()->is_loaded();
+ if (can_consume_code_cache) {
+ compile_timer.set_consuming_code_cache();
+ // Then check cached code provided by embedder.
+ HistogramTimerScope timer(isolate->counters()->compile_deserialize());
+ RuntimeCallTimerScope runtimeTimer(
+ isolate, RuntimeCallCounterId::kCompileDeserialize);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+ "V8.CompileDeserialize");
+ maybe_result = CodeSerializer::Deserialize(isolate, cached_data, source);
+ if (maybe_result.is_null()) {
+ // Deserializer failed. Fall through to compile.
+ compile_timer.set_consuming_code_cache_failed();
+ }
+ }
+
+ Handle<SharedFunctionInfo> wrapped;
+ Handle<Script> script;
+ if (!maybe_result.ToHandle(&wrapped)) {
+ script = NewScript(isolate, source, script_details, origin_options,
+ NOT_NATIVES_CODE);
+ script->set_wrapped_arguments(*arguments);
+
+ ParseInfo parse_info(script);
+ parse_info.set_eval(); // Use an eval scope as declaration scope.
+ parse_info.set_wrapped_as_function();
+ // parse_info.set_eager(compile_options == ScriptCompiler::kEagerCompile);
+ if (!context->IsNativeContext()) {
+ parse_info.set_outer_scope_info(handle(context->scope_info()));
+ }
+ parse_info.set_language_mode(
+ stricter_language_mode(parse_info.language_mode(), language_mode));
+
+ Handle<SharedFunctionInfo> top_level;
+ maybe_result = CompileToplevel(&parse_info, isolate);
+ if (maybe_result.is_null()) isolate->ReportPendingMessages();
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, top_level, maybe_result, JSFunction);
+
+ SharedFunctionInfo::ScriptIterator infos(script);
+ while (SharedFunctionInfo* info = infos.Next()) {
+ if (info->is_wrapped()) {
+ wrapped = Handle<SharedFunctionInfo>(info, isolate);
+ break;
+ }
+ }
+ DCHECK(!wrapped.is_null());
+ } else {
+ script = Handle<Script>(Script::cast(wrapped->script()), isolate);
+ }
+
+ Handle<JSFunction> function =
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(wrapped, context,
+ NOT_TENURED);
+ // OnAfterCompile has to be called after we create the JSFunction, since
+ // it may require recompiling the eval for debugging, if we find a function
+ // that contains break points in the eval script.
+ isolate->debug()->OnAfterCompile(script);
+ return function;
+}
+
ScriptCompiler::ScriptStreamingTask* Compiler::NewBackgroundCompileTask(
ScriptStreamingData* source, Isolate* isolate) {
return new BackgroundCompileTask(source, isolate);
@@ -1784,21 +1873,14 @@ Compiler::GetSharedFunctionInfoForStreamedScript(
isolate, script, parse_info->ast_value_factory());
} else {
// Parsing has succeeded - finalize compilation.
- if (i::FLAG_background_compile) {
- // Finalize background compilation.
- if (streaming_data->outer_function_job) {
- maybe_result = FinalizeTopLevel(
- parse_info, isolate, streaming_data->outer_function_job.get(),
- &streaming_data->inner_function_jobs);
- } else {
- // Compilation failed on background thread - throw an exception.
- FailWithPendingException(
- isolate, parse_info,
- Compiler::ClearExceptionFlag::KEEP_EXCEPTION);
- }
+ if (streaming_data->outer_function_job) {
+ maybe_result = FinalizeTopLevel(
+ parse_info, isolate, streaming_data->outer_function_job.get(),
+ &streaming_data->inner_function_jobs);
} else {
- // Compilation on main thread.
- maybe_result = CompileToplevel(parse_info, isolate);
+ // Compilation failed on background thread - throw an exception.
+ FailWithPendingException(isolate, parse_info,
+ Compiler::ClearExceptionFlag::KEEP_EXCEPTION);
}
}
@@ -1837,12 +1919,8 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
// Allocate a shared function info object which will be compiled lazily.
Handle<SharedFunctionInfo> result =
- isolate->factory()->NewSharedFunctionInfoForLiteral(literal, script);
- result->set_is_toplevel(false);
- Scope* outer_scope = literal->scope()->GetOuterScopeWithContext();
- if (outer_scope) {
- result->set_outer_scope_info(*outer_scope->scope_info());
- }
+ isolate->factory()->NewSharedFunctionInfoForLiteral(literal, script,
+ false);
return result;
}
@@ -1855,20 +1933,23 @@ MaybeHandle<Code> Compiler::GetOptimizedCodeForOSR(Handle<JSFunction> function,
osr_frame);
}
-bool Compiler::FinalizeCompilationJob(CompilationJob* raw_job,
+bool Compiler::FinalizeCompilationJob(OptimizedCompilationJob* raw_job,
Isolate* isolate) {
+ VMState<COMPILER> state(isolate);
// Take ownership of compilation job. Deleting job also tears down the zone.
- std::unique_ptr<CompilationJob> job(raw_job);
+ std::unique_ptr<OptimizedCompilationJob> job(raw_job);
+ return FinalizeOptimizedCompilationJob(job.get(), isolate) ==
+ CompilationJob::SUCCEEDED;
+}
- if (job->compilation_info()->IsOptimizing()) {
- VMState<COMPILER> state(isolate);
- return FinalizeOptimizedCompilationJob(job.get(), isolate) ==
- CompilationJob::SUCCEEDED;
- } else {
- VMState<BYTECODE_COMPILER> state(isolate);
- return FinalizeUnoptimizedCompilationJob(job.get(), isolate) ==
- CompilationJob::SUCCEEDED;
- }
+bool Compiler::FinalizeCompilationJob(UnoptimizedCompilationJob* raw_job,
+ Handle<SharedFunctionInfo> shared_info,
+ Isolate* isolate) {
+ VMState<BYTECODE_COMPILER> state(isolate);
+ // Take ownership of compilation job. Deleting job also tears down the zone.
+ std::unique_ptr<UnoptimizedCompilationJob> job(raw_job);
+ return FinalizeUnoptimizedCompilationJob(job.get(), shared_info, isolate) ==
+ CompilationJob::SUCCEEDED;
}
void Compiler::PostInstantiation(Handle<JSFunction> function,
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index ca6b0893d0..1477b4f3f1 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -20,15 +20,18 @@ namespace v8 {
namespace internal {
// Forward declarations.
-class CompilationInfo;
-class CompilationJob;
class JavaScriptFrame;
+class OptimizedCompilationInfo;
+class OptimizedCompilationJob;
class ParseInfo;
class Parser;
class ScriptData;
struct ScriptStreamingData;
+class UnoptimizedCompilationInfo;
+class UnoptimizedCompilationJob;
-typedef std::forward_list<std::unique_ptr<CompilationJob>> CompilationJobList;
+typedef std::forward_list<std::unique_ptr<UnoptimizedCompilationJob>>
+ UnoptimizedCompilationJobList;
// The V8 compiler API.
//
@@ -64,7 +67,11 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
ScriptStreamingData* streaming_data, Isolate* isolate);
// Generate and install code from previously queued compilation job.
- static bool FinalizeCompilationJob(CompilationJob* job, Isolate* isolate);
+ static bool FinalizeCompilationJob(UnoptimizedCompilationJob* job,
+ Handle<SharedFunctionInfo> shared_info,
+ Isolate* isolate);
+ static bool FinalizeCompilationJob(OptimizedCompilationJob* job,
+ Isolate* isolate);
// Give the compiler a chance to perform low-latency initialization tasks of
// the given {function} on its instantiation. Note that only the runtime will
@@ -88,7 +95,7 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
// real function with a context.
// Create a (bound) function for a String source within a context for eval.
- MUST_USE_RESULT static MaybeHandle<JSFunction> GetFunctionFromEval(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSFunction> GetFunctionFromEval(
Handle<String> source, Handle<SharedFunctionInfo> outer_info,
Handle<Context> context, LanguageMode language_mode,
ParseRestriction restriction, int parameters_end_pos,
@@ -96,13 +103,26 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
int column_offset = 0, Handle<Object> script_name = Handle<Object>(),
ScriptOriginOptions options = ScriptOriginOptions());
+ struct ScriptDetails {
+ ScriptDetails() : line_offset(0), column_offset(0) {}
+ explicit ScriptDetails(Handle<Object> script_name)
+ : line_offset(0), column_offset(0), name_obj(script_name) {}
+
+ int line_offset;
+ int column_offset;
+ i::MaybeHandle<i::Object> name_obj;
+ i::MaybeHandle<i::Object> source_map_url;
+ i::MaybeHandle<i::FixedArray> host_defined_options;
+ };
+
// Create a function that results from wrapping |source| in a function,
// with |arguments| being a list of parameters for that function.
- MUST_USE_RESULT static MaybeHandle<JSFunction> GetWrappedFunction(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSFunction> GetWrappedFunction(
Handle<String> source, Handle<FixedArray> arguments,
- Handle<Context> context, int line_offset = 0, int column_offset = 0,
- Handle<Object> script_name = Handle<Object>(),
- ScriptOriginOptions options = ScriptOriginOptions());
+ Handle<Context> context, const ScriptDetails& script_details,
+ ScriptOriginOptions origin_options, ScriptData* cached_data,
+ v8::ScriptCompiler::CompileOptions compile_options,
+ v8::ScriptCompiler::NoCacheReason no_cache_reason);
// Returns true if the embedder permits compiling the given source string in
// the given context.
@@ -111,27 +131,15 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
Handle<String> source);
// Create a (bound) function for a String source within a context for eval.
- MUST_USE_RESULT static MaybeHandle<JSFunction> GetFunctionFromString(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSFunction> GetFunctionFromString(
Handle<Context> context, Handle<String> source,
ParseRestriction restriction, int parameters_end_pos);
- struct ScriptDetails {
- ScriptDetails() : line_offset(0), column_offset(0) {}
- explicit ScriptDetails(Handle<Object> script_name)
- : line_offset(0), column_offset(0), name_obj(script_name) {}
-
- int line_offset;
- int column_offset;
- i::MaybeHandle<i::Object> name_obj;
- i::MaybeHandle<i::Object> source_map_url;
- i::MaybeHandle<i::FixedArray> host_defined_options;
- };
-
// Create a shared function info object for a String source.
static MaybeHandle<SharedFunctionInfo> GetSharedFunctionInfoForScript(
Handle<String> source, const ScriptDetails& script_details,
ScriptOriginOptions origin_options, v8::Extension* extension,
- ScriptData** cached_data, ScriptCompiler::CompileOptions compile_options,
+ ScriptData* cached_data, ScriptCompiler::CompileOptions compile_options,
ScriptCompiler::NoCacheReason no_cache_reason,
NativesFlag is_natives_code);
@@ -160,20 +168,13 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
// instead of generating JIT code for a function at all.
// Generate and return optimized code for OSR, or empty handle on failure.
- MUST_USE_RESULT static MaybeHandle<Code> GetOptimizedCodeForOSR(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Code> GetOptimizedCodeForOSR(
Handle<JSFunction> function, BailoutId osr_offset,
JavaScriptFrame* osr_frame);
};
// A base class for compilation jobs intended to run concurrent to the main
-// thread. The job is split into three phases which are called in sequence on
-// different threads and with different limitations:
-// 1) PrepareJob: Runs on main thread. No major limitations.
-// 2) ExecuteJob: Runs concurrently. No heap allocation or handle derefs.
-// 3) FinalizeJob: Runs on main thread. No dependency changes.
-//
-// Each of the three phases can either fail or succeed. The current state of
-// the job can be checked using {state()}.
+// thread. The current state of the job can be checked using {state()}.
class V8_EXPORT_PRIVATE CompilationJob {
public:
enum Status { SUCCEEDED, FAILED };
@@ -184,20 +185,105 @@ class V8_EXPORT_PRIVATE CompilationJob {
kSucceeded,
kFailed,
};
- CompilationJob(uintptr_t stack_limit, ParseInfo* parse_info,
- CompilationInfo* compilation_info, const char* compiler_name,
- State initial_state = State::kReadyToPrepare);
+
+ CompilationJob(uintptr_t stack_limit, State initial_state)
+ : state_(initial_state), stack_limit_(stack_limit) {}
virtual ~CompilationJob() {}
+ void set_stack_limit(uintptr_t stack_limit) { stack_limit_ = stack_limit; }
+ uintptr_t stack_limit() const { return stack_limit_; }
+
+ State state() const { return state_; }
+
+ protected:
+ V8_WARN_UNUSED_RESULT Status UpdateState(Status status, State next_state) {
+ if (status == SUCCEEDED) {
+ state_ = next_state;
+ } else {
+ state_ = State::kFailed;
+ }
+ return status;
+ }
+
+ private:
+ State state_;
+ uintptr_t stack_limit_;
+};
+
+// A base class for unoptimized compilation jobs.
+//
+// The job is split into two phases which are called in sequence on
+// different threads and with different limitations:
+// 1) ExecuteJob: Runs concurrently. No heap allocation or handle derefs.
+// 2) FinalizeJob: Runs on main thread. No dependency changes.
+//
+// Either of the two phases can fail or succeed.
+class UnoptimizedCompilationJob : public CompilationJob {
+ public:
+ UnoptimizedCompilationJob(intptr_t stack_limit, ParseInfo* parse_info,
+ UnoptimizedCompilationInfo* compilation_info)
+ : CompilationJob(stack_limit, State::kReadyToExecute),
+ parse_info_(parse_info),
+ compilation_info_(compilation_info) {}
+
+ // Executes the compile job. Can be called on a background thread.
+ V8_WARN_UNUSED_RESULT Status ExecuteJob();
+
+ // Finalizes the compile job. Must be called on the main thread.
+ V8_WARN_UNUSED_RESULT Status
+ FinalizeJob(Handle<SharedFunctionInfo> shared_info, Isolate* isolate);
+
+ void RecordCompilationStats(Isolate* isolate) const;
+ void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
+ Handle<SharedFunctionInfo> shared,
+ Isolate* isolate) const;
+
+ ParseInfo* parse_info() const { return parse_info_; }
+ UnoptimizedCompilationInfo* compilation_info() const {
+ return compilation_info_;
+ }
+
+ protected:
+ // Overridden by the actual implementation.
+ virtual Status ExecuteJobImpl() = 0;
+ virtual Status FinalizeJobImpl(Handle<SharedFunctionInfo> shared_info,
+ Isolate* isolate) = 0;
+
+ private:
+ ParseInfo* parse_info_;
+ UnoptimizedCompilationInfo* compilation_info_;
+ base::TimeDelta time_taken_to_execute_;
+ base::TimeDelta time_taken_to_finalize_;
+};
+
+// A base class for optimized compilation jobs.
+//
+// The job is split into three phases which are called in sequence on
+// different threads and with different limitations:
+// 1) PrepareJob: Runs on main thread. No major limitations.
+// 2) ExecuteJob: Runs concurrently. No heap allocation or handle derefs.
+// 3) FinalizeJob: Runs on main thread. No dependency changes.
+//
+// Each of the three phases can either fail or succeed.
+class OptimizedCompilationJob : public CompilationJob {
+ public:
+ OptimizedCompilationJob(uintptr_t stack_limit,
+ OptimizedCompilationInfo* compilation_info,
+ const char* compiler_name,
+ State initial_state = State::kReadyToPrepare)
+ : CompilationJob(stack_limit, initial_state),
+ compilation_info_(compilation_info),
+ compiler_name_(compiler_name) {}
+
// Prepare the compile job. Must be called on the main thread.
- MUST_USE_RESULT Status PrepareJob(Isolate* isolate);
+ V8_WARN_UNUSED_RESULT Status PrepareJob(Isolate* isolate);
// Executes the compile job. Can be called on a background thread if
// can_execute_on_background_thread() returns true.
- MUST_USE_RESULT Status ExecuteJob();
+ V8_WARN_UNUSED_RESULT Status ExecuteJob();
// Finalizes the compile job. Must be called on the main thread.
- MUST_USE_RESULT Status FinalizeJob(Isolate* isolate);
+ V8_WARN_UNUSED_RESULT Status FinalizeJob(Isolate* isolate);
// Report a transient failure, try again next time. Should only be called on
// optimization compilation jobs.
@@ -207,17 +293,13 @@ class V8_EXPORT_PRIVATE CompilationJob {
// Should only be called on optimization compilation jobs.
Status AbortOptimization(BailoutReason reason);
- void RecordOptimizedCompilationStats() const;
- void RecordUnoptimizedCompilationStats(Isolate* isolate) const;
+ void RecordCompilationStats() const;
void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
Isolate* isolate) const;
- void set_stack_limit(uintptr_t stack_limit) { stack_limit_ = stack_limit; }
- uintptr_t stack_limit() const { return stack_limit_; }
-
- State state() const { return state_; }
- ParseInfo* parse_info() const { return parse_info_; }
- CompilationInfo* compilation_info() const { return compilation_info_; }
+ OptimizedCompilationInfo* compilation_info() const {
+ return compilation_info_;
+ }
virtual size_t AllocatedMemory() const { return 0; }
protected:
@@ -227,24 +309,11 @@ class V8_EXPORT_PRIVATE CompilationJob {
virtual Status FinalizeJobImpl(Isolate* isolate) = 0;
private:
- // TODO(6409): Remove parse_info once Fullcode and AstGraphBuilder are gone.
- ParseInfo* parse_info_;
- CompilationInfo* compilation_info_;
+ OptimizedCompilationInfo* compilation_info_;
base::TimeDelta time_taken_to_prepare_;
base::TimeDelta time_taken_to_execute_;
base::TimeDelta time_taken_to_finalize_;
const char* compiler_name_;
- State state_;
- uintptr_t stack_limit_;
-
- MUST_USE_RESULT Status UpdateState(Status status, State next_state) {
- if (status == SUCCEEDED) {
- state_ = next_state;
- } else {
- state_ = State::kFailed;
- }
- return status;
- }
};
// Contains all data which needs to be transmitted between threads for
@@ -269,8 +338,8 @@ struct ScriptStreamingData {
std::unique_ptr<Parser> parser;
// Data needed for finalizing compilation after background compilation.
- std::unique_ptr<CompilationJob> outer_function_job;
- CompilationJobList inner_function_jobs;
+ std::unique_ptr<UnoptimizedCompilationJob> outer_function_job;
+ UnoptimizedCompilationJobList inner_function_jobs;
DISALLOW_COPY_AND_ASSIGN(ScriptStreamingData);
};
diff --git a/deps/v8/src/compiler/OWNERS b/deps/v8/src/compiler/OWNERS
index f250db84b9..e7c4b7542f 100644
--- a/deps/v8/src/compiler/OWNERS
+++ b/deps/v8/src/compiler/OWNERS
@@ -15,8 +15,10 @@ bbudge@chromium.org
gdeepti@chromium.org
per-file wasm-*=ahaas@chromium.org
+per-file wasm-*=binji@chromium.org
per-file wasm-*=bradnelson@chromium.org
per-file wasm-*=clemensh@chromium.org
+per-file wasm-*=kschimpf@chromium.org
per-file int64-lowering.*=ahaas@chromium.org
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index e187d7170c..bbef35570c 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -729,7 +729,7 @@ FieldAccess AccessBuilder::ForJSGlobalProxyNativeContext() {
}
// static
-FieldAccess AccessBuilder::ForJSArrayIteratorObject() {
+FieldAccess AccessBuilder::ForJSArrayIteratorIteratedObject() {
FieldAccess access = {kTaggedBase,
JSArrayIterator::kIteratedObjectOffset,
Handle<Name>(),
@@ -741,8 +741,7 @@ FieldAccess AccessBuilder::ForJSArrayIteratorObject() {
}
// static
-FieldAccess AccessBuilder::ForJSArrayIteratorIndex(InstanceType instance_type,
- ElementsKind elements_kind) {
+FieldAccess AccessBuilder::ForJSArrayIteratorNextIndex() {
// In generic case, cap to 2^53-1 (per ToLength() in spec) via
// kPositiveSafeInteger
FieldAccess access = {kTaggedBase,
@@ -752,33 +751,18 @@ FieldAccess AccessBuilder::ForJSArrayIteratorIndex(InstanceType instance_type,
TypeCache::Get().kPositiveSafeInteger,
MachineType::AnyTagged(),
kFullWriteBarrier};
- if (instance_type == JS_ARRAY_TYPE) {
- if (IsDoubleElementsKind(elements_kind)) {
- access.type = TypeCache::Get().kFixedDoubleArrayLengthType;
- access.machine_type = MachineType::TaggedSigned();
- access.write_barrier_kind = kNoWriteBarrier;
- } else if (IsFastElementsKind(elements_kind)) {
- access.type = TypeCache::Get().kFixedArrayLengthType;
- access.machine_type = MachineType::TaggedSigned();
- access.write_barrier_kind = kNoWriteBarrier;
- } else {
- access.type = TypeCache::Get().kJSArrayLengthType;
- }
- } else if (instance_type == JS_TYPED_ARRAY_TYPE) {
- access.type = TypeCache::Get().kJSTypedArrayLengthType;
- access.machine_type = MachineType::TaggedSigned();
- access.write_barrier_kind = kNoWriteBarrier;
- }
return access;
}
// static
-FieldAccess AccessBuilder::ForJSArrayIteratorObjectMap() {
- FieldAccess access = {
- kTaggedBase, JSArrayIterator::kIteratedObjectMapOffset,
- Handle<Name>(), MaybeHandle<Map>(),
- Type::OtherInternal(), MachineType::TaggedPointer(),
- kPointerWriteBarrier};
+FieldAccess AccessBuilder::ForJSArrayIteratorKind() {
+ FieldAccess access = {kTaggedBase,
+ JSArrayIterator::kKindOffset,
+ Handle<Name>(),
+ MaybeHandle<Map>(),
+ TypeCache::Get().kJSArrayIteratorKindType,
+ MachineType::TaggedSigned(),
+ kNoWriteBarrier};
return access;
}
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index fb8535c167..44298c2188 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -241,15 +241,14 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to JSGlobalProxy::native_context() field.
static FieldAccess ForJSGlobalProxyNativeContext();
- // Provides access to JSArrayIterator::object() field.
- static FieldAccess ForJSArrayIteratorObject();
+ // Provides access to JSArrayIterator::iterated_object() field.
+ static FieldAccess ForJSArrayIteratorIteratedObject();
- // Provides access to JSArrayIterator::index() field.
- static FieldAccess ForJSArrayIteratorIndex(InstanceType type = JS_OBJECT_TYPE,
- ElementsKind kind = NO_ELEMENTS);
+ // Provides access to JSArrayIterator::next_index() field.
+ static FieldAccess ForJSArrayIteratorNextIndex();
- // Provides access to JSArrayIterator::object_map() field.
- static FieldAccess ForJSArrayIteratorObjectMap();
+ // Provides access to JSArrayIterator::kind() field.
+ static FieldAccess ForJSArrayIteratorKind();
// Provides access to JSStringIterator::string() field.
static FieldAccess ForJSStringIteratorString();
diff --git a/deps/v8/src/compiler/access-info.h b/deps/v8/src/compiler/access-info.h
index 54d402738b..808b8af0d1 100644
--- a/deps/v8/src/compiler/access-info.h
+++ b/deps/v8/src/compiler/access-info.h
@@ -87,7 +87,7 @@ class PropertyAccessInfo final {
PropertyAccessInfo();
bool Merge(PropertyAccessInfo const* that, AccessMode access_mode,
- Zone* zone) WARN_UNUSED_RESULT;
+ Zone* zone) V8_WARN_UNUSED_RESULT;
bool IsNotFound() const { return kind() == kNotFound; }
bool IsDataConstant() const { return kind() == kDataConstant; }
diff --git a/deps/v8/src/compiler/allocation-builder.h b/deps/v8/src/compiler/allocation-builder.h
index a0ba02cd33..86fea33090 100644
--- a/deps/v8/src/compiler/allocation-builder.h
+++ b/deps/v8/src/compiler/allocation-builder.h
@@ -48,6 +48,16 @@ class AllocationBuilder final {
index, value, effect_, control_);
}
+ // Compound allocation of a context.
+ void AllocateContext(int length, Handle<Map> map) {
+ DCHECK(map->instance_type() >= BLOCK_CONTEXT_TYPE &&
+ map->instance_type() <= WITH_CONTEXT_TYPE);
+ int size = FixedArray::SizeFor(length);
+ Allocate(size, NOT_TENURED, Type::OtherInternal());
+ Store(AccessBuilder::ForMap(), map);
+ Store(AccessBuilder::ForFixedArrayLength(), jsgraph()->Constant(length));
+ }
+
// Compound allocation of a FixedArray.
void AllocateArray(int length, Handle<Map> map,
PretenureFlag pretenure = NOT_TENURED) {
diff --git a/deps/v8/src/compiler/arm/code-generator-arm.cc b/deps/v8/src/compiler/arm/code-generator-arm.cc
index 8636c639e0..b5f258ab45 100644
--- a/deps/v8/src/compiler/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/arm/code-generator-arm.cc
@@ -7,13 +7,13 @@
#include "src/arm/macro-assembler-arm.h"
#include "src/assembler-inl.h"
#include "src/boxed-float.h"
-#include "src/compilation-info.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/double.h"
#include "src/heap/heap-inl.h"
+#include "src/optimized-compilation-info.h"
namespace v8 {
namespace internal {
@@ -324,7 +324,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
}
void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode, Instruction* instr,
+ InstructionCode opcode,
ArmOperandConverter& i) {
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
@@ -334,6 +334,25 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
}
}
+void ComputePoisonedAddressForLoad(CodeGenerator* codegen,
+ InstructionCode opcode,
+ ArmOperandConverter& i, Register address) {
+ DCHECK_EQ(kMemoryAccessPoisoned,
+ static_cast<MemoryAccessMode>(MiscField::decode(opcode)));
+ switch (AddressingModeField::decode(opcode)) {
+ case kMode_Offset_RI:
+ codegen->tasm()->mov(address, i.InputImmediate(1));
+ codegen->tasm()->add(address, address, i.InputRegister(0));
+ break;
+ case kMode_Offset_RR:
+ codegen->tasm()->add(address, i.InputRegister(0), i.InputRegister(1));
+ break;
+ default:
+ UNREACHABLE();
+ }
+ codegen->tasm()->and_(address, address, Operand(kSpeculationPoisonRegister));
+}
+
} // namespace
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
@@ -620,7 +639,7 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ Jump(code, RelocInfo::CODE_TARGET, ne);
}
-void CodeGenerator::GenerateSpeculationPoison() {
+void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
UseScratchRegisterScope temps(tasm());
Register scratch = temps.Acquire();
@@ -896,6 +915,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ mov(i.OutputRegister(), fp);
}
break;
+ case kArchRootsPointer:
+ __ mov(i.OutputRegister(), kRootRegister);
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
case kArchTruncateDoubleToI:
__ TruncateDoubleToIDelayed(zone(), i.OutputRegister(),
i.InputDoubleRegister(0));
@@ -988,8 +1011,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_IEEE754_UNOP(log10);
break;
case kIeee754Float64Pow: {
- __ CallStubDelayed(new (zone())
- MathPowStub(nullptr, MathPowStub::DOUBLE));
+ __ CallStubDelayed(new (zone()) MathPowStub());
__ vmov(d0, d2);
break;
}
@@ -1514,12 +1536,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmLdrb:
__ ldrb(i.OutputRegister(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ EmitWordLoadPoisoningIfNeeded(this, opcode, i);
break;
case kArmLdrsb:
__ ldrsb(i.OutputRegister(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ EmitWordLoadPoisoningIfNeeded(this, opcode, i);
break;
case kArmStrb:
__ strb(i.InputRegister(0), i.InputOffset(1));
@@ -1527,11 +1549,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArmLdrh:
__ ldrh(i.OutputRegister(), i.InputOffset());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ EmitWordLoadPoisoningIfNeeded(this, opcode, i);
break;
case kArmLdrsh:
__ ldrsh(i.OutputRegister(), i.InputOffset());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ EmitWordLoadPoisoningIfNeeded(this, opcode, i);
break;
case kArmStrh:
__ strh(i.InputRegister(0), i.InputOffset(1));
@@ -1539,14 +1561,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArmLdr:
__ ldr(i.OutputRegister(), i.InputOffset());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ EmitWordLoadPoisoningIfNeeded(this, opcode, i);
break;
case kArmStr:
__ str(i.InputRegister(0), i.InputOffset(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVldrF32: {
- __ vldr(i.OutputFloatRegister(), i.InputOffset());
+ const MemoryAccessMode access_mode =
+ static_cast<MemoryAccessMode>(MiscField::decode(opcode));
+ if (access_mode == kMemoryAccessPoisoned) {
+ UseScratchRegisterScope temps(tasm());
+ Register address = temps.Acquire();
+ ComputePoisonedAddressForLoad(this, opcode, i, address);
+ __ vldr(i.OutputFloatRegister(), address, 0);
+ } else {
+ __ vldr(i.OutputFloatRegister(), i.InputOffset());
+ }
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
@@ -1574,10 +1605,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.NeonInputOperand(1));
break;
}
- case kArmVldrF64:
- __ vldr(i.OutputDoubleRegister(), i.InputOffset());
+ case kArmVldrF64: {
+ const MemoryAccessMode access_mode =
+ static_cast<MemoryAccessMode>(MiscField::decode(opcode));
+ if (access_mode == kMemoryAccessPoisoned) {
+ UseScratchRegisterScope temps(tasm());
+ Register address = temps.Acquire();
+ ComputePoisonedAddressForLoad(this, opcode, i, address);
+ __ vldr(i.OutputDoubleRegister(), address, 0);
+ } else {
+ __ vldr(i.OutputDoubleRegister(), i.InputOffset());
+ }
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
+ }
case kArmVstrF64:
__ vstr(i.InputDoubleRegister(0), i.InputOffset(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -1700,6 +1741,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ isb(SY);
break;
}
+ case kArchPoisonOnSpeculationWord:
+ __ and_(i.OutputRegister(0), i.InputRegister(0),
+ Operand(kSpeculationPoisonRegister));
+ break;
case kArmF32x4Splat: {
int src_code = i.InputFloatRegister(0).code();
__ vdup(Neon32, i.OutputSimd128Register(),
@@ -2902,7 +2947,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
- InitializePoisonForLoadsIfNeeded();
+ ResetSpeculationPoison();
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
@@ -3263,12 +3308,32 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ vstr(temp_1, dst);
} else if (source->IsDoubleStackSlot()) {
UseScratchRegisterScope temps(tasm());
- DwVfpRegister temp_0 = temps.AcquireD();
- DwVfpRegister temp_1 = temps.AcquireD();
- __ vldr(temp_0, dst);
- __ vldr(temp_1, src);
- __ vstr(temp_0, src);
- __ vstr(temp_1, dst);
+ LowDwVfpRegister temp = temps.AcquireLowD();
+ if (temps.CanAcquireD()) {
+ DwVfpRegister temp_0 = temp;
+ DwVfpRegister temp_1 = temps.AcquireD();
+ __ vldr(temp_0, dst);
+ __ vldr(temp_1, src);
+ __ vstr(temp_0, src);
+ __ vstr(temp_1, dst);
+ } else {
+ // We only have a single D register available. However, we can split
+ // it into 2 S registers and swap the slots 32 bits at a time.
+ MemOperand src0 = src;
+ MemOperand dst0 = dst;
+ MemOperand src1(src.rn(), src.offset() + kFloatSize);
+ MemOperand dst1(dst.rn(), dst.offset() + kFloatSize);
+ SwVfpRegister temp_0 = temp.low();
+ SwVfpRegister temp_1 = temp.high();
+ __ vldr(temp_0, dst0);
+ __ vldr(temp_1, src0);
+ __ vstr(temp_0, src0);
+ __ vstr(temp_1, dst0);
+ __ vldr(temp_0, dst1);
+ __ vldr(temp_1, src1);
+ __ vstr(temp_0, src1);
+ __ vstr(temp_1, dst1);
+ }
} else {
DCHECK(source->IsSimd128StackSlot());
MemOperand src0 = src;
diff --git a/deps/v8/src/compiler/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
index ef81c98716..b2f6fd6e71 100644
--- a/deps/v8/src/compiler/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
@@ -114,14 +114,14 @@ void VisitRRRShuffle(InstructionSelector* selector, ArchOpcode opcode,
void VisitRRI(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
ArmOperandGenerator g(selector);
- int32_t imm = OpParameter<int32_t>(node);
+ int32_t imm = OpParameter<int32_t>(node->op());
selector->Emit(opcode, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)), g.UseImmediate(imm));
}
void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
ArmOperandGenerator g(selector);
- int32_t imm = OpParameter<int32_t>(node);
+ int32_t imm = OpParameter<int32_t>(node->op());
selector->Emit(opcode, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)), g.UseImmediate(imm),
g.UseRegister(node->InputAt(1)));
@@ -248,9 +248,9 @@ void VisitBinop(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
ArmOperandGenerator g(selector);
Int32BinopMatcher m(node);
- InstructionOperand inputs[5];
+ InstructionOperand inputs[3];
size_t input_count = 0;
- InstructionOperand outputs[2];
+ InstructionOperand outputs[1];
size_t output_count = 0;
if (m.left().node() == m.right().node()) {
@@ -281,33 +281,16 @@ void VisitBinop(InstructionSelector* selector, Node* node,
inputs[input_count++] = g.UseRegister(m.right().node());
}
- if (cont->IsBranch()) {
- inputs[input_count++] = g.Label(cont->true_block());
- inputs[input_count++] = g.Label(cont->false_block());
- }
-
outputs[output_count++] = g.DefineAsRegister(node);
- if (cont->IsSet()) {
- outputs[output_count++] = g.DefineAsRegister(cont->result());
- }
DCHECK_NE(0u, input_count);
- DCHECK_NE(0u, output_count);
+ DCHECK_EQ(1u, output_count);
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
DCHECK_NE(kMode_None, AddressingModeField::decode(opcode));
- opcode = cont->Encode(opcode);
- if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->feedback(),
- cont->frame_state());
- } else if (cont->IsTrap()) {
- inputs[input_count++] = g.UseImmediate(cont->trap_id());
- selector->Emit(opcode, output_count, outputs, input_count, inputs);
- } else {
- selector->Emit(opcode, output_count, outputs, input_count, inputs);
- }
+ selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
+ inputs, cont);
}
@@ -461,7 +444,7 @@ void InstructionSelector::VisitLoad(Node* node) {
return;
}
if (node->opcode() == IrOpcode::kPoisonedLoad) {
- CHECK_EQ(load_poisoning_, LoadPoisoning::kDoPoison);
+ CHECK_EQ(poisoning_enabled_, PoisoningMitigationLevel::kOn);
opcode |= MiscField::encode(kMemoryAccessPoisoned);
}
@@ -569,7 +552,7 @@ void InstructionSelector::VisitProtectedStore(Node* node) {
void InstructionSelector::VisitUnalignedLoad(Node* node) {
MachineRepresentation load_rep =
- UnalignedLoadRepresentationOf(node->op()).representation();
+ LoadRepresentationOf(node->op()).representation();
ArmOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -889,22 +872,14 @@ void VisitShift(InstructionSelector* selector, Node* node,
TryMatchShift try_match_shift, FlagsContinuation* cont) {
ArmOperandGenerator g(selector);
InstructionCode opcode = kArmMov;
- InstructionOperand inputs[4];
+ InstructionOperand inputs[2];
size_t input_count = 2;
- InstructionOperand outputs[2];
+ InstructionOperand outputs[1];
size_t output_count = 0;
CHECK(try_match_shift(selector, &opcode, node, &inputs[0], &inputs[1]));
- if (cont->IsBranch()) {
- inputs[input_count++] = g.Label(cont->true_block());
- inputs[input_count++] = g.Label(cont->false_block());
- }
-
outputs[output_count++] = g.DefineAsRegister(node);
- if (cont->IsSet()) {
- outputs[output_count++] = g.DefineAsRegister(cont->result());
- }
DCHECK_NE(0u, input_count);
DCHECK_NE(0u, output_count);
@@ -912,17 +887,8 @@ void VisitShift(InstructionSelector* selector, Node* node,
DCHECK_GE(arraysize(outputs), output_count);
DCHECK_NE(kMode_None, AddressingModeField::decode(opcode));
- opcode = cont->Encode(opcode);
- if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->feedback(),
- cont->frame_state());
- } else if (cont->IsTrap()) {
- inputs[input_count++] = g.UseImmediate(cont->trap_id());
- selector->Emit(opcode, output_count, outputs, input_count, inputs);
- } else {
- selector->Emit(opcode, output_count, outputs, input_count, inputs);
- }
+ selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
+ inputs, cont);
}
@@ -1279,25 +1245,10 @@ void EmitInt32MulWithOverflow(InstructionSelector* selector, Node* node,
// result operand needs shift operator.
InstructionOperand shift_31 = g.UseImmediate(31);
- InstructionCode opcode = cont->Encode(kArmCmp) |
- AddressingModeField::encode(kMode_Operand2_R_ASR_I);
- if (cont->IsBranch()) {
- selector->Emit(opcode, g.NoOutput(), temp_operand, result_operand, shift_31,
- g.Label(cont->true_block()), g.Label(cont->false_block()));
- } else if (cont->IsDeoptimize()) {
- InstructionOperand in[] = {temp_operand, result_operand, shift_31};
- selector->EmitDeoptimize(opcode, 0, nullptr, 3, in, cont->kind(),
- cont->reason(), cont->feedback(),
- cont->frame_state());
- } else if (cont->IsSet()) {
- selector->Emit(opcode, g.DefineAsRegister(cont->result()), temp_operand,
- result_operand, shift_31);
- } else {
- DCHECK(cont->IsTrap());
- InstructionOperand in[] = {temp_operand, result_operand, shift_31,
- g.UseImmediate(cont->trap_id())};
- selector->Emit(opcode, 0, nullptr, 4, in);
- }
+ InstructionCode opcode =
+ kArmCmp | AddressingModeField::encode(kMode_Operand2_R_ASR_I);
+ selector->EmitWithContinuation(opcode, temp_operand, result_operand, shift_31,
+ cont);
}
} // namespace
@@ -1578,22 +1529,7 @@ namespace {
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
InstructionOperand left, InstructionOperand right,
FlagsContinuation* cont) {
- ArmOperandGenerator g(selector);
- opcode = cont->Encode(opcode);
- if (cont->IsBranch()) {
- selector->Emit(opcode, g.NoOutput(), left, right,
- g.Label(cont->true_block()), g.Label(cont->false_block()));
- } else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
- cont->reason(), cont->feedback(),
- cont->frame_state());
- } else if (cont->IsSet()) {
- selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
- } else {
- DCHECK(cont->IsTrap());
- selector->Emit(opcode, g.NoOutput(), left, right,
- g.UseImmediate(cont->trap_id()));
- }
+ selector->EmitWithContinuation(opcode, left, right, cont);
}
@@ -1734,7 +1670,7 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
InstructionCode opcode, FlagsContinuation* cont) {
ArmOperandGenerator g(selector);
Int32BinopMatcher m(node);
- InstructionOperand inputs[5];
+ InstructionOperand inputs[3];
size_t input_count = 0;
InstructionOperand outputs[2];
size_t output_count = 0;
@@ -1767,28 +1703,12 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
}
}
- if (cont->IsBranch()) {
- inputs[input_count++] = g.Label(cont->true_block());
- inputs[input_count++] = g.Label(cont->false_block());
- } else if (cont->IsSet()) {
- outputs[output_count++] = g.DefineAsRegister(cont->result());
- }
-
DCHECK_NE(0u, input_count);
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
- opcode = cont->Encode(opcode);
- if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->feedback(),
- cont->frame_state());
- } else if (cont->IsTrap()) {
- inputs[input_count++] = g.UseImmediate(cont->trap_id());
- selector->Emit(opcode, output_count, outputs, input_count, inputs);
- } else {
- selector->Emit(opcode, output_count, outputs, input_count, inputs);
- }
+ selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
+ inputs, cont);
}
@@ -1934,23 +1854,9 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
// Continuation could not be combined with a compare, emit compare against 0.
ArmOperandGenerator g(this);
InstructionCode const opcode =
- cont->Encode(kArmTst) | AddressingModeField::encode(kMode_Operand2_R);
+ kArmTst | AddressingModeField::encode(kMode_Operand2_R);
InstructionOperand const value_operand = g.UseRegister(value);
- if (cont->IsBranch()) {
- Emit(opcode, g.NoOutput(), value_operand, value_operand,
- g.Label(cont->true_block()), g.Label(cont->false_block()));
- } else if (cont->IsDeoptimize()) {
- EmitDeoptimize(opcode, g.NoOutput(), value_operand, value_operand,
- cont->kind(), cont->reason(), cont->feedback(),
- cont->frame_state());
- } else if (cont->IsSet()) {
- Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
- value_operand);
- } else {
- DCHECK(cont->IsTrap());
- Emit(opcode, g.NoOutput(), value_operand, value_operand,
- g.UseImmediate(cont->trap_id()));
- }
+ EmitWithContinuation(opcode, value_operand, value_operand, cont);
}
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
@@ -1960,20 +1866,20 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
// Emit either ArchTableSwitch or ArchLookupSwitch.
if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
static const size_t kMaxTableSwitchValueRange = 2 << 16;
- size_t table_space_cost = 4 + sw.value_range;
+ size_t table_space_cost = 4 + sw.value_range();
size_t table_time_cost = 3;
- size_t lookup_space_cost = 3 + 2 * sw.case_count;
- size_t lookup_time_cost = sw.case_count;
- if (sw.case_count > 0 &&
+ size_t lookup_space_cost = 3 + 2 * sw.case_count();
+ size_t lookup_time_cost = sw.case_count();
+ if (sw.case_count() > 0 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min() &&
- sw.value_range <= kMaxTableSwitchValueRange) {
+ sw.min_value() > std::numeric_limits<int32_t>::min() &&
+ sw.value_range() <= kMaxTableSwitchValueRange) {
InstructionOperand index_operand = value_operand;
- if (sw.min_value) {
+ if (sw.min_value()) {
index_operand = g.TempRegister();
Emit(kArmSub | AddressingModeField::encode(kMode_Operand2_I),
- index_operand, value_operand, g.TempImmediate(sw.min_value));
+ index_operand, value_operand, g.TempImmediate(sw.min_value()));
}
// Generate a table lookup.
return EmitTableSwitch(sw, index_operand);
@@ -2560,7 +2466,7 @@ void ArrangeShuffleTable(ArmOperandGenerator* g, Node* input0, Node* input1,
} // namespace
void InstructionSelector::VisitS8x16Shuffle(Node* node) {
- const uint8_t* shuffle = OpParameter<uint8_t*>(node);
+ const uint8_t* shuffle = OpParameter<uint8_t*>(node->op());
uint8_t mask = CanonicalizeShuffle(node);
uint8_t shuffle32x4[4];
ArmOperandGenerator g(this);
@@ -2674,9 +2580,6 @@ InstructionSelector::AlignmentRequirements() {
SomeUnalignedAccessUnsupported(req_aligned, req_aligned);
}
-// static
-bool InstructionSelector::SupportsSpeculationPoisoning() { return true; }
-
} // namespace compiler
} // namespace internal
} // namespace v8
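The dominant change in this selector is mechanical: every open-coded dispatch on the continuation kind (branch, deoptimize, set, trap) is replaced by a single EmitWithContinuation call, which now owns the opcode encoding and any trailing label, result, or trap-id operands. That is also why the local inputs/outputs arrays shrink (5/2 down to 3/1): callers no longer reserve slots for branch labels or the continuation's result. A minimal standalone sketch of the shape of this refactoring, using simplified hypothetical types rather than V8's real classes:

// Sketch only: stand-ins for FlagsContinuation and the selector.
#include <cstdio>

enum class ContKind { kBranch, kDeoptimize, kSet, kTrap };

struct FlagsContinuation {
  ContKind kind;
};

struct Selector {
  // Before the refactoring every Visit* helper repeated an if/else chain over
  // the continuation kind; centralizing it here keeps the helpers to one call
  // and lets the continuation append its own trailing operands.
  void EmitWithContinuation(int opcode, const FlagsContinuation& cont) {
    switch (cont.kind) {
      case ContKind::kBranch:
        std::printf("emit %d with true/false block labels\n", opcode);
        break;
      case ContKind::kDeoptimize:
        std::printf("emit %d with frame state for the deopt exit\n", opcode);
        break;
      case ContKind::kSet:
        std::printf("emit %d defining the boolean result register\n", opcode);
        break;
      case ContKind::kTrap:
        std::printf("emit %d with the trap id as an immediate\n", opcode);
        break;
    }
  }
};

int main() {
  Selector s;
  s.EmitWithContinuation(42, {ContKind::kBranch});
  s.EmitWithContinuation(42, {ContKind::kTrap});
}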
diff --git a/deps/v8/src/compiler/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
index a07236b859..968c6133fb 100644
--- a/deps/v8/src/compiler/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
@@ -6,13 +6,13 @@
#include "src/arm64/assembler-arm64-inl.h"
#include "src/arm64/macro-assembler-arm64-inl.h"
-#include "src/compilation-info.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/frame-constants.h"
#include "src/heap/heap-inl.h"
+#include "src/optimized-compilation-info.h"
namespace v8 {
namespace internal {
@@ -213,16 +213,11 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
Constant constant = ToConstant(operand);
switch (constant.type()) {
case Constant::kInt32:
- if (RelocInfo::IsWasmSizeReference(constant.rmode())) {
- return Operand(constant.ToInt32(), constant.rmode());
- } else {
- return Operand(constant.ToInt32());
- }
+ return Operand(constant.ToInt32());
case Constant::kInt64:
if (RelocInfo::IsWasmPtrReference(constant.rmode())) {
return Operand(constant.ToInt64(), constant.rmode());
} else {
- DCHECK(!RelocInfo::IsWasmSizeReference(constant.rmode()));
return Operand(constant.ToInt64());
}
case Constant::kFloat32:
@@ -451,6 +446,18 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
__ Cbnz(i.TempRegister32(2), &binop); \
} while (0)
+#define ASSEMBLE_ATOMIC64_BINOP(load_instr, store_instr, bin_instr) \
+ do { \
+ Label binop; \
+ __ Add(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ Bind(&binop); \
+ __ load_instr(i.OutputRegister(), i.TempRegister(0)); \
+ __ bin_instr(i.TempRegister(1), i.OutputRegister(), \
+ Operand(i.InputRegister(2))); \
+ __ store_instr(i.TempRegister(2), i.TempRegister(1), i.TempRegister(0)); \
+ __ Cbnz(i.TempRegister(2), &binop); \
+ } while (0)
+
#define ASSEMBLE_IEEE754_BINOP(name) \
do { \
FrameScope scope(tasm(), StackFrame::MANUAL); \
@@ -577,7 +584,7 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ Bind(&not_deoptimized);
}
-void CodeGenerator::GenerateSpeculationPoison() {
+void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
UseScratchRegisterScope temps(tasm());
Register scratch = temps.AcquireX();
@@ -844,6 +851,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ mov(i.OutputRegister(), fp);
}
break;
+ case kArchRootsPointer:
+ __ mov(i.OutputRegister(), root);
+ break;
case kArchTruncateDoubleToI:
__ TruncateDoubleToIDelayed(zone(), i.OutputRegister(),
i.InputDoubleRegister(0));
@@ -930,8 +940,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_IEEE754_UNOP(log10);
break;
case kIeee754Float64Pow: {
- __ CallStubDelayed(new (zone())
- MathPowStub(nullptr, MathPowStub::DOUBLE));
+ __ CallStubDelayed(new (zone()) MathPowStub());
break;
}
case kIeee754Float64Sin:
@@ -1592,6 +1601,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Dsb(FullSystem, BarrierAll);
__ Isb();
break;
+ case kArchPoisonOnSpeculationWord:
+ __ And(i.OutputRegister(0), i.InputRegister(0),
+ Operand(kSpeculationPoisonRegister));
+ break;
case kWord32AtomicLoadInt8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarb);
__ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
@@ -1658,6 +1671,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Sxtb(i.OutputRegister(0), i.OutputRegister(0)); \
break; \
case kWord32Atomic##op##Uint8: \
+ case kArm64Word64Atomic##op##Uint8: \
ASSEMBLE_ATOMIC_BINOP(ldaxrb, stlxrb, inst); \
break; \
case kWord32Atomic##op##Int16: \
@@ -1665,9 +1679,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Sxth(i.OutputRegister(0), i.OutputRegister(0)); \
break; \
case kWord32Atomic##op##Uint16: \
+ case kArm64Word64Atomic##op##Uint16: \
ASSEMBLE_ATOMIC_BINOP(ldaxrh, stlxrh, inst); \
break; \
case kWord32Atomic##op##Word32: \
+ case kArm64Word64Atomic##op##Uint32: \
ASSEMBLE_ATOMIC_BINOP(ldaxr, stlxr, inst); \
break;
ATOMIC_BINOP_CASE(Add, Add)
@@ -1676,11 +1692,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Or, Orr)
ATOMIC_BINOP_CASE(Xor, Eor)
#undef ATOMIC_BINOP_CASE
+#define ATOMIC64_BINOP_CASE(op, inst) \
+ case kArm64Word64Atomic##op##Uint64: \
+ ASSEMBLE_ATOMIC64_BINOP(ldaxr, stlxr, inst); \
+ break;
+ ATOMIC64_BINOP_CASE(Add, Add)
+ ATOMIC64_BINOP_CASE(Sub, Sub)
+ ATOMIC64_BINOP_CASE(And, And)
+ ATOMIC64_BINOP_CASE(Or, Orr)
+ ATOMIC64_BINOP_CASE(Xor, Eor)
+#undef ATOMIC64_BINOP_CASE
#undef ASSEMBLE_SHIFT
#undef ASSEMBLE_ATOMIC_LOAD_INTEGER
#undef ASSEMBLE_ATOMIC_STORE_INTEGER
#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER
#undef ASSEMBLE_ATOMIC_BINOP
+#undef ASSEMBLE_ATOMIC64_BINOP
#undef ASSEMBLE_IEEE754_BINOP
#undef ASSEMBLE_IEEE754_UNOP
#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER
@@ -2395,7 +2422,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
- InitializePoisonForLoadsIfNeeded();
+ ResetSpeculationPoison();
}
if (info()->IsWasm() && shrink_slots > 128) {
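The ASSEMBLE_ATOMIC64_BINOP macro added above is the classic load-exclusive/store-exclusive retry loop: ldaxr loads the current value, the binary instruction computes the update, stlxr attempts the store, and Cbnz loops back if the exclusive store lost the race. A portable C++ analogue of the same retry structure, using std::atomic instead of ARM exclusives (illustration only, not V8 code):

#include <atomic>
#include <cstdint>
#include <cstdio>

// Conceptual analogue of the ldaxr/stlxr retry loop emitted by
// ASSEMBLE_ATOMIC64_BINOP: retry until the exclusive store succeeds and
// return the value observed before the update.
uint64_t AtomicFetchAdd64(std::atomic<uint64_t>* cell, uint64_t operand) {
  uint64_t observed = cell->load(std::memory_order_acquire);   // ~ ldaxr
  while (!cell->compare_exchange_weak(
      observed, observed + operand,                            // ~ bin_instr
      std::memory_order_acq_rel, std::memory_order_acquire)) {
    // On failure compare_exchange_weak reloads 'observed', so the loop
    // simply retries with the fresh value.                    // ~ Cbnz &binop
  }
  return observed;
}

int main() {
  std::atomic<uint64_t> counter{40};
  uint64_t before = AtomicFetchAdd64(&counter, 2);
  std::printf("before=%llu after=%llu\n",
              static_cast<unsigned long long>(before),
              static_cast<unsigned long long>(counter.load()));
}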
diff --git a/deps/v8/src/compiler/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
index 72218ce8fd..5eb09dfe05 100644
--- a/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
@@ -300,7 +300,27 @@ namespace compiler {
V(Arm64S1x8AnyTrue) \
V(Arm64S1x8AllTrue) \
V(Arm64S1x16AnyTrue) \
- V(Arm64S1x16AllTrue)
+ V(Arm64S1x16AllTrue) \
+ V(Arm64Word64AtomicAddUint8) \
+ V(Arm64Word64AtomicAddUint16) \
+ V(Arm64Word64AtomicAddUint32) \
+ V(Arm64Word64AtomicAddUint64) \
+ V(Arm64Word64AtomicSubUint8) \
+ V(Arm64Word64AtomicSubUint16) \
+ V(Arm64Word64AtomicSubUint32) \
+ V(Arm64Word64AtomicSubUint64) \
+ V(Arm64Word64AtomicAndUint8) \
+ V(Arm64Word64AtomicAndUint16) \
+ V(Arm64Word64AtomicAndUint32) \
+ V(Arm64Word64AtomicAndUint64) \
+ V(Arm64Word64AtomicOrUint8) \
+ V(Arm64Word64AtomicOrUint16) \
+ V(Arm64Word64AtomicOrUint32) \
+ V(Arm64Word64AtomicOrUint64) \
+ V(Arm64Word64AtomicXorUint8) \
+ V(Arm64Word64AtomicXorUint16) \
+ V(Arm64Word64AtomicXorUint32) \
+ V(Arm64Word64AtomicXorUint64)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc
index 5378cb2f9c..289fe47e09 100644
--- a/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc
@@ -277,13 +277,11 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64S1x8AllTrue:
case kArm64S1x16AnyTrue:
case kArm64S1x16AllTrue:
- return kNoOpcodeFlags;
-
case kArm64TestAndBranch32:
case kArm64TestAndBranch:
case kArm64CompareAndBranch32:
case kArm64CompareAndBranch:
- return kIsBlockTerminator;
+ return kNoOpcodeFlags;
case kArm64LdrS:
case kArm64LdrD:
@@ -311,6 +309,28 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64DsbIsb:
return kHasSideEffect;
+ case kArm64Word64AtomicAddUint8:
+ case kArm64Word64AtomicAddUint16:
+ case kArm64Word64AtomicAddUint32:
+ case kArm64Word64AtomicAddUint64:
+ case kArm64Word64AtomicSubUint8:
+ case kArm64Word64AtomicSubUint16:
+ case kArm64Word64AtomicSubUint32:
+ case kArm64Word64AtomicSubUint64:
+ case kArm64Word64AtomicAndUint8:
+ case kArm64Word64AtomicAndUint16:
+ case kArm64Word64AtomicAndUint32:
+ case kArm64Word64AtomicAndUint64:
+ case kArm64Word64AtomicOrUint8:
+ case kArm64Word64AtomicOrUint16:
+ case kArm64Word64AtomicOrUint32:
+ case kArm64Word64AtomicOrUint64:
+ case kArm64Word64AtomicXorUint8:
+ case kArm64Word64AtomicXorUint16:
+ case kArm64Word64AtomicXorUint32:
+ case kArm64Word64AtomicXorUint64:
+ return kHasSideEffect;
+
#define CASE(Name) case k##Name:
COMMON_ARCH_OPCODE_LIST(CASE)
#undef CASE
diff --git a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
index 0787ccdc0f..08538cc4e2 100644
--- a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
@@ -65,10 +65,10 @@ class Arm64OperandGenerator final : public OperandGenerator {
int64_t GetIntegerConstantValue(Node* node) {
if (node->opcode() == IrOpcode::kInt32Constant) {
- return OpParameter<int32_t>(node);
+ return OpParameter<int32_t>(node->op());
}
DCHECK_EQ(IrOpcode::kInt64Constant, node->opcode());
- return OpParameter<int64_t>(node);
+ return OpParameter<int64_t>(node->op());
}
bool IsFloatConstant(Node* node) {
@@ -78,10 +78,10 @@ class Arm64OperandGenerator final : public OperandGenerator {
double GetFloatConstantValue(Node* node) {
if (node->opcode() == IrOpcode::kFloat32Constant) {
- return OpParameter<float>(node);
+ return OpParameter<float>(node->op());
}
DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
- return OpParameter<double>(node);
+ return OpParameter<double>(node->op());
}
bool CanBeImmediate(Node* node, ImmediateMode mode) {
@@ -155,7 +155,7 @@ void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
void VisitRRI(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
Arm64OperandGenerator g(selector);
- int32_t imm = OpParameter<int32_t>(node);
+ int32_t imm = OpParameter<int32_t>(node->op());
selector->Emit(opcode, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)), g.UseImmediate(imm));
}
@@ -170,7 +170,7 @@ void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
Arm64OperandGenerator g(selector);
- int32_t imm = OpParameter<int32_t>(node);
+ int32_t imm = OpParameter<int32_t>(node->op());
selector->Emit(opcode, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)), g.UseImmediate(imm),
g.UseRegister(node->InputAt(1)));
@@ -416,9 +416,9 @@ void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode, ImmediateMode operand_mode,
FlagsContinuation* cont) {
Arm64OperandGenerator g(selector);
- InstructionOperand inputs[5];
+ InstructionOperand inputs[3];
size_t input_count = 0;
- InstructionOperand outputs[2];
+ InstructionOperand outputs[1];
size_t output_count = 0;
Node* left_node = node->InputAt(0);
@@ -467,35 +467,17 @@ void VisitBinop(InstructionSelector* selector, Node* node,
inputs[input_count++] = g.UseRegister(right_node);
}
- if (cont->IsBranch()) {
- inputs[input_count++] = g.Label(cont->true_block());
- inputs[input_count++] = g.Label(cont->false_block());
- }
-
if (!IsComparisonField::decode(properties)) {
outputs[output_count++] = g.DefineAsRegister(node);
}
- if (cont->IsSet()) {
- outputs[output_count++] = g.DefineAsRegister(cont->result());
- }
-
DCHECK_NE(0u, input_count);
DCHECK((output_count != 0) || IsComparisonField::decode(properties));
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
- opcode = cont->Encode(opcode);
- if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->feedback(),
- cont->frame_state());
- } else if (cont->IsTrap()) {
- inputs[input_count++] = g.UseImmediate(cont->trap_id());
- selector->Emit(opcode, output_count, outputs, input_count, inputs);
- } else {
- selector->Emit(opcode, output_count, outputs, input_count, inputs);
- }
+ selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
+ inputs, cont);
}
@@ -630,7 +612,7 @@ void InstructionSelector::VisitLoad(Node* node) {
return;
}
if (node->opcode() == IrOpcode::kPoisonedLoad) {
- CHECK_EQ(load_poisoning_, LoadPoisoning::kDoPoison);
+ CHECK_EQ(poisoning_enabled_, PoisoningMitigationLevel::kOn);
opcode |= MiscField::encode(kMemoryAccessPoisoned);
}
@@ -1386,23 +1368,9 @@ void EmitInt32MulWithOverflow(InstructionSelector* selector, Node* node,
InstructionOperand right = g.UseRegister(m.right().node());
selector->Emit(kArm64Smull, result, left, right);
- InstructionCode opcode = cont->Encode(kArm64Cmp) |
- AddressingModeField::encode(kMode_Operand2_R_SXTW);
- if (cont->IsBranch()) {
- selector->Emit(opcode, g.NoOutput(), result, result,
- g.Label(cont->true_block()), g.Label(cont->false_block()));
- } else if (cont->IsDeoptimize()) {
- InstructionOperand in[] = {result, result};
- selector->EmitDeoptimize(opcode, 0, nullptr, 2, in, cont->kind(),
- cont->reason(), cont->feedback(),
- cont->frame_state());
- } else if (cont->IsSet()) {
- selector->Emit(opcode, g.DefineAsRegister(cont->result()), result, result);
- } else {
- DCHECK(cont->IsTrap());
- selector->Emit(opcode, g.NoOutput(), result, result,
- g.UseImmediate(cont->trap_id()));
- }
+ InstructionCode opcode =
+ kArm64Cmp | AddressingModeField::encode(kMode_Operand2_R_SXTW);
+ selector->EmitWithContinuation(opcode, result, result, cont);
}
} // namespace
@@ -1767,22 +1735,7 @@ namespace {
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
InstructionOperand left, InstructionOperand right,
FlagsContinuation* cont) {
- Arm64OperandGenerator g(selector);
- opcode = cont->Encode(opcode);
- if (cont->IsBranch()) {
- selector->Emit(opcode, g.NoOutput(), left, right,
- g.Label(cont->true_block()), g.Label(cont->false_block()));
- } else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
- cont->reason(), cont->feedback(),
- cont->frame_state());
- } else if (cont->IsSet()) {
- selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
- } else {
- DCHECK(cont->IsTrap());
- selector->Emit(opcode, g.NoOutput(), left, right,
- g.UseImmediate(cont->trap_id()));
- }
+ selector->EmitWithContinuation(opcode, left, right, cont);
}
@@ -1936,16 +1889,8 @@ FlagsCondition MapForCbz(FlagsCondition cond) {
void EmitBranchOrDeoptimize(InstructionSelector* selector,
InstructionCode opcode, InstructionOperand value,
FlagsContinuation* cont) {
- Arm64OperandGenerator g(selector);
- if (cont->IsBranch()) {
- selector->Emit(cont->Encode(opcode), g.NoOutput(), value,
- g.Label(cont->true_block()), g.Label(cont->false_block()));
- } else {
- DCHECK(cont->IsDeoptimize());
- selector->EmitDeoptimize(cont->Encode(opcode), g.NoOutput(), value,
- cont->kind(), cont->reason(), cont->feedback(),
- cont->frame_state());
- }
+ DCHECK(cont->IsBranch() || cont->IsDeoptimize());
+ selector->EmitWithContinuation(opcode, value, cont);
}
// Try to emit TBZ, TBNZ, CBZ or CBNZ for certain comparisons of {node}
@@ -1977,14 +1922,12 @@ bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, uint32_t value,
InstructionOperand temp = g.TempRegister();
selector->Emit(kArm64U64MoveFloat64, temp,
g.UseRegister(node->InputAt(0)));
- selector->Emit(cont->Encode(kArm64TestAndBranch), g.NoOutput(), temp,
- g.TempImmediate(63), g.Label(cont->true_block()),
- g.Label(cont->false_block()));
+ selector->EmitWithContinuation(kArm64TestAndBranch, temp,
+ g.TempImmediate(63), cont);
return true;
}
- selector->Emit(cont->Encode(kArm64TestAndBranch32), g.NoOutput(),
- g.UseRegister(node), g.TempImmediate(31),
- g.Label(cont->true_block()), g.Label(cont->false_block()));
+ selector->EmitWithContinuation(kArm64TestAndBranch32, g.UseRegister(node),
+ g.TempImmediate(31), cont);
return true;
}
case kEqual:
@@ -1999,11 +1942,9 @@ bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, uint32_t value,
// In the code generator, Equal refers to a bit being cleared. We want
// the opposite here so negate the condition.
cont->Negate();
- selector->Emit(cont->Encode(kArm64TestAndBranch32), g.NoOutput(),
- g.UseRegister(m_and.left().node()),
- g.TempImmediate(base::bits::CountTrailingZeros(value)),
- g.Label(cont->true_block()),
- g.Label(cont->false_block()));
+ selector->EmitWithContinuation(
+ kArm64TestAndBranch32, g.UseRegister(m_and.left().node()),
+ g.TempImmediate(base::bits::CountTrailingZeros(value)), cont);
return true;
}
}
@@ -2115,10 +2056,10 @@ bool TryEmitTestAndBranch(InstructionSelector* selector, Node* node,
base::bits::IsPowerOfTwo(m.right().Value())) {
// If the mask has only one bit set, we can use tbz/tbnz.
DCHECK((cont->condition() == kEqual) || (cont->condition() == kNotEqual));
- selector->Emit(
- cont->Encode(kOpcode), g.NoOutput(), g.UseRegister(m.left().node()),
+ selector->EmitWithContinuation(
+ kOpcode, g.UseRegister(m.left().node()),
g.TempImmediate(base::bits::CountTrailingZeros(m.right().Value())),
- g.Label(cont->true_block()), g.Label(cont->false_block()));
+ cont);
return true;
}
return false;
@@ -2211,7 +2152,7 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
// Merge the Word64Equal(x, 0) comparison into a cbz instruction.
if ((cont->IsBranch() || cont->IsDeoptimize()) &&
!cont->IsPoisoned()) {
- EmitBranchOrDeoptimize(this, cont->Encode(kArm64CompareAndBranch),
+ EmitBranchOrDeoptimize(this, kArm64CompareAndBranch,
g.UseRegister(left), cont);
return;
}
@@ -2320,25 +2261,13 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
}
// Branch could not be combined with a compare, compare against 0 and branch.
- if (cont->IsBranch()) {
- if (cont->IsPoisoned()) {
- // We need an instruction that sets flags for poisoning to work.
- Emit(cont->Encode(kArm64Tst32), g.NoOutput(), g.UseRegister(value),
- g.UseRegister(value), g.Label(cont->true_block()),
- g.Label(cont->false_block()));
- } else {
- Emit(cont->Encode(kArm64CompareAndBranch32), g.NoOutput(),
- g.UseRegister(value), g.Label(cont->true_block()),
- g.Label(cont->false_block()));
- }
- } else if (cont->IsDeoptimize()) {
- EmitDeoptimize(cont->Encode(kArm64Tst32), g.NoOutput(),
- g.UseRegister(value), g.UseRegister(value), cont->kind(),
- cont->reason(), cont->feedback(), cont->frame_state());
+ if (!cont->IsPoisoned() && cont->IsBranch()) {
+ Emit(cont->Encode(kArm64CompareAndBranch32), g.NoOutput(),
+ g.UseRegister(value), g.Label(cont->true_block()),
+ g.Label(cont->false_block()));
} else {
- DCHECK(cont->IsTrap());
- Emit(cont->Encode(kArm64Tst32), g.NoOutput(), g.UseRegister(value),
- g.UseRegister(value), g.UseImmediate(cont->trap_id()));
+ EmitWithContinuation(cont->Encode(kArm64Tst32), g.UseRegister(value),
+ g.UseRegister(value), cont);
}
}
@@ -2349,20 +2278,20 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
// Emit either ArchTableSwitch or ArchLookupSwitch.
if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
static const size_t kMaxTableSwitchValueRange = 2 << 16;
- size_t table_space_cost = 4 + sw.value_range;
+ size_t table_space_cost = 4 + sw.value_range();
size_t table_time_cost = 3;
- size_t lookup_space_cost = 3 + 2 * sw.case_count;
- size_t lookup_time_cost = sw.case_count;
- if (sw.case_count > 0 &&
+ size_t lookup_space_cost = 3 + 2 * sw.case_count();
+ size_t lookup_time_cost = sw.case_count();
+ if (sw.case_count() > 0 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min() &&
- sw.value_range <= kMaxTableSwitchValueRange) {
+ sw.min_value() > std::numeric_limits<int32_t>::min() &&
+ sw.value_range() <= kMaxTableSwitchValueRange) {
InstructionOperand index_operand = value_operand;
- if (sw.min_value) {
+ if (sw.min_value()) {
index_operand = g.TempRegister();
Emit(kArm64Sub32, index_operand, value_operand,
- g.TempImmediate(sw.min_value));
+ g.TempImmediate(sw.min_value()));
}
// Generate a table lookup.
return EmitTableSwitch(sw, index_operand);
@@ -2800,6 +2729,55 @@ VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
+void InstructionSelector::VisitWord64AtomicBinaryOperation(
+ Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op,
+ ArchOpcode uint64_op) {
+ Arm64OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ ArchOpcode opcode = kArchNop;
+ MachineType type = AtomicOpRepresentationOf(node->op());
+ if (type == MachineType::Uint8()) {
+ opcode = uint8_op;
+ } else if (type == MachineType::Uint16()) {
+ opcode = uint16_op;
+ } else if (type == MachineType::Uint32()) {
+ opcode = uint32_op;
+ } else if (type == MachineType::Uint64()) {
+ opcode = uint64_op;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+
+ AddressingMode addressing_mode = kMode_MRR;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseRegister(base);
+ inputs[input_count++] = g.UseRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionOperand outputs[1];
+ outputs[0] = g.DefineAsRegister(node);
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(),
+ g.TempRegister()};
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, 1, outputs, input_count, inputs, arraysize(temps), temps);
+}
+
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
+ VisitWord64AtomicBinaryOperation( \
+ node, kArm64Word64Atomic##op##Uint8, kArm64Word64Atomic##op##Uint16, \
+ kArm64Word64Atomic##op##Uint32, kArm64Word64Atomic##op##Uint64); \
+ }
+VISIT_ATOMIC_BINOP(Add)
+VISIT_ATOMIC_BINOP(Sub)
+VISIT_ATOMIC_BINOP(And)
+VISIT_ATOMIC_BINOP(Or)
+VISIT_ATOMIC_BINOP(Xor)
+#undef VISIT_ATOMIC_BINOP
+
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
UNREACHABLE();
}
@@ -3061,15 +3039,15 @@ void ArrangeShuffleTable(Arm64OperandGenerator* g, Node* input0, Node* input1,
*src0 = *src1 = g->UseRegister(input0);
} else {
// Binary, table registers must be consecutive.
- *src0 = g->UseFixed(input0, fp_fixed2);
- *src1 = g->UseFixed(input1, fp_fixed3);
+ *src0 = g->UseFixed(input0, fp_fixed1);
+ *src1 = g->UseFixed(input1, fp_fixed2);
}
}
} // namespace
void InstructionSelector::VisitS8x16Shuffle(Node* node) {
- const uint8_t* shuffle = OpParameter<uint8_t*>(node);
+ const uint8_t* shuffle = OpParameter<uint8_t*>(node->op());
uint8_t mask = CanonicalizeShuffle(node);
uint8_t shuffle32x4[4];
Arm64OperandGenerator g(this);
@@ -3172,9 +3150,6 @@ InstructionSelector::AlignmentRequirements() {
FullUnalignedAccessSupport();
}
-// static
-bool InstructionSelector::SupportsSpeculationPoisoning() { return true; }
-
} // namespace compiler
} // namespace internal
} // namespace v8
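Both VisitSwitch hunks keep the same jump-table heuristic and only move SwitchInfo from public fields to accessor calls: a table switch is emitted when its estimated space cost plus three times its time cost does not exceed the lookup switch's, the case count is non-zero, the minimum value is representable, and the value range stays within kMaxTableSwitchValueRange. A self-contained rendering of that decision with illustrative inputs (not V8's types):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <limits>

// Mirrors the cost comparison in VisitSwitch: prefer ArchTableSwitch when
// space + 3 * time for a jump table does not exceed the same metric for a
// linear lookup switch, and the case values form a reasonably dense range.
bool UseTableSwitch(size_t case_count, size_t value_range, int32_t min_value) {
  static const size_t kMaxTableSwitchValueRange = 2 << 16;
  size_t table_space_cost = 4 + value_range;
  size_t table_time_cost = 3;
  size_t lookup_space_cost = 3 + 2 * case_count;
  size_t lookup_time_cost = case_count;
  return case_count > 0 &&
         table_space_cost + 3 * table_time_cost <=
             lookup_space_cost + 3 * lookup_time_cost &&
         min_value > std::numeric_limits<int32_t>::min() &&
         value_range <= kMaxTableSwitchValueRange;
}

int main() {
  std::printf("dense 8-case switch  -> table? %d\n", UseTableSwitch(8, 8, 0));
  std::printf("sparse 4-case switch -> table? %d\n",
              UseTableSwitch(4, 100000, 0));
}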
diff --git a/deps/v8/src/compiler/basic-block-instrumentor.cc b/deps/v8/src/compiler/basic-block-instrumentor.cc
index bb2229b2f8..ebec8161ba 100644
--- a/deps/v8/src/compiler/basic-block-instrumentor.cc
+++ b/deps/v8/src/compiler/basic-block-instrumentor.cc
@@ -6,7 +6,6 @@
#include <sstream>
-#include "src/compilation-info.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/machine-operator.h"
@@ -14,6 +13,7 @@
#include "src/compiler/operator-properties.h"
#include "src/compiler/schedule.h"
#include "src/objects-inl.h"
+#include "src/optimized-compilation-info.h"
namespace v8 {
namespace internal {
@@ -48,7 +48,8 @@ static const Operator* PointerConstant(CommonOperatorBuilder* common,
}
BasicBlockProfiler::Data* BasicBlockInstrumentor::Instrument(
- CompilationInfo* info, Graph* graph, Schedule* schedule, Isolate* isolate) {
+ OptimizedCompilationInfo* info, Graph* graph, Schedule* schedule,
+ Isolate* isolate) {
// Skip the exit block in profiles, since the register allocator can't handle
// it and entry into it means falling off the end of the function anyway.
size_t n_blocks = static_cast<size_t>(schedule->RpoBlockCount()) - 1;
@@ -57,7 +58,7 @@ BasicBlockProfiler::Data* BasicBlockInstrumentor::Instrument(
// Set the function name.
if (info->has_shared_info()) {
std::ostringstream os;
- info->shared_info()->name()->PrintUC16(os);
+ info->shared_info()->Name()->PrintUC16(os);
data->SetFunctionName(&os);
}
// Capture the schedule string before instrumentation.
diff --git a/deps/v8/src/compiler/basic-block-instrumentor.h b/deps/v8/src/compiler/basic-block-instrumentor.h
index 3a5b729966..620f38d535 100644
--- a/deps/v8/src/compiler/basic-block-instrumentor.h
+++ b/deps/v8/src/compiler/basic-block-instrumentor.h
@@ -11,7 +11,7 @@
namespace v8 {
namespace internal {
-class CompilationInfo;
+class OptimizedCompilationInfo;
namespace compiler {
@@ -20,7 +20,7 @@ class Schedule;
class BasicBlockInstrumentor : public AllStatic {
public:
- static BasicBlockProfiler::Data* Instrument(CompilationInfo* info,
+ static BasicBlockProfiler::Data* Instrument(OptimizedCompilationInfo* info,
Graph* graph, Schedule* schedule,
Isolate* isolate);
};
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index 3b2a3eb252..76c3978f60 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -516,11 +516,12 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
Handle<FeedbackVector> feedback_vector, BailoutId osr_offset,
JSGraph* jsgraph, CallFrequency invocation_frequency,
SourcePositionTable* source_positions, Handle<Context> native_context,
- int inlining_id, JSTypeHintLowering::Flags flags, bool stack_check)
+ int inlining_id, JSTypeHintLowering::Flags flags, bool stack_check,
+ bool analyze_environment_liveness)
: local_zone_(local_zone),
jsgraph_(jsgraph),
invocation_frequency_(invocation_frequency),
- bytecode_array_(handle(shared_info->bytecode_array())),
+ bytecode_array_(handle(shared_info->GetBytecodeArray())),
feedback_vector_(feedback_vector),
type_hint_lowering_(jsgraph, feedback_vector, flags),
frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
@@ -533,6 +534,7 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
osr_offset_(osr_offset),
currently_peeled_loop_offset_(-1),
stack_check_(stack_check),
+ analyze_environment_liveness_(analyze_environment_liveness),
merge_environments_(local_zone),
generator_merge_environments_(local_zone),
exception_handlers_(local_zone),
@@ -543,7 +545,7 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
exit_controls_(local_zone),
state_values_cache_(jsgraph),
source_positions_(source_positions),
- start_position_(shared_info->start_position(), inlining_id),
+ start_position_(shared_info->StartPosition(), inlining_id),
native_context_(native_context) {}
Node* BytecodeGraphBuilder::GetFunctionClosure() {
@@ -869,7 +871,7 @@ void BytecodeGraphBuilder::VisitSingleBytecode(
void BytecodeGraphBuilder::VisitBytecodes() {
BytecodeAnalysis bytecode_analysis(bytecode_array(), local_zone(),
- FLAG_analyze_environment_liveness);
+ analyze_environment_liveness());
bytecode_analysis.Analyze(osr_offset_);
set_bytecode_analysis(&bytecode_analysis);
@@ -878,7 +880,7 @@ void BytecodeGraphBuilder::VisitBytecodes() {
SourcePositionTableIterator source_position_iterator(
handle(bytecode_array()->SourcePositionTable()));
- if (FLAG_trace_environment_liveness) {
+ if (analyze_environment_liveness() && FLAG_trace_environment_liveness) {
OFStream of(stdout);
bytecode_analysis.PrintLivenessTo(of);
@@ -1007,6 +1009,32 @@ void BytecodeGraphBuilder::VisitStaGlobal() {
environment()->RecordAfterState(node, Environment::kAttachFrameState);
}
+void BytecodeGraphBuilder::VisitStaInArrayLiteral() {
+ PrepareEagerCheckpoint();
+ Node* value = environment()->LookupAccumulator();
+ Node* array =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+ Node* index =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1));
+ VectorSlotPair feedback =
+ CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
+ const Operator* op = javascript()->StoreInArrayLiteral(feedback);
+
+ JSTypeHintLowering::LoweringResult lowering =
+ TryBuildSimplifiedStoreKeyed(op, array, index, value, feedback.slot());
+ if (lowering.IsExit()) return;
+
+ Node* node = nullptr;
+ if (lowering.IsSideEffectFree()) {
+ node = lowering.value();
+ } else {
+ DCHECK(!lowering.Changed());
+ node = NewNode(op, array, index, value);
+ }
+
+ environment()->RecordAfterState(node, Environment::kAttachFrameState);
+}
+
void BytecodeGraphBuilder::VisitStaDataPropertyInLiteral() {
PrepareEagerCheckpoint();
@@ -1862,14 +1890,21 @@ Node* BytecodeGraphBuilder::ProcessCallRuntimeArguments(
void BytecodeGraphBuilder::VisitCallRuntime() {
PrepareEagerCheckpoint();
- Runtime::FunctionId functionId = bytecode_iterator().GetRuntimeIdOperand(0);
+ Runtime::FunctionId function_id = bytecode_iterator().GetRuntimeIdOperand(0);
interpreter::Register receiver = bytecode_iterator().GetRegisterOperand(1);
size_t reg_count = bytecode_iterator().GetRegisterCountOperand(2);
// Create node to perform the runtime call.
- const Operator* call = javascript()->CallRuntime(functionId, reg_count);
+ const Operator* call = javascript()->CallRuntime(function_id, reg_count);
Node* value = ProcessCallRuntimeArguments(call, receiver, reg_count);
environment()->BindAccumulator(value, Environment::kAttachFrameState);
+
+ // Connect to the end if {function_id} is non-returning.
+ if (Runtime::IsNonReturning(function_id)) {
+ // TODO(7099): Investigate if we need LoopExit node here.
+ Node* control = NewNode(common()->Throw());
+ MergeControlToLeaveFunction(control);
+ }
}
void BytecodeGraphBuilder::VisitCallRuntimeForPair() {
@@ -2021,6 +2056,8 @@ void BytecodeGraphBuilder::BuildHoleCheckAndThrow(
SubEnvironment sub_environment(this);
NewIfTrue();
+ BuildLoopExitsForFunctionExit(bytecode_analysis()->GetInLivenessFor(
+ bytecode_iterator().current_offset()));
Node* node;
const Operator* op = javascript()->CallRuntime(runtime_id);
if (runtime_id == Runtime::kThrowReferenceError) {
@@ -2496,6 +2533,12 @@ void BytecodeGraphBuilder::VisitToObject() {
BuildCastOperator(javascript()->ToObject());
}
+void BytecodeGraphBuilder::VisitToString() {
+ Node* value =
+ NewNode(javascript()->ToString(), environment()->LookupAccumulator());
+ environment()->BindAccumulator(value, Environment::kAttachFrameState);
+}
+
void BytecodeGraphBuilder::VisitToNumber() {
PrepareEagerCheckpoint();
Node* object = environment()->LookupAccumulator();
@@ -2827,6 +2870,7 @@ void BytecodeGraphBuilder::BuildSwitchOnGeneratorState(
// the default to represent one of the cases above/fallthrough below?
NewIfDefault();
NewNode(simplified()->RuntimeAbort(AbortReason::kInvalidJumpTableIndex));
+ // TODO(7099): Investigate if we need LoopExit here.
Node* control = NewNode(common()->Throw());
MergeControlToLeaveFunction(control);
}
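The new VisitStaInArrayLiteral handler above follows the builder's usual shape: checkpoint, fetch the accumulator and register operands, try a feedback-driven simplified lowering first, and only build the generic JSStoreInArrayLiteral node when the lowering did not produce a replacement. A schematic sketch of that try-fast-path-then-fallback flow, with invented helper names standing in for TryBuildSimplifiedStoreKeyed and NewNode:

#include <cstdio>
#include <optional>
#include <string>

// Hypothetical stand-ins: 'Node' is just a label here.
using Node = std::string;

std::optional<Node> TryLowerStore(bool feedback_is_useful) {
  if (!feedback_is_useful) return std::nullopt;  // lowering not applicable
  return Node("SimplifiedStoreElement");         // side-effect-free result
}

Node BuildStoreInArrayLiteral(bool feedback_is_useful) {
  // Fast path: reuse the node produced by the feedback-driven lowering.
  if (std::optional<Node> lowered = TryLowerStore(feedback_is_useful)) {
    return *lowered;
  }
  // Fallback: build the generic operator node, as in
  // NewNode(op, array, index, value) in the original handler.
  return Node("JSStoreInArrayLiteral");
}

int main() {
  std::printf("%s\n", BuildStoreInArrayLiteral(true).c_str());
  std::printf("%s\n", BuildStoreInArrayLiteral(false).c_str());
}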
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.h b/deps/v8/src/compiler/bytecode-graph-builder.h
index 75d464f79e..9025d477d5 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.h
+++ b/deps/v8/src/compiler/bytecode-graph-builder.h
@@ -35,7 +35,7 @@ class BytecodeGraphBuilder {
SourcePositionTable* source_positions, Handle<Context> native_context,
int inlining_id = SourcePosition::kNotInlined,
JSTypeHintLowering::Flags flags = JSTypeHintLowering::kNoFlags,
- bool stack_check = true);
+ bool stack_check = true, bool analyze_environment_liveness = true);
// Creates a graph by visiting bytecodes.
void CreateGraph();
@@ -349,6 +349,10 @@ class BytecodeGraphBuilder {
void set_stack_check(bool stack_check) { stack_check_ = stack_check; }
+ bool analyze_environment_liveness() const {
+ return analyze_environment_liveness_;
+ }
+
int current_exception_handler() { return current_exception_handler_; }
void set_current_exception_handler(int index) {
@@ -379,6 +383,7 @@ class BytecodeGraphBuilder {
BailoutId osr_offset_;
int currently_peeled_loop_offset_;
bool stack_check_;
+ bool analyze_environment_liveness_;
// Merge environments are snapshots of the environment at points where the
// control flow merges. This models a forward data flow propagation of all
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index 4ad286c68c..01e8d54707 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -21,6 +21,7 @@
#include "src/machine-type.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
+#include "src/snapshot/serializer-common.h"
#include "src/utils.h"
#include "src/zone/zone.h"
@@ -57,8 +58,9 @@ static_assert(
CodeAssemblerState::CodeAssemblerState(
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
- Code::Kind kind, const char* name, size_t result_size, uint32_t stub_key,
- int32_t builtin_index)
+ Code::Kind kind, const char* name,
+ PoisoningMitigationLevel poisoning_enabled, size_t result_size,
+ uint32_t stub_key, int32_t builtin_index)
// TODO(rmcilroy): Should we use Linkage::GetBytecodeDispatchDescriptor for
// bytecode handlers?
: CodeAssemblerState(
@@ -67,28 +69,30 @@ CodeAssemblerState::CodeAssemblerState(
isolate, zone, descriptor, descriptor.GetStackParameterCount(),
CallDescriptor::kNoFlags, Operator::kNoProperties,
MachineType::AnyTagged(), result_size),
- kind, name, stub_key, builtin_index) {}
+ kind, name, poisoning_enabled, stub_key, builtin_index) {}
-CodeAssemblerState::CodeAssemblerState(Isolate* isolate, Zone* zone,
- int parameter_count, Code::Kind kind,
- const char* name, int32_t builtin_index)
+CodeAssemblerState::CodeAssemblerState(
+ Isolate* isolate, Zone* zone, int parameter_count, Code::Kind kind,
+ const char* name, PoisoningMitigationLevel poisoning_enabled,
+ int32_t builtin_index)
: CodeAssemblerState(
isolate, zone,
Linkage::GetJSCallDescriptor(zone, false, parameter_count,
kind == Code::BUILTIN
? CallDescriptor::kPushArgumentCount
: CallDescriptor::kNoFlags),
- kind, name, 0, builtin_index) {}
+ kind, name, poisoning_enabled, 0, builtin_index) {}
-CodeAssemblerState::CodeAssemblerState(Isolate* isolate, Zone* zone,
- CallDescriptor* call_descriptor,
- Code::Kind kind, const char* name,
- uint32_t stub_key, int32_t builtin_index)
+CodeAssemblerState::CodeAssemblerState(
+ Isolate* isolate, Zone* zone, CallDescriptor* call_descriptor,
+ Code::Kind kind, const char* name,
+ PoisoningMitigationLevel poisoning_enabled, uint32_t stub_key,
+ int32_t builtin_index)
: raw_assembler_(new RawMachineAssembler(
isolate, new (zone) Graph(zone), call_descriptor,
MachineType::PointerRepresentation(),
InstructionSelector::SupportedMachineOperatorFlags(),
- InstructionSelector::AlignmentRequirements())),
+ InstructionSelector::AlignmentRequirements(), poisoning_enabled)),
kind_(kind),
name_(name),
stub_key_(stub_key),
@@ -174,6 +178,10 @@ bool CodeAssembler::Word32ShiftIsSafe() const {
return raw_assembler()->machine()->Word32ShiftIsSafe();
}
+PoisoningMitigationLevel CodeAssembler::poisoning_enabled() const {
+ return raw_assembler()->poisoning_enabled();
+}
+
// static
Handle<Code> CodeAssembler::GenerateCode(CodeAssemblerState* state) {
DCHECK(!state->code_generated_);
@@ -188,7 +196,7 @@ Handle<Code> CodeAssembler::GenerateCode(CodeAssemblerState* state) {
Handle<Code> code = Pipeline::GenerateCodeForCodeStub(
rasm->isolate(), rasm->call_descriptor(), rasm->graph(), schedule,
state->kind_, state->name_, state->stub_key_, state->builtin_index_,
- should_optimize_jumps ? &jump_opt : nullptr);
+ should_optimize_jumps ? &jump_opt : nullptr, rasm->poisoning_enabled());
if (jump_opt.is_optimizable()) {
jump_opt.set_optimizing();
@@ -197,7 +205,7 @@ Handle<Code> CodeAssembler::GenerateCode(CodeAssemblerState* state) {
code = Pipeline::GenerateCodeForCodeStub(
rasm->isolate(), rasm->call_descriptor(), rasm->graph(), schedule,
state->kind_, state->name_, state->stub_key_, state->builtin_index_,
- &jump_opt);
+ &jump_opt, rasm->poisoning_enabled());
}
state->code_generated_ = true;
@@ -260,6 +268,32 @@ TNode<HeapObject> CodeAssembler::LookupConstant(Handle<HeapObject> object) {
return UncheckedCast<HeapObject>(
Load(MachineType::AnyTagged(), builtins_constants_table, offset));
}
+
+// External references are stored in the external reference table.
+TNode<ExternalReference> CodeAssembler::LookupExternalReference(
+ ExternalReference reference) {
+ DCHECK(isolate()->serializer_enabled());
+
+ // Encode as an index into the external reference table stored on the isolate.
+
+ ExternalReferenceEncoder encoder(isolate());
+ ExternalReferenceEncoder::Value v = encoder.Encode(reference.address());
+ CHECK(!v.is_from_api());
+ uint32_t index = v.index();
+
+ // Generate code to load from the external reference table.
+
+ const intptr_t roots_to_external_reference_offset =
+ Heap::roots_to_external_reference_table_offset()
+#ifdef V8_TARGET_ARCH_X64
+ - kRootRegisterBias
+#endif
+ + ExternalReferenceTable::OffsetOfEntry(index);
+
+ return UncheckedCast<ExternalReference>(
+ Load(MachineType::Pointer(), LoadRootsPointer(),
+ IntPtrConstant(roots_to_external_reference_offset)));
+}
#endif // V8_EMBEDDED_BUILTINS
TNode<Int32T> CodeAssembler::Int32Constant(int32_t value) {
@@ -319,6 +353,11 @@ TNode<Oddball> CodeAssembler::BooleanConstant(bool value) {
TNode<ExternalReference> CodeAssembler::ExternalConstant(
ExternalReference address) {
+#ifdef V8_EMBEDDED_BUILTINS
+ if (ShouldLoadConstantsFromRootList()) {
+ return LookupExternalReference(address);
+ }
+#endif // V8_EMBEDDED_BUILTINS
return UncheckedCast<ExternalReference>(
raw_assembler()->ExternalConstant(address));
}
@@ -459,12 +498,22 @@ Node* CodeAssembler::LoadParentFramePointer() {
return raw_assembler()->LoadParentFramePointer();
}
+TNode<IntPtrT> CodeAssembler::LoadRootsPointer() {
+ return UncheckedCast<IntPtrT>(raw_assembler()->LoadRootsPointer());
+}
+
Node* CodeAssembler::LoadStackPointer() {
return raw_assembler()->LoadStackPointer();
}
-Node* CodeAssembler::SpeculationPoison() {
- return raw_assembler()->SpeculationPoison();
+TNode<Object> CodeAssembler::PoisonOnSpeculationTagged(
+ SloppyTNode<Object> value) {
+ return UncheckedCast<Object>(
+ raw_assembler()->PoisonOnSpeculationTagged(value));
+}
+
+TNode<WordT> CodeAssembler::PoisonOnSpeculationWord(SloppyTNode<WordT> value) {
+ return UncheckedCast<WordT>(raw_assembler()->PoisonOnSpeculationWord(value));
}
#define DEFINE_CODE_ASSEMBLER_BINARY_OP(name, ResType, Arg1Type, Arg2Type) \
@@ -543,6 +592,10 @@ TNode<WordT> CodeAssembler::WordShr(SloppyTNode<WordT> value, int shift) {
return (shift != 0) ? WordShr(value, IntPtrConstant(shift)) : value;
}
+TNode<WordT> CodeAssembler::WordSar(SloppyTNode<WordT> value, int shift) {
+ return (shift != 0) ? WordSar(value, IntPtrConstant(shift)) : value;
+}
+
TNode<Word32T> CodeAssembler::Word32Shr(SloppyTNode<Word32T> value, int shift) {
return (shift != 0) ? Word32Shr(value, Int32Constant(shift)) : value;
}
@@ -898,12 +951,14 @@ Node* CodeAssembler::RoundIntPtrToFloat64(Node* value) {
CODE_ASSEMBLER_UNARY_OP_LIST(DEFINE_CODE_ASSEMBLER_UNARY_OP)
#undef DEFINE_CODE_ASSEMBLER_UNARY_OP
-Node* CodeAssembler::Load(MachineType rep, Node* base) {
- return raw_assembler()->Load(rep, base);
+Node* CodeAssembler::Load(MachineType rep, Node* base,
+ LoadSensitivity needs_poisoning) {
+ return raw_assembler()->Load(rep, base, needs_poisoning);
}
-Node* CodeAssembler::Load(MachineType rep, Node* base, Node* offset) {
- return raw_assembler()->Load(rep, base, offset);
+Node* CodeAssembler::Load(MachineType rep, Node* base, Node* offset,
+ LoadSensitivity needs_poisoning) {
+ return raw_assembler()->Load(rep, base, offset, needs_poisoning);
}
Node* CodeAssembler::AtomicLoad(MachineType rep, Node* base, Node* offset) {
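LookupExternalReference above encodes the reference as an index into the isolate's external reference table and then emits one pointer-sized load at roots pointer + (table offset from the roots pointer, minus the root register bias on x64, plus the entry's offset). The arithmetic is a plain base-plus-offset table read; a minimal sketch with invented offsets and a pointer-sized entry stride (assumptions, not V8's real layout):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Illustration only: offsets and stride are made up. The real code derives
// them from Heap::roots_to_external_reference_table_offset() and
// ExternalReferenceTable::OffsetOfEntry(index).
uintptr_t LoadExternalReference(const unsigned char* roots_pointer,
                                intptr_t table_offset_from_roots,
                                uint32_t index) {
  intptr_t entry_offset = table_offset_from_roots +
                          static_cast<intptr_t>(index) * sizeof(uintptr_t);
  // Equivalent of Load(MachineType::Pointer(), LoadRootsPointer(), offset).
  uintptr_t value;
  std::memcpy(&value, roots_pointer + entry_offset, sizeof(value));
  return value;
}

int main() {
  // Fake layout: the "roots pointer" points directly at a 4-entry table.
  uintptr_t table[4] = {0x1000, 0x2000, 0x3000, 0x4000};
  const unsigned char* roots = reinterpret_cast<const unsigned char*>(table);
  std::printf("entry 2 -> %#lx\n",
              static_cast<unsigned long>(LoadExternalReference(roots, 0, 2)));
}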
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index 1d3abe74f0..c91345e73f 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -18,6 +18,7 @@
#include "src/heap/heap.h"
#include "src/machine-type.h"
#include "src/objects/data-handler.h"
+#include "src/objects/map.h"
#include "src/runtime/runtime.h"
#include "src/zone/zone-containers.h"
@@ -28,6 +29,7 @@ class Callable;
class CallInterfaceDescriptor;
class Isolate;
class JSCollection;
+class JSRegExpStringIterator;
class JSWeakCollection;
class JSWeakMap;
class JSWeakSet;
@@ -36,6 +38,7 @@ class PromiseFulfillReactionJobTask;
class PromiseReaction;
class PromiseReactionJobTask;
class PromiseRejectReactionJobTask;
+class InterpreterData;
class Factory;
class Zone;
@@ -201,6 +204,7 @@ enum class ObjectType {
#undef ENUM_STRUCT_ELEMENT
class AccessCheckNeeded;
+class BigIntWrapper;
class ClassBoilerplate;
class BooleanWrapper;
class CompilationCacheTable;
@@ -221,6 +225,7 @@ class StringWrapper;
class SymbolWrapper;
class Undetectable;
class UniqueName;
+class WasmGlobalObject;
class WasmMemoryObject;
class WasmModuleObject;
class WasmTableObject;
@@ -652,6 +657,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
}
TNode<HeapObject> LookupConstant(Handle<HeapObject> object);
+ TNode<ExternalReference> LookupExternalReference(ExternalReference reference);
#endif
// Constants.
@@ -731,21 +737,27 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Node* LoadFramePointer();
Node* LoadParentFramePointer();
+ // Access to the roots pointer.
+ TNode<IntPtrT> LoadRootsPointer();
+
// Access to the stack pointer
Node* LoadStackPointer();
- // Poison mask for speculation.
- Node* SpeculationPoison();
+ // Poison |value| on speculative paths.
+ TNode<Object> PoisonOnSpeculationTagged(SloppyTNode<Object> value);
+ TNode<WordT> PoisonOnSpeculationWord(SloppyTNode<WordT> value);
// Load raw memory location.
- Node* Load(MachineType rep, Node* base);
+ Node* Load(MachineType rep, Node* base,
+ LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
template <class Type>
TNode<Type> Load(MachineType rep, TNode<RawPtr<Type>> base) {
DCHECK(
IsSubtype(rep.representation(), MachineRepresentationOf<Type>::value));
return UncheckedCast<Type>(Load(rep, static_cast<Node*>(base)));
}
- Node* Load(MachineType rep, Node* base, Node* offset);
+ Node* Load(MachineType rep, Node* base, Node* offset,
+ LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
Node* AtomicLoad(MachineType rep, Node* base, Node* offset);
// Load a value from the root array.
@@ -853,6 +865,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<WordT> WordShl(SloppyTNode<WordT> value, int shift);
TNode<WordT> WordShr(SloppyTNode<WordT> value, int shift);
+ TNode<WordT> WordSar(SloppyTNode<WordT> value, int shift);
TNode<IntPtrT> WordShr(TNode<IntPtrT> value, int shift) {
return UncheckedCast<IntPtrT>(WordShr(static_cast<Node*>(value), shift));
}
@@ -1100,6 +1113,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
void UnregisterCallGenerationCallbacks();
bool Word32ShiftIsSafe() const;
+ PoisoningMitigationLevel poisoning_enabled() const;
private:
// These two don't have definitions and are here only for catching use cases
@@ -1244,13 +1258,15 @@ class V8_EXPORT_PRIVATE CodeAssemblerState {
// TODO(rmcilroy): move result_size to the CallInterfaceDescriptor.
CodeAssemblerState(Isolate* isolate, Zone* zone,
const CallInterfaceDescriptor& descriptor, Code::Kind kind,
- const char* name, size_t result_size = 1,
- uint32_t stub_key = 0,
+ const char* name,
+ PoisoningMitigationLevel poisoning_enabled,
+ size_t result_size = 1, uint32_t stub_key = 0,
int32_t builtin_index = Builtins::kNoBuiltinId);
// Create with JSCall linkage.
CodeAssemblerState(Isolate* isolate, Zone* zone, int parameter_count,
Code::Kind kind, const char* name,
+ PoisoningMitigationLevel poisoning_enabled,
int32_t builtin_index = Builtins::kNoBuiltinId);
~CodeAssemblerState();
@@ -1272,8 +1288,9 @@ class V8_EXPORT_PRIVATE CodeAssemblerState {
CodeAssemblerState(Isolate* isolate, Zone* zone,
CallDescriptor* call_descriptor, Code::Kind kind,
- const char* name, uint32_t stub_key,
- int32_t builtin_index);
+ const char* name,
+ PoisoningMitigationLevel poisoning_enabled,
+ uint32_t stub_key, int32_t builtin_index);
std::unique_ptr<RawMachineAssembler> raw_assembler_;
Code::Kind kind_;
diff --git a/deps/v8/src/compiler/code-generator.cc b/deps/v8/src/compiler/code-generator.cc
index 71b0394eab..f52347cd98 100644
--- a/deps/v8/src/compiler/code-generator.cc
+++ b/deps/v8/src/compiler/code-generator.cc
@@ -7,14 +7,14 @@
#include "src/address-map.h"
#include "src/assembler-inl.h"
#include "src/base/adapters.h"
-#include "src/compilation-info.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/linkage.h"
#include "src/compiler/pipeline.h"
+#include "src/compiler/wasm-compiler.h"
#include "src/eh-frame.h"
#include "src/frames.h"
#include "src/macro-assembler-inl.h"
-#include "src/trap-handler/trap-handler.h"
+#include "src/optimized-compilation-info.h"
namespace v8 {
namespace internal {
@@ -37,13 +37,14 @@ class CodeGenerator::JumpTable final : public ZoneObject {
size_t const target_count_;
};
-CodeGenerator::CodeGenerator(
- Zone* codegen_zone, Frame* frame, Linkage* linkage,
- InstructionSequence* code, CompilationInfo* info, Isolate* isolate,
- base::Optional<OsrHelper> osr_helper, int start_source_position,
- JumpOptimizationInfo* jump_opt,
- std::vector<trap_handler::ProtectedInstructionData>* protected_instructions,
- LoadPoisoning load_poisoning)
+CodeGenerator::CodeGenerator(Zone* codegen_zone, Frame* frame, Linkage* linkage,
+ InstructionSequence* code,
+ OptimizedCompilationInfo* info, Isolate* isolate,
+ base::Optional<OsrHelper> osr_helper,
+ int start_source_position,
+ JumpOptimizationInfo* jump_opt,
+ WasmCompilationData* wasm_compilation_data,
+ CodeGeneratorPoisoningLevel poisoning_level)
: zone_(codegen_zone),
isolate_(isolate),
frame_access_state_(nullptr),
@@ -72,10 +73,11 @@ CodeGenerator::CodeGenerator(
osr_helper_(osr_helper),
osr_pc_offset_(-1),
optimized_out_literal_id_(-1),
- source_position_table_builder_(info->SourcePositionRecordingMode()),
- protected_instructions_(protected_instructions),
+ source_position_table_builder_(
+ SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS),
+ wasm_compilation_data_(wasm_compilation_data),
result_(kSuccess),
- load_poisoning_(load_poisoning) {
+ poisoning_level_(poisoning_level) {
for (int i = 0; i < code->InstructionBlockCount(); ++i) {
new (&labels_[i]) Label;
}
@@ -89,13 +91,15 @@ CodeGenerator::CodeGenerator(
}
}
+bool CodeGenerator::wasm_runtime_exception_support() const {
+ DCHECK(wasm_compilation_data_);
+ return wasm_compilation_data_->runtime_exception_support();
+}
+
void CodeGenerator::AddProtectedInstructionLanding(uint32_t instr_offset,
uint32_t landing_offset) {
- if (protected_instructions_ != nullptr) {
- trap_handler::ProtectedInstructionData data = {instr_offset,
- landing_offset};
- protected_instructions_->emplace_back(data);
- }
+ DCHECK_NOT_NULL(wasm_compilation_data_);
+ wasm_compilation_data_->AddProtectedInstruction(instr_offset, landing_offset);
}
void CodeGenerator::CreateFrameAccessState(Frame* frame) {
@@ -135,7 +139,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
}
void CodeGenerator::AssembleCode() {
- CompilationInfo* info = this->info();
+ OptimizedCompilationInfo* info = this->info();
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
@@ -168,22 +172,11 @@ void CodeGenerator::AssembleCode() {
BailoutIfDeoptimized();
}
- // Initialize {kSpeculationPoisonRegister} either by comparing the expected
- // with the actual call target, or by unconditionally using {-1} initially.
- // Masking register arguments with it only makes sense in the first case.
- if (info->is_generating_speculation_poison_on_entry()) {
- tasm()->RecordComment("-- Prologue: generate speculation poison --");
- GenerateSpeculationPoison();
- if (info->is_poisoning_register_arguments()) {
- AssembleRegisterArgumentPoisoning();
- }
- } else {
- InitializePoisonForLoadsIfNeeded();
- }
+ InitializeSpeculationPoison();
// Define deoptimization literals for all inlined functions.
DCHECK_EQ(0u, deoptimization_literals_.size());
- for (CompilationInfo::InlinedFunctionHolder& inlined :
+ for (OptimizedCompilationInfo::InlinedFunctionHolder& inlined :
info->inlined_functions()) {
if (!inlined.shared_info.equals(info->shared_info())) {
int index = DefineDeoptimizationLiteral(
@@ -389,10 +382,10 @@ Handle<Code> CodeGenerator::FinalizeCode() {
frame()->GetTotalFrameSlotCount(), safepoints()->GetCodeOffset(),
handler_table_offset_);
isolate()->counters()->total_compiled_code_size()->Increment(
- result->instruction_size());
+ result->raw_instruction_size());
LOG_CODE_EVENT(isolate(),
- CodeLinePosInfoRecordEvent(result->instruction_start(),
+ CodeLinePosInfoRecordEvent(result->raw_instruction_start(),
*source_positions));
return result;
@@ -679,7 +672,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
// TODO(jarin) We should thread the flag through rather than set it.
if (instr->IsCall()) {
- InitializePoisonForLoadsIfNeeded();
+ ResetSpeculationPoison();
}
return kSuccess;
@@ -699,7 +692,7 @@ void CodeGenerator::AssembleSourcePosition(SourcePosition source_position) {
source_position_table_builder_.AddPosition(tasm()->pc_offset(),
source_position, false);
if (FLAG_code_comments) {
- CompilationInfo* info = this->info();
+ OptimizedCompilationInfo* info = this->info();
if (info->IsStub()) return;
std::ostringstream buffer;
buffer << "-- ";
@@ -741,8 +734,8 @@ void CodeGenerator::AssembleGaps(Instruction* instr) {
namespace {
Handle<PodArray<InliningPosition>> CreateInliningPositions(
- CompilationInfo* info, Isolate* isolate) {
- const CompilationInfo::InlinedFunctionList& inlined_functions =
+ OptimizedCompilationInfo* info, Isolate* isolate) {
+ const OptimizedCompilationInfo::InlinedFunctionList& inlined_functions =
info->inlined_functions();
if (inlined_functions.size() == 0) {
return Handle<PodArray<InliningPosition>>::cast(
@@ -760,7 +753,7 @@ Handle<PodArray<InliningPosition>> CreateInliningPositions(
} // namespace
Handle<DeoptimizationData> CodeGenerator::GenerateDeoptimizationData() {
- CompilationInfo* info = this->info();
+ OptimizedCompilationInfo* info = this->info();
int deopt_count = static_cast<int>(deoptimization_states_.size());
if (deopt_count == 0 && !info->is_osr()) {
return DeoptimizationData::Empty(isolate());
@@ -841,9 +834,9 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) {
if (needs_frame_state) {
MarkLazyDeoptSite();
- // If the frame state is present, it starts at argument 1 (just after the
- // code address).
- size_t frame_state_offset = 1;
+ // If the frame state is present, it starts at argument 2 - after
+ // the code address and the poison-alias index.
+ size_t frame_state_offset = 2;
FrameStateDescriptor* descriptor =
GetDeoptimizationEntry(instr, frame_state_offset).descriptor();
int pc_offset = tasm()->pc_offset();
@@ -1015,6 +1008,14 @@ void CodeGenerator::BuildTranslationForFrameStateDescriptor(
bailout_id, shared_info_id, parameter_count);
break;
}
+ case FrameStateType::kJavaScriptBuiltinContinuationWithCatch: {
+ BailoutId bailout_id = descriptor->bailout_id();
+ int parameter_count =
+ static_cast<unsigned int>(descriptor->parameters_count());
+ translation->BeginJavaScriptBuiltinContinuationWithCatchFrame(
+ bailout_id, shared_info_id, parameter_count);
+ break;
+ }
}
TranslateFrameStateDescriptorOperands(descriptor, iter, state_combine,
@@ -1190,8 +1191,25 @@ DeoptimizationExit* CodeGenerator::AddDeoptimizationExit(
return exit;
}
-void CodeGenerator::InitializePoisonForLoadsIfNeeded() {
- if (load_poisoning_ == LoadPoisoning::kDoPoison) {
+void CodeGenerator::InitializeSpeculationPoison() {
+ if (poisoning_level_ == CodeGeneratorPoisoningLevel::kDontPoison) return;
+
+ // Initialize {kSpeculationPoisonRegister} either by comparing the expected
+ // with the actual call target, or by unconditionally using {-1} initially.
+ // Masking register arguments with it only makes sense in the first case.
+ if (info()->called_with_code_start_register()) {
+ tasm()->RecordComment("-- Prologue: generate speculation poison --");
+ GenerateSpeculationPoisonFromCodeStartRegister();
+ if (info()->is_poisoning_register_arguments()) {
+ AssembleRegisterArgumentPoisoning();
+ }
+ } else {
+ ResetSpeculationPoison();
+ }
+}
+
+void CodeGenerator::ResetSpeculationPoison() {
+ if (poisoning_level_ == CodeGeneratorPoisoningLevel::kPoisonAll) {
tasm()->ResetSpeculationPoisonRegister();
}
}
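
The prologue logic above reduces to a simple contract: derive an all-ones or all-zeros mask from whether the code was entered through the expected code-start register, then AND speculatively loaded values with that mask. A minimal scalar sketch of that contract (plain C++, not the emitted assembly; the constant and helper names are illustrative only):

#include <cassert>
#include <cstdint>

// kExpectedCodeStart stands in for the value the prologue compares against.
constexpr uintptr_t kExpectedCodeStart = 0x1000;

uintptr_t ComputePoisonMask(uintptr_t actual_code_start) {
  // On the architectural path the comparison holds and the mask is all ones;
  // on a mis-speculated path it collapses to zero.
  return actual_code_start == kExpectedCodeStart ? ~uintptr_t{0} : uintptr_t{0};
}

uintptr_t PoisonLoad(uintptr_t loaded_value, uintptr_t poison_mask) {
  // Equivalent of masking a loaded value with kSpeculationPoisonRegister.
  return loaded_value & poison_mask;
}

int main() {
  uintptr_t mask = ComputePoisonMask(kExpectedCodeStart);
  assert(PoisonLoad(0xDEADBEEF, mask) == 0xDEADBEEF);  // valid path: unchanged
  mask = ComputePoisonMask(0x2000);                    // wrong entry point
  assert(PoisonLoad(0xDEADBEEF, mask) == 0);           // poisoned to zero
  return 0;
}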
diff --git a/deps/v8/src/compiler/code-generator.h b/deps/v8/src/compiler/code-generator.h
index a91ae0212a..e33aed0d9b 100644
--- a/deps/v8/src/compiler/code-generator.h
+++ b/deps/v8/src/compiler/code-generator.h
@@ -18,11 +18,7 @@
namespace v8 {
namespace internal {
-class CompilationInfo;
-
-namespace trap_handler {
-struct ProtectedInstructionData;
-} // namespace trap_handler
+class OptimizedCompilationInfo;
namespace compiler {
@@ -31,6 +27,7 @@ class DeoptimizationExit;
class FrameAccessState;
class Linkage;
class OutOfLineCode;
+class WasmCompilationData;
struct BranchInfo {
FlagsCondition condition;
@@ -77,17 +74,23 @@ class DeoptimizationLiteral {
double number_;
};
+enum class CodeGeneratorPoisoningLevel {
+ kDontPoison,
+ kPoisonStackPointerInPrologue,
+ kPoisonAll
+};
+
// Generates native code for a sequence of instructions.
class CodeGenerator final : public GapResolver::Assembler {
public:
explicit CodeGenerator(Zone* codegen_zone, Frame* frame, Linkage* linkage,
- InstructionSequence* code, CompilationInfo* info,
- Isolate* isolate, base::Optional<OsrHelper> osr_helper,
+ InstructionSequence* code,
+ OptimizedCompilationInfo* info, Isolate* isolate,
+ base::Optional<OsrHelper> osr_helper,
int start_source_position,
JumpOptimizationInfo* jump_opt,
- std::vector<trap_handler::ProtectedInstructionData>*
- protected_instructions,
- LoadPoisoning load_poisoning);
+ WasmCompilationData* wasm_compilation_data,
+ CodeGeneratorPoisoningLevel poisoning_level);
// Generate native code. After calling AssembleCode, call FinalizeCode to
// produce the actual code object. If an error occurs during either phase,
@@ -108,6 +111,8 @@ class CodeGenerator final : public GapResolver::Assembler {
void AddProtectedInstructionLanding(uint32_t instr_offset,
uint32_t landing_offset);
+ bool wasm_runtime_exception_support() const;
+
SourcePosition start_source_position() const {
return start_source_position_;
}
@@ -127,7 +132,7 @@ class CodeGenerator final : public GapResolver::Assembler {
private:
GapResolver* resolver() { return &resolver_; }
SafepointTableBuilder* safepoints() { return &safepoints_; }
- CompilationInfo* info() const { return info_; }
+ OptimizedCompilationInfo* info() const { return info_; }
OsrHelper* osr_helper() { return &(*osr_helper_); }
// Create the FrameAccessState object. The Frame is immutable from here on.
@@ -155,10 +160,12 @@ class CodeGenerator final : public GapResolver::Assembler {
// predecessor blocks ends with a masking branch.
void TryInsertBranchPoisoning(const InstructionBlock* block);
- // Initializes the masking register.
- // Eventually, this should be always threaded through from the caller
- // (in the proplogue) or from a callee (after a call).
- void InitializePoisonForLoadsIfNeeded();
+ // Initializes the masking register in the prologue of a function.
+ void InitializeSpeculationPoison();
+ // Reset the masking register during execution of a function.
+ void ResetSpeculationPoison();
+ // Generates a mask from the pc passed in {kJavaScriptCallCodeStartRegister}.
+ void GenerateSpeculationPoisonFromCodeStartRegister();
// Assemble code for the specified instruction.
CodeGenResult AssembleInstruction(Instruction* instr,
@@ -206,10 +213,6 @@ class CodeGenerator final : public GapResolver::Assembler {
// from the JS functions referring it.
void BailoutIfDeoptimized();
- // Generates a mask which can be used to poison values when we detect
- // the code is executing speculatively.
- void GenerateSpeculationPoison();
-
// Generates code to poison the stack pointer and implicit register arguments
// like the context register and the function register.
void AssembleRegisterArgumentPoisoning();
@@ -380,7 +383,7 @@ class CodeGenerator final : public GapResolver::Assembler {
Linkage* const linkage_;
InstructionSequence* const code_;
UnwindingInfoWriter unwinding_info_writer_;
- CompilationInfo* const info_;
+ OptimizedCompilationInfo* const info_;
Label* const labels_;
Label return_label_;
RpoNumber current_block_;
@@ -416,9 +419,9 @@ class CodeGenerator final : public GapResolver::Assembler {
int osr_pc_offset_;
int optimized_out_literal_id_;
SourcePositionTableBuilder source_position_table_builder_;
- std::vector<trap_handler::ProtectedInstructionData>* protected_instructions_;
+ WasmCompilationData* wasm_compilation_data_;
CodeGenResult result_;
- LoadPoisoning load_poisoning_;
+ CodeGeneratorPoisoningLevel poisoning_level_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/common-operator-reducer.cc b/deps/v8/src/compiler/common-operator-reducer.cc
index 2f4888617c..388cc66c16 100644
--- a/deps/v8/src/compiler/common-operator-reducer.cc
+++ b/deps/v8/src/compiler/common-operator-reducer.cc
@@ -438,8 +438,8 @@ Reduction CommonOperatorReducer::ReduceSwitch(Node* node) {
for (size_t i = 0; i < projection_count - 1; i++) {
Node* if_value = projections[i];
DCHECK_EQ(IrOpcode::kIfValue, if_value->opcode());
- int32_t value_index = OpParameter<int32_t>(if_value->op());
- if (value_index == mswitched.Value()) {
+ const IfValueParameters& p = IfValueParametersOf(if_value->op());
+ if (p.value() == mswitched.Value()) {
matched = true;
Replace(if_value, control);
break;
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index 36b1caffa6..7488e36305 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -391,6 +391,26 @@ ZoneVector<MachineType> const* MachineTypesOf(Operator const* op) {
return OpParameter<TypedObjectStateInfo>(op).machine_types();
}
+V8_EXPORT_PRIVATE bool operator==(IfValueParameters const& l,
+ IfValueParameters const& r) {
+  return l.value() == r.value() && l.comparison_order() == r.comparison_order();
+}
+
+size_t hash_value(IfValueParameters const& p) {
+ return base::hash_combine(p.value(), p.comparison_order());
+}
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& out,
+ IfValueParameters const& p) {
+ out << p.value() << " (order " << p.comparison_order() << ")";
+ return out;
+}
+
+IfValueParameters const& IfValueParametersOf(const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kIfValue);
+ return OpParameter<IfValueParameters>(op);
+}
+
#define COMMON_CACHED_OP_LIST(V) \
V(Dead, Operator::kFoldable, 0, 0, 0, 1, 1, 1) \
V(Unreachable, Operator::kFoldable, 0, 1, 1, 1, 1, 0) \
@@ -994,13 +1014,13 @@ const Operator* CommonOperatorBuilder::Switch(size_t control_output_count) {
1, 0, 1, 0, 0, control_output_count); // counts
}
-
-const Operator* CommonOperatorBuilder::IfValue(int32_t index) {
- return new (zone()) Operator1<int32_t>( // --
- IrOpcode::kIfValue, Operator::kKontrol, // opcode
- "IfValue", // name
- 0, 0, 1, 0, 0, 1, // counts
- index); // parameter
+const Operator* CommonOperatorBuilder::IfValue(int32_t index,
+ int32_t comparison_order) {
+ return new (zone()) Operator1<IfValueParameters>( // --
+ IrOpcode::kIfValue, Operator::kKontrol, // opcode
+ "IfValue", // name
+ 0, 0, 1, 0, 0, 1, // counts
+ IfValueParameters(index, comparison_order)); // parameter
}
@@ -1148,6 +1168,11 @@ const Operator* CommonOperatorBuilder::HeapConstant(
value); // parameter
}
+Handle<HeapObject> HeapConstantOf(const Operator* op) {
+ DCHECK_EQ(IrOpcode::kHeapConstant, op->opcode());
+ return OpParameter<Handle<HeapObject>>(op);
+}
+
const Operator* CommonOperatorBuilder::RelocatableInt32Constant(
int32_t value, RelocInfo::Mode rmode) {
return new (zone()) Operator1<RelocatablePtrConstantInfo>( // --
@@ -1486,6 +1511,11 @@ const Operator* CommonOperatorBuilder::DeadValue(MachineRepresentation rep) {
rep); // parameter
}
+const FrameStateInfo& FrameStateInfoOf(const Operator* op) {
+ DCHECK_EQ(IrOpcode::kFrameState, op->opcode());
+ return OpParameter<FrameStateInfo>(op);
+}
+
#undef COMMON_CACHED_OP_LIST
#undef CACHED_BRANCH_LIST
#undef CACHED_RETURN_LIST
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index b753ed88dc..9bb941a836 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -69,8 +69,9 @@ inline bool operator==(const BranchOperatorInfo& a,
}
V8_EXPORT_PRIVATE const BranchOperatorInfo& BranchOperatorInfoOf(
- const Operator* const);
-V8_EXPORT_PRIVATE BranchHint BranchHintOf(const Operator* const);
+ const Operator* const) V8_WARN_UNUSED_RESULT;
+V8_EXPORT_PRIVATE BranchHint BranchHintOf(const Operator* const)
+ V8_WARN_UNUSED_RESULT;
// Helper function for return nodes, because returns have a hidden value input.
int ValueInputCountOfReturn(Operator const* const op);
@@ -105,9 +106,10 @@ size_t hast_value(DeoptimizeParameters p);
std::ostream& operator<<(std::ostream&, DeoptimizeParameters p);
-DeoptimizeParameters const& DeoptimizeParametersOf(Operator const* const);
+DeoptimizeParameters const& DeoptimizeParametersOf(Operator const* const)
+ V8_WARN_UNUSED_RESULT;
-IsSafetyCheck IsSafetyCheckOf(const Operator* op);
+IsSafetyCheck IsSafetyCheckOf(const Operator* op) V8_WARN_UNUSED_RESULT;
class SelectParameters final {
public:
@@ -131,14 +133,16 @@ size_t hash_value(SelectParameters const& p);
std::ostream& operator<<(std::ostream&, SelectParameters const& p);
V8_EXPORT_PRIVATE SelectParameters const& SelectParametersOf(
- const Operator* const);
+ const Operator* const) V8_WARN_UNUSED_RESULT;
-V8_EXPORT_PRIVATE CallDescriptor const* CallDescriptorOf(const Operator* const);
+V8_EXPORT_PRIVATE CallDescriptor const* CallDescriptorOf(const Operator* const)
+ V8_WARN_UNUSED_RESULT;
-V8_EXPORT_PRIVATE size_t ProjectionIndexOf(const Operator* const);
+V8_EXPORT_PRIVATE size_t ProjectionIndexOf(const Operator* const)
+ V8_WARN_UNUSED_RESULT;
V8_EXPORT_PRIVATE MachineRepresentation
-PhiRepresentationOf(const Operator* const);
+PhiRepresentationOf(const Operator* const) V8_WARN_UNUSED_RESULT;
// The {IrOpcode::kParameter} opcode represents an incoming parameter to the
// function. This class bundles the index and a debug name for such operators.
@@ -157,8 +161,10 @@ class ParameterInfo final {
std::ostream& operator<<(std::ostream&, ParameterInfo const&);
-V8_EXPORT_PRIVATE int ParameterIndexOf(const Operator* const);
-const ParameterInfo& ParameterInfoOf(const Operator* const);
+V8_EXPORT_PRIVATE int ParameterIndexOf(const Operator* const)
+ V8_WARN_UNUSED_RESULT;
+const ParameterInfo& ParameterInfoOf(const Operator* const)
+ V8_WARN_UNUSED_RESULT;
struct ObjectStateInfo final : std::pair<uint32_t, int> {
ObjectStateInfo(uint32_t object_id, int size)
@@ -335,19 +341,20 @@ size_t hash_value(RegionObservability);
std::ostream& operator<<(std::ostream&, RegionObservability);
-RegionObservability RegionObservabilityOf(Operator const*) WARN_UNUSED_RESULT;
+RegionObservability RegionObservabilityOf(Operator const*)
+ V8_WARN_UNUSED_RESULT;
std::ostream& operator<<(std::ostream& os,
const ZoneVector<MachineType>* types);
-Type* TypeGuardTypeOf(Operator const*) WARN_UNUSED_RESULT;
+Type* TypeGuardTypeOf(Operator const*) V8_WARN_UNUSED_RESULT;
-int OsrValueIndexOf(Operator const*);
+int OsrValueIndexOf(Operator const*) V8_WARN_UNUSED_RESULT;
-SparseInputMask SparseInputMaskOf(Operator const*);
+SparseInputMask SparseInputMaskOf(Operator const*) V8_WARN_UNUSED_RESULT;
ZoneVector<MachineType> const* MachineTypesOf(Operator const*)
- WARN_UNUSED_RESULT;
+ V8_WARN_UNUSED_RESULT;
// The ArgumentsElementsState and ArgumentsLengthState can describe the layout
// for backing stores of arguments objects of various types:
@@ -370,11 +377,41 @@ ZoneVector<MachineType> const* MachineTypesOf(Operator const*)
 // type to carry a backing store of {kUnmappedArguments} type when {K == 0}.
typedef CreateArgumentsType ArgumentsStateType;
-ArgumentsStateType ArgumentsStateTypeOf(Operator const*) WARN_UNUSED_RESULT;
+ArgumentsStateType ArgumentsStateTypeOf(Operator const*) V8_WARN_UNUSED_RESULT;
uint32_t ObjectIdOf(Operator const*);
-MachineRepresentation DeadValueRepresentationOf(Operator const*);
+MachineRepresentation DeadValueRepresentationOf(Operator const*)
+ V8_WARN_UNUSED_RESULT;
+
+class IfValueParameters final {
+ public:
+ IfValueParameters(int32_t value, int32_t comparison_order)
+ : value_(value), comparison_order_(comparison_order) {}
+
+ int32_t value() const { return value_; }
+ int32_t comparison_order() const { return comparison_order_; }
+
+ private:
+ int32_t value_;
+ int32_t comparison_order_;
+};
+
+V8_EXPORT_PRIVATE bool operator==(IfValueParameters const&,
+ IfValueParameters const&);
+
+size_t hash_value(IfValueParameters const&);
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
+ IfValueParameters const&);
+
+V8_EXPORT_PRIVATE IfValueParameters const& IfValueParametersOf(
+ const Operator* op) V8_WARN_UNUSED_RESULT;
+
+const FrameStateInfo& FrameStateInfoOf(const Operator* op)
+ V8_WARN_UNUSED_RESULT;
+
+Handle<HeapObject> HeapConstantOf(const Operator* op) V8_WARN_UNUSED_RESULT;
// Interface for building common operators that can be used at any level of IR,
// including JavaScript, mid-level, and low-level.
@@ -395,7 +432,7 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* IfSuccess();
const Operator* IfException();
const Operator* Switch(size_t control_output_count);
- const Operator* IfValue(int32_t value);
+ const Operator* IfValue(int32_t value, int32_t order = 0);
const Operator* IfDefault();
const Operator* Throw();
const Operator* Deoptimize(DeoptimizeKind kind, DeoptimizeReason reason,
@@ -487,10 +524,6 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
DISALLOW_COPY_AND_ASSIGN(CommonOperatorBuilder);
};
-// This should go into some common compiler header, but we do not have such a
-// thing at the moment.
-enum class LoadPoisoning { kDoPoison, kDontPoison };
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/control-flow-optimizer.cc b/deps/v8/src/compiler/control-flow-optimizer.cc
index 6027c8201c..7177a6069d 100644
--- a/deps/v8/src/compiler/control-flow-optimizer.cc
+++ b/deps/v8/src/compiler/control-flow-optimizer.cc
@@ -83,6 +83,7 @@ bool ControlFlowOptimizer::TryBuildSwitch(Node* node) {
Node* if_false;
Node* if_true;
+ int32_t order = 1;
while (true) {
BranchMatcher matcher(branch);
DCHECK(matcher.Matched());
@@ -109,7 +110,7 @@ bool ControlFlowOptimizer::TryBuildSwitch(Node* node) {
branch->NullAllInputs();
if_true->ReplaceInput(0, node);
}
- NodeProperties::ChangeOp(if_true, common()->IfValue(value));
+ NodeProperties::ChangeOp(if_true, common()->IfValue(value, order++));
if_false->NullAllInputs();
Enqueue(if_true);
@@ -128,7 +129,7 @@ bool ControlFlowOptimizer::TryBuildSwitch(Node* node) {
node->ReplaceInput(0, index);
NodeProperties::ChangeOp(node, common()->Switch(values.size() + 1));
if_true->ReplaceInput(0, node);
- NodeProperties::ChangeOp(if_true, common()->IfValue(value));
+ NodeProperties::ChangeOp(if_true, common()->IfValue(value, order++));
Enqueue(if_true);
if_false->ReplaceInput(0, node);
NodeProperties::ChangeOp(if_false, common()->IfDefault());
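
The new {comparison_order} parameter records each case's position in the original branch chain, so later phases can reproduce the same sequence of comparisons when they lower the switch. A small sketch of that numbering in plain C++ (IfValueParameters mirrors the struct introduced above; the surrounding scaffolding is illustrative):

#include <cstdint>
#include <iostream>
#include <vector>

// Sketch: cases collected while turning a chain of equality branches into a
// switch, numbered the way the order++ counter in TryBuildSwitch does it.
struct IfValueParameters {
  int32_t value;
  int32_t comparison_order;
};

int main() {
  // Suppose the optimizer walks "if (x == 7) ... else if (x == 3) ...
  // else if (x == 9) ...": it assigns increasing comparison orders from 1.
  std::vector<IfValueParameters> cases;
  int32_t order = 1;
  for (int32_t value : {7, 3, 9}) {
    cases.push_back({value, order++});
  }
  for (const auto& c : cases) {
    std::cout << c.value << " (order " << c.comparison_order << ")\n";
  }
  return 0;
}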
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index 290a3b5f34..52cbefb15c 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -13,7 +13,7 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/compiler/schedule.h"
-#include "src/factory-inl.h"
+#include "src/heap/factory-inl.h"
namespace v8 {
namespace internal {
@@ -682,9 +682,6 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kCheckString:
result = LowerCheckString(node, frame_state);
break;
- case IrOpcode::kCheckSeqString:
- result = LowerCheckSeqString(node, frame_state);
- break;
case IrOpcode::kCheckInternalizedString:
result = LowerCheckInternalizedString(node, frame_state);
break;
@@ -827,11 +824,11 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kDeadValue:
result = LowerDeadValue(node);
break;
- case IrOpcode::kStringFromCharCode:
- result = LowerStringFromCharCode(node);
+ case IrOpcode::kStringFromSingleCharCode:
+ result = LowerStringFromSingleCharCode(node);
break;
- case IrOpcode::kStringFromCodePoint:
- result = LowerStringFromCodePoint(node);
+ case IrOpcode::kStringFromSingleCodePoint:
+ result = LowerStringFromSingleCodePoint(node);
break;
case IrOpcode::kStringIndexOf:
result = LowerStringIndexOf(node);
@@ -842,21 +839,12 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kStringToNumber:
result = LowerStringToNumber(node);
break;
- case IrOpcode::kStringCharAt:
- result = LowerStringCharAt(node);
- break;
case IrOpcode::kStringCharCodeAt:
result = LowerStringCharCodeAt(node);
break;
- case IrOpcode::kSeqStringCharCodeAt:
- result = LowerSeqStringCharCodeAt(node);
- break;
case IrOpcode::kStringCodePointAt:
result = LowerStringCodePointAt(node, UnicodeEncodingOf(node->op()));
break;
- case IrOpcode::kSeqStringCodePointAt:
- result = LowerSeqStringCodePointAt(node, UnicodeEncodingOf(node->op()));
- break;
case IrOpcode::kStringToLowerCaseIntl:
result = LowerStringToLowerCaseIntl(node);
break;
@@ -878,6 +866,24 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kNumberIsFloat64Hole:
result = LowerNumberIsFloat64Hole(node);
break;
+ case IrOpcode::kNumberIsFinite:
+ result = LowerNumberIsFinite(node);
+ break;
+ case IrOpcode::kObjectIsFiniteNumber:
+ result = LowerObjectIsFiniteNumber(node);
+ break;
+ case IrOpcode::kNumberIsInteger:
+ result = LowerNumberIsInteger(node);
+ break;
+ case IrOpcode::kObjectIsInteger:
+ result = LowerObjectIsInteger(node);
+ break;
+ case IrOpcode::kNumberIsSafeInteger:
+ result = LowerNumberIsSafeInteger(node);
+ break;
+ case IrOpcode::kObjectIsSafeInteger:
+ result = LowerObjectIsSafeInteger(node);
+ break;
case IrOpcode::kCheckFloat64Hole:
result = LowerCheckFloat64Hole(node, frame_state);
break;
@@ -1500,24 +1506,6 @@ Node* EffectControlLinearizer::LowerCheckString(Node* node, Node* frame_state) {
return value;
}
-Node* EffectControlLinearizer::LowerCheckSeqString(Node* node,
- Node* frame_state) {
- Node* value = node->InputAt(0);
-
- Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
- Node* value_instance_type =
- __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
-
- Node* check = __ Word32Equal(
- __ Word32And(
- value_instance_type,
- __ Int32Constant(kStringRepresentationMask | kIsNotStringMask)),
- __ Int32Constant(kSeqStringTag | kStringTag));
- __ DeoptimizeIfNot(DeoptimizeReason::kWrongInstanceType, VectorSlotPair(),
- check, frame_state);
- return value;
-}
-
Node* EffectControlLinearizer::LowerCheckInternalizedString(Node* node,
Node* frame_state) {
Node* value = node->InputAt(0);
@@ -2179,6 +2167,118 @@ Node* EffectControlLinearizer::LowerNumberIsFloat64Hole(Node* node) {
return check;
}
+Node* EffectControlLinearizer::LowerNumberIsFinite(Node* node) {
+ Node* number = node->InputAt(0);
+ Node* diff = __ Float64Sub(number, number);
+ Node* check = __ Float64Equal(diff, diff);
+ return check;
+}
+
+Node* EffectControlLinearizer::LowerObjectIsFiniteNumber(Node* node) {
+ Node* object = node->InputAt(0);
+ Node* zero = __ Int32Constant(0);
+ Node* one = __ Int32Constant(1);
+
+ auto done = __ MakeLabel(MachineRepresentation::kBit);
+
+ // Check if {object} is a Smi.
+ __ GotoIf(ObjectIsSmi(object), &done, one);
+
+ // Check if {object} is a HeapNumber.
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), object);
+ __ GotoIfNot(__ WordEqual(value_map, __ HeapNumberMapConstant()), &done,
+ zero);
+
+ // {object} is a HeapNumber.
+ Node* value = __ LoadField(AccessBuilder::ForHeapNumberValue(), object);
+ Node* diff = __ Float64Sub(value, value);
+ Node* check = __ Float64Equal(diff, diff);
+ __ Goto(&done, check);
+
+ __ Bind(&done);
+ return done.PhiAt(0);
+}
+
+Node* EffectControlLinearizer::LowerNumberIsInteger(Node* node) {
+ Node* number = node->InputAt(0);
+ Node* trunc = BuildFloat64RoundTruncate(number);
+ Node* diff = __ Float64Sub(number, trunc);
+ Node* check = __ Float64Equal(diff, __ Float64Constant(0));
+ return check;
+}
+
+Node* EffectControlLinearizer::LowerObjectIsInteger(Node* node) {
+ Node* object = node->InputAt(0);
+ Node* zero = __ Int32Constant(0);
+ Node* one = __ Int32Constant(1);
+
+ auto done = __ MakeLabel(MachineRepresentation::kBit);
+
+ // Check if {object} is a Smi.
+ __ GotoIf(ObjectIsSmi(object), &done, one);
+
+ // Check if {object} is a HeapNumber.
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), object);
+ __ GotoIfNot(__ WordEqual(value_map, __ HeapNumberMapConstant()), &done,
+ zero);
+
+ // {object} is a HeapNumber.
+ Node* value = __ LoadField(AccessBuilder::ForHeapNumberValue(), object);
+ Node* trunc = BuildFloat64RoundTruncate(value);
+ Node* diff = __ Float64Sub(value, trunc);
+ Node* check = __ Float64Equal(diff, __ Float64Constant(0));
+ __ Goto(&done, check);
+
+ __ Bind(&done);
+ return done.PhiAt(0);
+}
+
+Node* EffectControlLinearizer::LowerNumberIsSafeInteger(Node* node) {
+ Node* number = node->InputAt(0);
+ Node* zero = __ Int32Constant(0);
+ auto done = __ MakeLabel(MachineRepresentation::kBit);
+
+ Node* trunc = BuildFloat64RoundTruncate(number);
+ Node* diff = __ Float64Sub(number, trunc);
+ Node* check = __ Float64Equal(diff, __ Float64Constant(0));
+ __ GotoIfNot(check, &done, zero);
+ Node* in_range = __ Float64LessThanOrEqual(
+ __ Float64Abs(trunc), __ Float64Constant(kMaxSafeInteger));
+ __ Goto(&done, in_range);
+
+ __ Bind(&done);
+ return done.PhiAt(0);
+}
+
+Node* EffectControlLinearizer::LowerObjectIsSafeInteger(Node* node) {
+ Node* object = node->InputAt(0);
+ Node* zero = __ Int32Constant(0);
+ Node* one = __ Int32Constant(1);
+
+ auto done = __ MakeLabel(MachineRepresentation::kBit);
+
+ // Check if {object} is a Smi.
+ __ GotoIf(ObjectIsSmi(object), &done, one);
+
+ // Check if {object} is a HeapNumber.
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), object);
+ __ GotoIfNot(__ WordEqual(value_map, __ HeapNumberMapConstant()), &done,
+ zero);
+
+ // {object} is a HeapNumber.
+ Node* value = __ LoadField(AccessBuilder::ForHeapNumberValue(), object);
+ Node* trunc = BuildFloat64RoundTruncate(value);
+ Node* diff = __ Float64Sub(value, trunc);
+ Node* check = __ Float64Equal(diff, __ Float64Constant(0));
+ __ GotoIfNot(check, &done, zero);
+ Node* in_range = __ Float64LessThanOrEqual(
+ __ Float64Abs(trunc), __ Float64Constant(kMaxSafeInteger));
+ __ Goto(&done, in_range);
+
+ __ Bind(&done);
+ return done.PhiAt(0);
+}
+
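
These lowerings rest on two float64 identities: x - x compares equal to itself only when x is finite (for NaN or an infinity the subtraction yields NaN), and x is an integer exactly when x - trunc(x) == 0. A scalar sketch of the three predicates (plain C++, illustrative only):

#include <cassert>
#include <cmath>
#include <limits>

constexpr double kMaxSafeInteger = 9007199254740991.0;  // 2^53 - 1

bool NumberIsFinite(double x) {
  double diff = x - x;   // NaN for NaN/Infinity, 0 otherwise
  return diff == diff;   // NaN never compares equal to itself
}

bool NumberIsInteger(double x) {
  return x - std::trunc(x) == 0.0;
}

bool NumberIsSafeInteger(double x) {
  return NumberIsInteger(x) && std::fabs(std::trunc(x)) <= kMaxSafeInteger;
}

int main() {
  assert(NumberIsFinite(1.5));
  assert(!NumberIsFinite(std::numeric_limits<double>::infinity()));
  assert(NumberIsInteger(4.0) && !NumberIsInteger(4.5));
  assert(NumberIsSafeInteger(4.0) && !NumberIsSafeInteger(9007199254740994.0));
  return 0;
}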
Node* EffectControlLinearizer::LowerObjectIsMinusZero(Node* node) {
Node* value = node->InputAt(0);
Node* zero = __ Int32Constant(0);
@@ -2566,7 +2666,7 @@ Node* EffectControlLinearizer::LowerNewSmiOrObjectElements(Node* node) {
Node* EffectControlLinearizer::LowerNewArgumentsElements(Node* node) {
Node* frame = NodeProperties::GetValueInput(node, 0);
Node* length = NodeProperties::GetValueInput(node, 1);
- int mapped_count = OpParameter<int>(node);
+ int mapped_count = NewArgumentsElementsMappedCountOf(node->op());
Callable const callable =
Builtins::CallableFor(isolate(), Builtins::kNewArgumentsElements);
@@ -2673,20 +2773,6 @@ Node* EffectControlLinearizer::LowerStringToNumber(Node* node) {
__ NoContextConstant());
}
-Node* EffectControlLinearizer::LowerStringCharAt(Node* node) {
- Node* receiver = node->InputAt(0);
- Node* position = node->InputAt(1);
-
- Callable const callable =
- Builtins::CallableFor(isolate(), Builtins::kStringCharAt);
- Operator::Properties properties = Operator::kNoThrow | Operator::kNoWrite;
- CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
- return __ Call(call_descriptor, __ HeapConstant(callable.code()), receiver,
- position, __ NoContextConstant());
-}
-
Node* EffectControlLinearizer::LowerStringCharCodeAt(Node* node) {
Node* receiver = node->InputAt(0);
Node* position = node->InputAt(1);
@@ -2865,132 +2951,78 @@ Node* EffectControlLinearizer::LoadFromSeqString(Node* receiver, Node* position,
return done.PhiAt(0);
}
-Node* EffectControlLinearizer::LowerSeqStringCharCodeAt(Node* node) {
- Node* receiver = node->InputAt(0);
- Node* position = node->InputAt(1);
-
- Node* map = __ LoadField(AccessBuilder::ForMap(), receiver);
- Node* instance_type = __ LoadField(AccessBuilder::ForMapInstanceType(), map);
- Node* is_one_byte = __ Word32Equal(
- __ Word32And(instance_type, __ Int32Constant(kStringEncodingMask)),
- __ Int32Constant(kOneByteStringTag));
-
- return LoadFromSeqString(receiver, position, is_one_byte);
-}
-
-Node* EffectControlLinearizer::LowerSeqStringCodePointAt(
- Node* node, UnicodeEncoding encoding) {
- Node* receiver = node->InputAt(0);
- Node* position = node->InputAt(1);
-
- Node* map = __ LoadField(AccessBuilder::ForMap(), receiver);
- Node* instance_type = __ LoadField(AccessBuilder::ForMapInstanceType(), map);
- Node* is_one_byte = __ Word32Equal(
- __ Word32And(instance_type, __ Int32Constant(kStringEncodingMask)),
- __ Int32Constant(kOneByteStringTag));
-
- Node* first_char_code = LoadFromSeqString(receiver, position, is_one_byte);
-
- auto return_result = __ MakeLabel(MachineRepresentation::kWord32);
-
- // Check if first character code is outside of interval [0xD800, 0xDBFF].
- Node* first_out =
- __ Word32Equal(__ Word32And(first_char_code, __ Int32Constant(0xFC00)),
- __ Int32Constant(0xD800));
- // Return first character code.
- __ GotoIfNot(first_out, &return_result, first_char_code);
- // Check if position + 1 is still in range.
- Node* length = ChangeSmiToInt32(
- __ LoadField(AccessBuilder::ForStringLength(), receiver));
- Node* next_position = __ Int32Add(position, __ Int32Constant(1));
- Node* next_position_in_range = __ Int32LessThan(next_position, length);
- __ GotoIfNot(next_position_in_range, &return_result, first_char_code);
-
- // Load second character code.
- Node* second_char_code =
- LoadFromSeqString(receiver, next_position, is_one_byte);
- // Check if second character code is outside of interval [0xDC00, 0xDFFF].
- Node* second_out =
- __ Word32Equal(__ Word32And(second_char_code, __ Int32Constant(0xFC00)),
- __ Int32Constant(0xDC00));
- __ GotoIfNot(second_out, &return_result, first_char_code);
-
- Node* result;
- switch (encoding) {
- case UnicodeEncoding::UTF16:
- result = __ Word32Or(
-// Need to swap the order for big-endian platforms
-#if V8_TARGET_BIG_ENDIAN
- __ Word32Shl(first_char_code, __ Int32Constant(16)),
- second_char_code);
-#else
- __ Word32Shl(second_char_code, __ Int32Constant(16)),
- first_char_code);
-#endif
- break;
- case UnicodeEncoding::UTF32: {
- // Convert UTF16 surrogate pair into |word32| code point, encoded as
- // UTF32.
- Node* surrogate_offset =
- __ Int32Constant(0x10000 - (0xD800 << 10) - 0xDC00);
-
- // (lead << 10) + trail + SURROGATE_OFFSET
- result = __ Int32Add(__ Word32Shl(first_char_code, __ Int32Constant(10)),
- __ Int32Add(second_char_code, surrogate_offset));
- break;
- }
- }
- __ Goto(&return_result, result);
-
- __ Bind(&return_result);
- return return_result.PhiAt(0);
-}
-
-Node* EffectControlLinearizer::LowerStringFromCharCode(Node* node) {
+Node* EffectControlLinearizer::LowerStringFromSingleCharCode(Node* node) {
Node* value = node->InputAt(0);
+ Node* code = __ Word32And(value, __ Uint32Constant(0xFFFF));
- auto runtime_call = __ MakeDeferredLabel();
- auto if_undefined = __ MakeDeferredLabel();
+ auto if_not_one_byte = __ MakeDeferredLabel();
+ auto cache_miss = __ MakeDeferredLabel();
auto done = __ MakeLabel(MachineRepresentation::kTagged);
- // Compute the character code.
- Node* code = __ Word32And(value, __ Int32Constant(String::kMaxUtf16CodeUnit));
+ // Check if the {code} is a one byte character
+ Node* check1 = __ Uint32LessThanOrEqual(
+ code, __ Uint32Constant(String::kMaxOneByteCharCode));
+ __ GotoIfNot(check1, &if_not_one_byte);
+ {
+ // Load the isolate wide single character string cache.
+ Node* cache = __ HeapConstant(factory()->single_character_string_cache());
+
+ // Compute the {cache} index for {code}.
+ Node* index = machine()->Is32() ? code : __ ChangeUint32ToUint64(code);
- // Check if the {code} is a one-byte char code.
- Node* check0 = __ Int32LessThanOrEqual(
- code, __ Int32Constant(String::kMaxOneByteCharCode));
- __ GotoIfNot(check0, &runtime_call);
+ // Check if we have an entry for the {code} in the single character string
+ // cache already.
+ Node* entry =
+ __ LoadElement(AccessBuilder::ForFixedArrayElement(), cache, index);
- // Load the isolate wide single character string cache.
- Node* cache = __ HeapConstant(factory()->single_character_string_cache());
+ Node* check2 = __ WordEqual(entry, __ UndefinedConstant());
+ __ GotoIf(check2, &cache_miss);
- // Compute the {cache} index for {code}.
- Node* index = machine()->Is32() ? code : __ ChangeUint32ToUint64(code);
+ // Use the {entry} from the {cache}.
+ __ Goto(&done, entry);
- // Check if we have an entry for the {code} in the single character string
- // cache already.
- Node* entry =
- __ LoadElement(AccessBuilder::ForFixedArrayElement(), cache, index);
+ __ Bind(&cache_miss);
+ {
+ // Allocate a new SeqOneByteString for {code}.
+ Node* vtrue2 = __ Allocate(
+ NOT_TENURED, __ Int32Constant(SeqOneByteString::SizeFor(1)));
+ __ StoreField(AccessBuilder::ForMap(), vtrue2,
+ __ HeapConstant(factory()->one_byte_string_map()));
+ __ StoreField(AccessBuilder::ForNameHashField(), vtrue2,
+ __ IntPtrConstant(Name::kEmptyHashField));
+ __ StoreField(AccessBuilder::ForStringLength(), vtrue2,
+ __ SmiConstant(1));
+ __ Store(
+ StoreRepresentation(MachineRepresentation::kWord8, kNoWriteBarrier),
+ vtrue2,
+ __ IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag),
+ code);
- Node* check1 = __ WordEqual(entry, __ UndefinedConstant());
- __ GotoIf(check1, &runtime_call);
- __ Goto(&done, entry);
+ // Remember it in the {cache}.
+ __ StoreElement(AccessBuilder::ForFixedArrayElement(), cache, index,
+ vtrue2);
+ __ Goto(&done, vtrue2);
+ }
+ }
- // Let %StringFromCharCode handle this case.
- // TODO(turbofan): At some point we may consider adding a stub for this
- // deferred case, so that we don't need to call to C++ here.
- __ Bind(&runtime_call);
+ __ Bind(&if_not_one_byte);
{
- Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
- Runtime::FunctionId id = Runtime::kStringCharFromCode;
- auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
- graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
- Node* vtrue1 = __ Call(
- call_descriptor, __ CEntryStubConstant(1), ChangeInt32ToSmi(code),
- __ ExternalConstant(ExternalReference(id, isolate())),
- __ Int32Constant(1), __ NoContextConstant());
- __ Goto(&done, vtrue1);
+ // Allocate a new SeqTwoByteString for {code}.
+ Node* vfalse1 = __ Allocate(NOT_TENURED,
+ __ Int32Constant(SeqTwoByteString::SizeFor(1)));
+ __ StoreField(AccessBuilder::ForMap(), vfalse1,
+ __ HeapConstant(factory()->string_map()));
+ __ StoreField(AccessBuilder::ForNameHashField(), vfalse1,
+ __ IntPtrConstant(Name::kEmptyHashField));
+ __ StoreField(AccessBuilder::ForStringLength(), vfalse1, __ SmiConstant(1));
+ __ Store(
+ StoreRepresentation(MachineRepresentation::kWord16, kNoWriteBarrier),
+ vfalse1,
+ __ IntPtrConstant(SeqTwoByteString::kHeaderSize - kHeapObjectTag),
+ code);
+ __ Goto(&done, vfalse1);
}
+
__ Bind(&done);
return done.PhiAt(0);
}
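
The rewritten lowering inlines the whole fast path: one-byte codes go through the isolate-wide single-character string cache, and anything else allocates a fresh two-byte string. A compact sketch of that control flow (plain C++ with a std::string stand-in for heap strings; all names here are illustrative, not V8 APIs):

#include <array>
#include <cstdint>
#include <memory>
#include <string>

constexpr uint32_t kMaxOneByteCharCode = 0xFF;

// Stand-in for the isolate-wide single character string cache.
std::array<std::shared_ptr<std::string>, kMaxOneByteCharCode + 1> cache;

std::shared_ptr<std::string> StringFromSingleCharCode(uint32_t value) {
  uint32_t code = value & 0xFFFF;  // mirror of the Word32And in the lowering
  if (code <= kMaxOneByteCharCode) {
    auto& entry = cache[code];
    if (!entry) {
      // Cache miss: allocate a one-character string and remember it.
      entry = std::make_shared<std::string>(1, static_cast<char>(code));
    }
    return entry;  // cache hit or freshly populated entry
  }
  // Not a one-byte code: allocate a new two-byte (UTF-16 code unit) string.
  std::string two_byte(2, '\0');
  two_byte[0] = static_cast<char>(code & 0xFF);
  two_byte[1] = static_cast<char>(code >> 8);
  return std::make_shared<std::string>(two_byte);
}

int main() {
  auto a = StringFromSingleCharCode('A');
  auto b = StringFromSingleCharCode('A');
  return a == b ? 0 : 1;  // second call hits the cache, same object comes back
}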
@@ -3035,7 +3067,7 @@ Node* EffectControlLinearizer::LowerStringToUpperCaseIntl(Node* node) {
#endif // V8_INTL_SUPPORT
-Node* EffectControlLinearizer::LowerStringFromCodePoint(Node* node) {
+Node* EffectControlLinearizer::LowerStringFromSingleCodePoint(Node* node) {
Node* value = node->InputAt(0);
Node* code = value;
@@ -3135,7 +3167,11 @@ Node* EffectControlLinearizer::LowerStringFromCodePoint(Node* node) {
__ Int32Constant(0xDC00));
   // codepoint = (trail << 16) | lead;
+#if V8_TARGET_BIG_ENDIAN
+ code = __ Word32Or(__ Word32Shl(lead, __ Int32Constant(16)), trail);
+#else
code = __ Word32Or(__ Word32Shl(trail, __ Int32Constant(16)), lead);
+#endif
break;
}
}
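
For the UTF-16 case the lowering packs the trail surrogate into the upper half-word and the lead surrogate into the lower half-word (swapped on big-endian targets, per the new #if above). A worked scalar example of splitting a supplementary code point and re-packing it that way (plain C++, illustrative):

#include <cassert>
#include <cstdint>

// Sketch: split U+1F600 into a surrogate pair and re-pack it the way the
// little-endian branch does: (trail << 16) | lead.
int main() {
  uint32_t code_point = 0x1F600;                                // outside the BMP
  uint32_t lead = 0xD800 + ((code_point - 0x10000) >> 10);      // 0xD83D
  uint32_t trail = 0xDC00 + ((code_point - 0x10000) & 0x3FF);   // 0xDE00
  assert(lead == 0xD83D && trail == 0xDE00);

  uint32_t packed = (trail << 16) | lead;  // two UTF-16 code units in one word
  assert((packed & 0xFFFF) == lead && (packed >> 16) == trail);
  return 0;
}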
@@ -4278,9 +4314,8 @@ Maybe<Node*> EffectControlLinearizer::LowerFloat64RoundUp(Node* node) {
}
Node* EffectControlLinearizer::BuildFloat64RoundDown(Node* value) {
- Node* round_down = __ Float64RoundDown(value);
- if (round_down != nullptr) {
- return round_down;
+ if (machine()->Float64RoundDown().IsSupported()) {
+ return __ Float64RoundDown(value);
}
Node* const input = value;
@@ -4424,14 +4459,10 @@ Maybe<Node*> EffectControlLinearizer::LowerFloat64RoundTiesEven(Node* node) {
return Just(done.PhiAt(0));
}
-Maybe<Node*> EffectControlLinearizer::LowerFloat64RoundTruncate(Node* node) {
- // Nothing to be done if a fast hardware instruction is available.
+Node* EffectControlLinearizer::BuildFloat64RoundTruncate(Node* input) {
if (machine()->Float64RoundTruncate().IsSupported()) {
- return Nothing<Node*>();
+ return __ Float64RoundTruncate(input);
}
-
- Node* const input = node->InputAt(0);
-
// General case for trunc.
//
// if 0.0 < input then
@@ -4512,7 +4543,17 @@ Maybe<Node*> EffectControlLinearizer::LowerFloat64RoundTruncate(Node* node) {
__ Goto(&done, input);
}
__ Bind(&done);
- return Just(done.PhiAt(0));
+ return done.PhiAt(0);
+}
+
+Maybe<Node*> EffectControlLinearizer::LowerFloat64RoundTruncate(Node* node) {
+ // Nothing to be done if a fast hardware instruction is available.
+ if (machine()->Float64RoundTruncate().IsSupported()) {
+ return Nothing<Node*>();
+ }
+
+ Node* const input = node->InputAt(0);
+ return Just(BuildFloat64RoundTruncate(input));
}
Node* EffectControlLinearizer::LowerFindOrderedHashMapEntry(Node* node) {
diff --git a/deps/v8/src/compiler/effect-control-linearizer.h b/deps/v8/src/compiler/effect-control-linearizer.h
index 21425d3ab0..4bab19ebcb 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.h
+++ b/deps/v8/src/compiler/effect-control-linearizer.h
@@ -63,7 +63,6 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* LowerCheckNumber(Node* node, Node* frame_state);
Node* LowerCheckReceiver(Node* node, Node* frame_state);
Node* LowerCheckString(Node* node, Node* frame_state);
- Node* LowerCheckSeqString(Node* node, Node* frame_state);
Node* LowerCheckSymbol(Node* node, Node* frame_state);
void LowerCheckIf(Node* node, Node* frame_state);
Node* LowerCheckedInt32Add(Node* node, Node* frame_state);
@@ -106,6 +105,12 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* LowerObjectIsSymbol(Node* node);
Node* LowerObjectIsUndetectable(Node* node);
Node* LowerNumberIsFloat64Hole(Node* node);
+ Node* LowerNumberIsFinite(Node* node);
+ Node* LowerObjectIsFiniteNumber(Node* node);
+ Node* LowerNumberIsInteger(Node* node);
+ Node* LowerObjectIsInteger(Node* node);
+ Node* LowerNumberIsSafeInteger(Node* node);
+ Node* LowerObjectIsSafeInteger(Node* node);
Node* LowerArgumentsFrame(Node* node);
Node* LowerArgumentsLength(Node* node);
Node* LowerNewDoubleElements(Node* node);
@@ -116,15 +121,12 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* LowerSameValue(Node* node);
Node* LowerDeadValue(Node* node);
Node* LowerStringToNumber(Node* node);
- Node* LowerStringCharAt(Node* node);
Node* LowerStringCharCodeAt(Node* node);
- Node* LowerSeqStringCharCodeAt(Node* node);
Node* LowerStringCodePointAt(Node* node, UnicodeEncoding encoding);
- Node* LowerSeqStringCodePointAt(Node* node, UnicodeEncoding encoding);
Node* LowerStringToLowerCaseIntl(Node* node);
Node* LowerStringToUpperCaseIntl(Node* node);
- Node* LowerStringFromCharCode(Node* node);
- Node* LowerStringFromCodePoint(Node* node);
+ Node* LowerStringFromSingleCharCode(Node* node);
+ Node* LowerStringFromSingleCodePoint(Node* node);
Node* LowerStringIndexOf(Node* node);
Node* LowerStringSubstring(Node* node);
Node* LowerStringLength(Node* node);
@@ -171,6 +173,7 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* value,
Node* frame_state);
Node* BuildFloat64RoundDown(Node* value);
+ Node* BuildFloat64RoundTruncate(Node* input);
Node* ComputeIntegerHash(Node* value);
Node* LowerStringComparison(Callable const& callable, Node* node);
Node* IsElementsKindGreaterThan(Node* kind, ElementsKind reference_kind);
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.cc b/deps/v8/src/compiler/escape-analysis-reducer.cc
index 66715b9a94..81da548ca2 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.cc
+++ b/deps/v8/src/compiler/escape-analysis-reducer.cc
@@ -228,8 +228,7 @@ void EscapeAnalysisReducer::VerifyReplacement() const {
void EscapeAnalysisReducer::Finalize() {
for (Node* node : arguments_elements_) {
- DCHECK_EQ(IrOpcode::kNewArgumentsElements, node->opcode());
- int mapped_count = OpParameter<int>(node);
+ int mapped_count = NewArgumentsElementsMappedCountOf(node->op());
Node* arguments_frame = NodeProperties::GetValueInput(node, 0);
if (arguments_frame->opcode() != IrOpcode::kArgumentsFrame) continue;
diff --git a/deps/v8/src/compiler/frame-states.cc b/deps/v8/src/compiler/frame-states.cc
index 7d55cc29d3..15ca8367b0 100644
--- a/deps/v8/src/compiler/frame-states.cc
+++ b/deps/v8/src/compiler/frame-states.cc
@@ -63,6 +63,9 @@ std::ostream& operator<<(std::ostream& os, FrameStateType type) {
case FrameStateType::kJavaScriptBuiltinContinuation:
os << "JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME";
break;
+ case FrameStateType::kJavaScriptBuiltinContinuationWithCatch:
+ os << "JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME";
+ break;
}
return os;
}
@@ -80,6 +83,24 @@ std::ostream& operator<<(std::ostream& os, FrameStateInfo const& info) {
namespace {
+// Lazy deopt points where the frame state is associated with a call get an
+// additional parameter for the return result from the call. The return result
+// is added by the deoptimizer and not explicitly specified in the frame state.
+// Lazy deopt points which can catch exceptions further get an additional
+// parameter, namely the exception thrown. The exception is also added by the
+// deoptimizer.
+uint8_t DeoptimizerParameterCountFor(ContinuationFrameStateMode mode) {
+ switch (mode) {
+ case ContinuationFrameStateMode::EAGER:
+ return 0;
+ case ContinuationFrameStateMode::LAZY:
+ return 1;
+ case ContinuationFrameStateMode::LAZY_WITH_CATCH:
+ return 2;
+ }
+ UNREACHABLE();
+}
+
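
The helper encodes how many trailing parameters the deoptimizer supplies on its own: none for eager deopts, the call's return value for lazy deopts, and additionally the thrown exception for lazy deopts with a catch handler. A small sketch of how that count feeds the parameter computation below (plain C++; the descriptor count is made up for illustration):

#include <cassert>
#include <cstdint>

enum class ContinuationFrameStateMode { EAGER, LAZY, LAZY_WITH_CATCH };

uint8_t DeoptimizerParameterCountFor(ContinuationFrameStateMode mode) {
  switch (mode) {
    case ContinuationFrameStateMode::EAGER: return 0;            // nothing implicit
    case ContinuationFrameStateMode::LAZY: return 1;             // call result
    case ContinuationFrameStateMode::LAZY_WITH_CATCH: return 2;  // result + exception
  }
  return 0;
}

int main() {
  // Hypothetical builtin descriptor with 4 register parameters: the frame
  // state only records the ones the deoptimizer does not materialize itself.
  int register_parameter_count = 4;
  int recorded = register_parameter_count -
                 DeoptimizerParameterCountFor(
                     ContinuationFrameStateMode::LAZY_WITH_CATCH);
  assert(recorded == 2);
  return 0;
}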
Node* CreateBuiltinContinuationFrameStateCommon(
JSGraph* jsgraph, FrameStateType frame_type, Builtins::Name name,
Node* closure, Node* context, Node** parameters, int parameter_count,
@@ -120,11 +141,10 @@ Node* CreateStubBuiltinContinuationFrameState(
CallInterfaceDescriptor descriptor = callable.descriptor();
std::vector<Node*> actual_parameters;
- // Stack parameters first. If the deoptimization is LAZY, the final parameter
- // is added by the deoptimizer and isn't explicitly passed in the frame state.
- int stack_parameter_count =
- descriptor.GetRegisterParameterCount() -
- (mode == ContinuationFrameStateMode::LAZY ? 1 : 0);
+ // Stack parameters first. Depending on {mode}, final parameters are added
+ // by the deoptimizer and aren't explicitly passed in the frame state.
+ int stack_parameter_count = descriptor.GetRegisterParameterCount() -
+ DeoptimizerParameterCountFor(mode);
for (int i = 0; i < stack_parameter_count; ++i) {
actual_parameters.push_back(
parameters[descriptor.GetRegisterParameterCount() + i]);
@@ -149,18 +169,12 @@ Node* CreateJavaScriptBuiltinContinuationFrameState(
Isolate* const isolate = jsgraph->isolate();
Callable const callable = Builtins::CallableFor(isolate, name);
- // Lazy deopt points where the frame state is assocated with a call get an
- // additional parameter for the return result from the call that's added by
- // the deoptimizer and not explicitly specified in the frame state. Check that
- // there is not a mismatch between the number of frame state parameters and
- // the stack parameters required by the builtin taking this into account.
+ // Depending on {mode}, final parameters are added by the deoptimizer
+ // and aren't explicitly passed in the frame state.
DCHECK_EQ(Builtins::GetStackParameterCount(name) + 1, // add receiver
- stack_parameter_count +
- (mode == ContinuationFrameStateMode::EAGER ? 0 : 1));
+ stack_parameter_count + DeoptimizerParameterCountFor(mode));
- Node* argc =
- jsgraph->Constant(stack_parameter_count -
- (mode == ContinuationFrameStateMode::EAGER ? 1 : 0));
+ Node* argc = jsgraph->Constant(Builtins::GetStackParameterCount(name));
// Stack parameters first. They must be first because the receiver is expected
// to be the second value in the translation when creating stack crawls
@@ -177,8 +191,11 @@ Node* CreateJavaScriptBuiltinContinuationFrameState(
actual_parameters.push_back(argc);
return CreateBuiltinContinuationFrameStateCommon(
- jsgraph, FrameStateType::kJavaScriptBuiltinContinuation, name, target,
- context, &actual_parameters[0],
+ jsgraph,
+ mode == ContinuationFrameStateMode::LAZY_WITH_CATCH
+ ? FrameStateType::kJavaScriptBuiltinContinuationWithCatch
+ : FrameStateType::kJavaScriptBuiltinContinuation,
+ name, target, context, &actual_parameters[0],
static_cast<int>(actual_parameters.size()), outer_frame_state, shared);
}
diff --git a/deps/v8/src/compiler/frame-states.h b/deps/v8/src/compiler/frame-states.h
index fb3d42ff41..b12758ac3b 100644
--- a/deps/v8/src/compiler/frame-states.h
+++ b/deps/v8/src/compiler/frame-states.h
@@ -60,12 +60,15 @@ class OutputFrameStateCombine {
// The type of stack frame that a FrameState node represents.
enum class FrameStateType {
- kInterpretedFunction, // Represents an InterpretedFrame.
- kArgumentsAdaptor, // Represents an ArgumentsAdaptorFrame.
- kConstructStub, // Represents a ConstructStubFrame.
- kBuiltinContinuation, // Represents a continuation to a stub.
- kJavaScriptBuiltinContinuation // Represents a continuation to a JavaScipt
- // builtin.
+ kInterpretedFunction, // Represents an InterpretedFrame.
+ kArgumentsAdaptor, // Represents an ArgumentsAdaptorFrame.
+ kConstructStub, // Represents a ConstructStubFrame.
+ kBuiltinContinuation, // Represents a continuation to a stub.
+  kJavaScriptBuiltinContinuation,  // Represents a continuation to a JavaScript
+ // builtin.
+ kJavaScriptBuiltinContinuationWithCatch // Represents a continuation to a
+                                           // JavaScript builtin with a catch
+ // handler.
};
class FrameStateFunctionInfo {
@@ -85,7 +88,8 @@ class FrameStateFunctionInfo {
static bool IsJSFunctionType(FrameStateType type) {
return type == FrameStateType::kInterpretedFunction ||
- type == FrameStateType::kJavaScriptBuiltinContinuation;
+ type == FrameStateType::kJavaScriptBuiltinContinuation ||
+ type == FrameStateType::kJavaScriptBuiltinContinuationWithCatch;
}
private:
@@ -143,7 +147,7 @@ static const int kFrameStateFunctionInput = 4;
static const int kFrameStateOuterStateInput = 5;
static const int kFrameStateInputCount = kFrameStateOuterStateInput + 1;
-enum class ContinuationFrameStateMode { EAGER, LAZY };
+enum class ContinuationFrameStateMode { EAGER, LAZY, LAZY_WITH_CATCH };
Node* CreateStubBuiltinContinuationFrameState(
JSGraph* graph, Builtins::Name name, Node* context, Node* const* parameters,
diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc
index 676860fdcd..37d6c94a72 100644
--- a/deps/v8/src/compiler/graph-assembler.cc
+++ b/deps/v8/src/compiler/graph-assembler.cc
@@ -86,10 +86,13 @@ CHECKED_ASSEMBLER_MACH_BINOP_LIST(CHECKED_BINOP_DEF)
#undef CHECKED_BINOP_DEF
Node* GraphAssembler::Float64RoundDown(Node* value) {
- if (machine()->Float64RoundDown().IsSupported()) {
- return graph()->NewNode(machine()->Float64RoundDown().op(), value);
- }
- return nullptr;
+ CHECK(machine()->Float64RoundDown().IsSupported());
+ return graph()->NewNode(machine()->Float64RoundDown().op(), value);
+}
+
+Node* GraphAssembler::Float64RoundTruncate(Node* value) {
+ CHECK(machine()->Float64RoundTruncate().IsSupported());
+ return graph()->NewNode(machine()->Float64RoundTruncate().op(), value);
}
Node* GraphAssembler::Projection(int index, Node* value) {
diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h
index f3dd4e70f9..bc793df1a6 100644
--- a/deps/v8/src/compiler/graph-assembler.h
+++ b/deps/v8/src/compiler/graph-assembler.h
@@ -196,6 +196,7 @@ class GraphAssembler {
Node* Unreachable();
Node* Float64RoundDown(Node* value);
+ Node* Float64RoundTruncate(Node* value);
Node* ToNumber(Node* value);
Node* BitcastWordToTagged(Node* value);
diff --git a/deps/v8/src/compiler/graph-visualizer.cc b/deps/v8/src/compiler/graph-visualizer.cc
index 8c771c2af6..73e7f615db 100644
--- a/deps/v8/src/compiler/graph-visualizer.cc
+++ b/deps/v8/src/compiler/graph-visualizer.cc
@@ -9,7 +9,6 @@
#include <string>
#include "src/code-stubs.h"
-#include "src/compilation-info.h"
#include "src/compiler/all-nodes.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/graph.h"
@@ -23,13 +22,14 @@
#include "src/compiler/scheduler.h"
#include "src/interpreter/bytecodes.h"
#include "src/objects/script-inl.h"
+#include "src/optimized-compilation-info.h"
#include "src/ostreams.h"
namespace v8 {
namespace internal {
namespace compiler {
-std::unique_ptr<char[]> GetVisualizerLogFileName(CompilationInfo* info,
+std::unique_ptr<char[]> GetVisualizerLogFileName(OptimizedCompilationInfo* info,
const char* phase,
const char* suffix) {
EmbeddedVector<char, 256> filename(0);
@@ -255,7 +255,7 @@ class GraphC1Visualizer {
public:
GraphC1Visualizer(std::ostream& os, Zone* zone); // NOLINT
- void PrintCompilation(const CompilationInfo* info);
+ void PrintCompilation(const OptimizedCompilationInfo* info);
void PrintSchedule(const char* phase, const Schedule* schedule,
const SourcePositionTable* positions,
const InstructionSequence* instructions);
@@ -343,8 +343,7 @@ void GraphC1Visualizer::PrintIntProperty(const char* name, int value) {
os_ << name << " " << value << "\n";
}
-
-void GraphC1Visualizer::PrintCompilation(const CompilationInfo* info) {
+void GraphC1Visualizer::PrintCompilation(const OptimizedCompilationInfo* info) {
Tag tag(this, "compilation");
std::unique_ptr<char[]> name = info->GetDebugName();
if (info->IsOptimizing()) {
diff --git a/deps/v8/src/compiler/graph-visualizer.h b/deps/v8/src/compiler/graph-visualizer.h
index 4b1c535549..7fa2f89867 100644
--- a/deps/v8/src/compiler/graph-visualizer.h
+++ b/deps/v8/src/compiler/graph-visualizer.h
@@ -14,7 +14,7 @@
namespace v8 {
namespace internal {
-class CompilationInfo;
+class OptimizedCompilationInfo;
namespace compiler {
@@ -24,7 +24,7 @@ class RegisterAllocationData;
class Schedule;
class SourcePositionTable;
-std::unique_ptr<char[]> GetVisualizerLogFileName(CompilationInfo* info,
+std::unique_ptr<char[]> GetVisualizerLogFileName(OptimizedCompilationInfo* info,
const char* phase,
const char* suffix);
@@ -44,8 +44,9 @@ struct AsRPO {
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, const AsRPO& ad);
struct AsC1VCompilation {
- explicit AsC1VCompilation(const CompilationInfo* info) : info_(info) {}
- const CompilationInfo* info_;
+ explicit AsC1VCompilation(const OptimizedCompilationInfo* info)
+ : info_(info) {}
+ const OptimizedCompilationInfo* info_;
};
struct AsScheduledGraph {
diff --git a/deps/v8/src/compiler/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
index 91df483622..d1b59a5416 100644
--- a/deps/v8/src/compiler/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
@@ -6,7 +6,6 @@
#include "src/assembler-inl.h"
#include "src/callable.h"
-#include "src/compilation-info.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
@@ -16,6 +15,7 @@
#include "src/heap/heap-inl.h"
#include "src/ia32/assembler-ia32.h"
#include "src/ia32/macro-assembler-ia32.h"
+#include "src/optimized-compilation-info.h"
namespace v8 {
namespace internal {
@@ -285,6 +285,34 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Zone* zone_;
};
+void MoveOperandIfAliasedWithPoisonRegister(Instruction* call_instruction,
+ CodeGenerator* gen) {
+ IA32OperandConverter i(gen, call_instruction);
+ int const poison_index = i.InputInt32(1);
+ if (poison_index == -1) {
+ // No aliasing -> nothing to move.
+ return;
+ }
+
+ InstructionOperand* op = call_instruction->InputAt(poison_index);
+ if (op->IsImmediate() || op->IsConstant()) {
+ gen->tasm()->mov(kSpeculationPoisonRegister, i.ToImmediate(op));
+ } else {
+ gen->tasm()->mov(kSpeculationPoisonRegister, i.InputOperand(poison_index));
+ }
+}
+
+void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
+ InstructionCode opcode, Instruction* instr,
+ IA32OperandConverter& i) {
+ const MemoryAccessMode access_mode =
+ static_cast<MemoryAccessMode>(MiscField::decode(opcode));
+ if (access_mode == kMemoryAccessPoisoned) {
+ Register value = i.OutputRegister();
+ codegen->tasm()->and_(value, kSpeculationPoisonRegister);
+ }
+}
+
} // namespace
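
EmitWordLoadPoisoningIfNeeded is the per-load half of the scheme: whenever the instruction was marked kMemoryAccessPoisoned, the freshly loaded value is ANDed with the poison register before anything can consume it. A scalar sketch of the effect (plain C++; the access mode and the poison register are modeled as ordinary values):

#include <cassert>
#include <cstdint>

// The enum mirrors the access modes the instruction selector can attach.
enum class MemoryAccessMode { kMemoryAccessDirect, kMemoryAccessPoisoned };

uint32_t LoadWord(const uint32_t* slot, MemoryAccessMode mode,
                  uint32_t speculation_poison) {
  uint32_t value = *slot;
  if (mode == MemoryAccessMode::kMemoryAccessPoisoned) {
    value &= speculation_poison;  // the "and_(value, kSpeculationPoisonRegister)" step
  }
  return value;
}

int main() {
  uint32_t slot = 0xCAFEBABE;
  // Architectural path: poison register is all ones, the load is unaffected.
  assert(LoadWord(&slot, MemoryAccessMode::kMemoryAccessPoisoned, ~0u) == 0xCAFEBABE);
  // Mis-speculated path: poison register collapsed to zero, so does the load.
  assert(LoadWord(&slot, MemoryAccessMode::kMemoryAccessPoisoned, 0u) == 0u);
  return 0;
}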
#define ASSEMBLE_COMPARE(asm_instr) \
@@ -521,7 +549,7 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ j(not_zero, code, RelocInfo::CODE_TARGET);
}
-void CodeGenerator::GenerateSpeculationPoison() {
+void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
__ push(eax); // Push eax so we can use it as a scratch register.
// Set a mask which has all bits set in the normal case, but has all
@@ -549,6 +577,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
switch (arch_opcode) {
case kArchCallCodeObject: {
+ MoveOperandIfAliasedWithPoisonRegister(instr, this);
if (HasImmediateInput(instr, 0)) {
Handle<Code> code = i.InputCode(0);
__ call(code, RelocInfo::CODE_TARGET);
@@ -566,6 +595,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchCallWasmFunction: {
+ MoveOperandIfAliasedWithPoisonRegister(instr, this);
if (HasImmediateInput(instr, 0)) {
Address wasm_code = reinterpret_cast<Address>(
i.ToConstant(instr->InputAt(0)).ToInt32());
@@ -592,6 +622,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
+ MoveOperandIfAliasedWithPoisonRegister(instr, this);
if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
no_reg, no_reg, no_reg);
@@ -613,6 +644,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchTailCallWasm: {
+ MoveOperandIfAliasedWithPoisonRegister(instr, this);
if (HasImmediateInput(instr, 0)) {
Address wasm_code = reinterpret_cast<Address>(
i.ToConstant(instr->InputAt(0)).ToInt32());
@@ -634,6 +666,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchTailCallAddress: {
+ MoveOperandIfAliasedWithPoisonRegister(instr, this);
CHECK(!HasImmediateInput(instr, 0));
Register reg = i.InputRegister(0);
if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
@@ -646,6 +679,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchCallJSFunction: {
+ MoveOperandIfAliasedWithPoisonRegister(instr, this);
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
@@ -696,6 +730,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AssemblePrepareTailCall();
break;
case kArchCallCFunction: {
+ MoveOperandIfAliasedWithPoisonRegister(instr, this);
int const num_parameters = MiscField::decode(instr->opcode());
if (HasImmediateInput(instr, 0)) {
ExternalReference ref = i.InputExternalReference(0);
@@ -782,6 +817,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ mov(i.OutputRegister(), ebp);
}
break;
+ case kArchRootsPointer:
+ // TODO(jgruber,v8:6666): Implement ia32 support.
+ UNREACHABLE();
case kArchTruncateDoubleToI: {
auto result = i.OutputRegister();
auto input = i.InputDoubleRegister(0);
@@ -875,8 +913,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ movaps(xmm1, xmm2);
__ movaps(xmm2, xmm0);
}
- __ CallStubDelayed(new (zone())
- MathPowStub(nullptr, MathPowStub::DOUBLE));
+ __ CallStubDelayed(new (zone()) MathPowStub());
__ movaps(i.OutputDoubleRegister(), xmm3);
break;
}
@@ -1075,6 +1112,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIA32Popcnt:
__ Popcnt(i.OutputRegister(), i.InputOperand(0));
break;
+ case kArchPoisonOnSpeculationWord:
+ DCHECK_EQ(i.OutputRegister(), i.InputRegister(0));
+ __ and_(i.InputRegister(0), kSpeculationPoisonRegister);
+ break;
case kLFence:
__ lfence();
break;
@@ -1460,9 +1501,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kIA32Movsxbl:
ASSEMBLE_MOVX(movsx_b);
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kIA32Movzxbl:
ASSEMBLE_MOVX(movzx_b);
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kIA32Movb: {
size_t index = 0;
@@ -1472,13 +1515,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
__ mov_b(operand, i.InputRegister(index));
}
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
}
case kIA32Movsxwl:
ASSEMBLE_MOVX(movsx_w);
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kIA32Movzxwl:
ASSEMBLE_MOVX(movzx_w);
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kIA32Movw: {
size_t index = 0;
@@ -1488,11 +1534,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
__ mov_w(operand, i.InputRegister(index));
}
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
}
case kIA32Movl:
if (instr->HasOutput()) {
__ mov(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
} else {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
@@ -1719,6 +1767,40 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputOperand(2), i.InputInt8(1) << 4);
break;
}
+ case kIA32F32x4SConvertI32x4: {
+ __ Cvtdq2ps(i.OutputSimd128Register(), i.InputOperand(0));
+ break;
+ }
+ case kSSEF32x4UConvertI32x4: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ XMMRegister dst = i.OutputSimd128Register();
+ __ pxor(kScratchDoubleReg, kScratchDoubleReg); // zeros
+ __ pblendw(kScratchDoubleReg, dst, 0x55); // get lo 16 bits
+ __ psubd(dst, kScratchDoubleReg); // get hi 16 bits
+ __ cvtdq2ps(kScratchDoubleReg, kScratchDoubleReg); // convert lo exactly
+ __ psrld(dst, 1); // divide by 2 to get in unsigned range
+ __ cvtdq2ps(dst, dst); // convert hi exactly
+ __ addps(dst, dst); // double hi, exactly
+ __ addps(dst, kScratchDoubleReg); // add hi and lo, may round.
+ break;
+ }
+ case kAVXF32x4UConvertI32x4: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(0);
+ __ vpxor(kScratchDoubleReg, kScratchDoubleReg,
+ kScratchDoubleReg); // zeros
+ __ vpblendw(kScratchDoubleReg, kScratchDoubleReg, src,
+ 0x55); // get lo 16 bits
+ __ vpsubd(dst, src, kScratchDoubleReg); // get hi 16 bits
+ __ vcvtdq2ps(kScratchDoubleReg, kScratchDoubleReg); // convert lo exactly
+ __ vpsrld(dst, dst, 1); // divide by 2 to get in unsigned range
+ __ vcvtdq2ps(dst, dst); // convert hi exactly
+ __ vaddps(dst, dst, dst); // double hi, exactly
+ __ vaddps(dst, dst, kScratchDoubleReg); // add hi and lo, may round.
+ break;
+ }
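Both the SSE and AVX variants above implement the same lane-wise trick; a scalar model of it, for illustration only:

    // Split an unsigned 32-bit value into a low half that converts exactly and
    // a halved high half whose signed conversion is also exact, then recombine.
    static float Uint32ToFloat(uint32_t x) {
      uint32_t lo = x & 0xFFFFu;    // pblendw: low 16 bits of each lane
      uint32_t hi = (x - lo) >> 1;  // psubd + psrld: halved high part
      float f_lo = static_cast<float>(static_cast<int32_t>(lo));  // exact
      float f_hi = static_cast<float>(static_cast<int32_t>(hi));  // exact
      return f_hi + f_hi + f_lo;    // addps twice; only the last add may round
    }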
case kSSEF32x4Abs: {
XMMRegister dst = i.OutputSimd128Register();
Operand src = i.InputOperand(0);
@@ -1763,6 +1845,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputOperand(0));
break;
}
+ case kIA32F32x4RecipApprox: {
+ __ Rcpps(i.OutputSimd128Register(), i.InputOperand(0));
+ break;
+ }
+ case kIA32F32x4RecipSqrtApprox: {
+ __ Rsqrtps(i.OutputSimd128Register(), i.InputOperand(0));
+ break;
+ }
case kSSEF32x4Add: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ addps(i.OutputSimd128Register(), i.InputOperand(1));
@@ -1774,6 +1864,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputOperand(1));
break;
}
+ case kSSEF32x4AddHoriz: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSE3);
+ __ haddps(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXF32x4AddHoriz: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vhaddps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
case kSSEF32x4Sub: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ subps(i.OutputSimd128Register(), i.InputOperand(1));
@@ -1929,6 +2031,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputOperand(1));
break;
}
+ case kSSEI32x4AddHoriz: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSSE3);
+ __ phaddd(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXI32x4AddHoriz: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vphaddd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
case kSSEI32x4Sub: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ psubd(i.OutputSimd128Register(), i.InputOperand(1));
@@ -2185,6 +2299,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputOperand(1));
break;
}
+ case kSSEI16x8AddHoriz: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSSE3);
+ __ phaddw(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXI16x8AddHoriz: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vphaddw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
case kSSEI16x8Sub: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ psubw(i.OutputSimd128Register(), i.InputOperand(1));
@@ -2950,6 +3076,51 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vxorps(dst, dst, i.InputSimd128Register(2));
break;
}
+ case kIA32S8x16Shuffle: {
+ XMMRegister dst = i.OutputSimd128Register();
+ Register tmp = i.TempRegister(0);
+ // Prepare 16-byte boundary buffer for shuffle control mask
+ __ mov(tmp, esp);
+ __ movups(dst, i.InputOperand(0));
+ __ and_(esp, -16);
+ if (instr->InputCount() == 5) { // only one input operand
+ for (int j = 4; j > 0; j--) {
+ uint32_t mask = i.InputUint32(j);
+ __ push(Immediate(mask));
+ }
+ __ Pshufb(dst, Operand(esp, 0));
+ } else { // two input operands
+ DCHECK_EQ(6, instr->InputCount());
+ for (int j = 5; j > 1; j--) {
+ uint32_t lanes = i.InputUint32(j);
+ uint32_t mask = 0;
+ for (int k = 0; k < 32; k += 8) {
+ uint8_t lane = lanes >> k;
+ mask |= (lane < kSimd128Size ? lane : 0x80) << k;
+ }
+ __ push(Immediate(mask));
+ }
+ __ Pshufb(dst, Operand(esp, 0));
+ __ movups(kScratchDoubleReg, i.InputOperand(1));
+ for (int j = 5; j > 1; j--) {
+ uint32_t lanes = i.InputUint32(j);
+ uint32_t mask = 0;
+ for (int k = 0; k < 32; k += 8) {
+ uint8_t lane = lanes >> k;
+ mask |= (lane >= kSimd128Size ? (lane & 0xF) : 0x80) << k;
+ }
+ __ push(Immediate(mask));
+ }
+ __ Pshufb(kScratchDoubleReg, Operand(esp, 0));
+ __ por(dst, kScratchDoubleReg);
+ }
+ __ mov(esp, tmp);
+ break;
+ }
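The immediates pushed above pack four byte indices per 32-bit word; a sketch of the per-byte masks they encode for the two-input case (kSimd128Size is 16, illustrative only):

    // Indices 0..15 select bytes from the first input, 16..31 from the second;
    // 0x80 makes pshufb emit a zero byte, so the two results can be OR-ed (por).
    static void BuildPshufbMasks(const uint8_t idx[16], uint8_t mask0[16],
                                 uint8_t mask1[16]) {
      for (int i = 0; i < 16; ++i) {
        mask0[i] = idx[i] < 16 ? idx[i] : 0x80;
        mask1[i] = idx[i] >= 16 ? (idx[i] & 0x0F) : 0x80;
      }
    }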
+ case kIA32S32x4Swizzle: {
+ __ Pshufd(i.OutputSimd128Register(), i.InputOperand(0), i.InputInt8(1));
+ break;
+ }
case kIA32StackCheck: {
ExternalReference const stack_limit =
ExternalReference::address_of_stack_limit(__ isolate());
@@ -3119,7 +3290,15 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
Instruction* instr) {
- UNREACHABLE();
+ // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
+ if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
+ return;
+ }
+
+ condition = NegateFlagsCondition(condition);
+ __ setcc(FlagsConditionToCondition(condition), kSpeculationPoisonRegister);
+ __ add(kSpeculationPoisonRegister, Immediate(255));
+ __ sar(kSpeculationPoisonRegister, 31u);
}
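A scalar sketch of the setcc/add/sar sequence, assuming the poison register holds all ones (the "no poisoning" value) whenever the block is entered on the architecturally correct path:

    // Returns ~0u when the negated condition does not hold, and 0u when the
    // CPU reached this block by mispredicting the branch.
    static uint32_t UpdatePoison(uint32_t poison, bool negated_condition_holds) {
      poison = (poison & ~0xFFu) | (negated_condition_holds ? 1u : 0u);  // setcc
      poison += 255;  // 0xFFFFFF00 -> 0xFFFFFFFF, 0xFFFFFF01 wraps to 0
      return static_cast<uint32_t>(static_cast<int32_t>(poison) >> 31);  // sar 31
    }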
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
@@ -3437,6 +3616,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
+ ResetSpeculationPoison();
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
diff --git a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
index 55833df4d4..e5b1e85082 100644
--- a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
@@ -121,12 +121,19 @@ namespace compiler {
V(AVXF32x4ExtractLane) \
V(SSEF32x4ReplaceLane) \
V(AVXF32x4ReplaceLane) \
+ V(IA32F32x4SConvertI32x4) \
+ V(SSEF32x4UConvertI32x4) \
+ V(AVXF32x4UConvertI32x4) \
V(SSEF32x4Abs) \
V(AVXF32x4Abs) \
V(SSEF32x4Neg) \
V(AVXF32x4Neg) \
+ V(IA32F32x4RecipApprox) \
+ V(IA32F32x4RecipSqrtApprox) \
V(SSEF32x4Add) \
V(AVXF32x4Add) \
+ V(SSEF32x4AddHoriz) \
+ V(AVXF32x4AddHoriz) \
V(SSEF32x4Sub) \
V(AVXF32x4Sub) \
V(SSEF32x4Mul) \
@@ -154,6 +161,8 @@ namespace compiler {
V(AVXI32x4ShrS) \
V(SSEI32x4Add) \
V(AVXI32x4Add) \
+ V(SSEI32x4AddHoriz) \
+ V(AVXI32x4AddHoriz) \
V(SSEI32x4Sub) \
V(AVXI32x4Sub) \
V(SSEI32x4Mul) \
@@ -193,6 +202,8 @@ namespace compiler {
V(AVXI16x8Add) \
V(SSEI16x8AddSaturateS) \
V(AVXI16x8AddSaturateS) \
+ V(SSEI16x8AddHoriz) \
+ V(AVXI16x8AddHoriz) \
V(SSEI16x8Sub) \
V(AVXI16x8Sub) \
V(SSEI16x8SubSaturateS) \
@@ -280,7 +291,9 @@ namespace compiler {
V(SSES128Xor) \
V(AVXS128Xor) \
V(SSES128Select) \
- V(AVXS128Select)
+ V(AVXS128Select) \
+ V(IA32S8x16Shuffle) \
+ V(IA32S32x4Swizzle)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
index 3c2207eee2..0d592afbef 100644
--- a/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
@@ -103,12 +103,19 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXF32x4ExtractLane:
case kSSEF32x4ReplaceLane:
case kAVXF32x4ReplaceLane:
+ case kIA32F32x4SConvertI32x4:
+ case kSSEF32x4UConvertI32x4:
+ case kAVXF32x4UConvertI32x4:
case kSSEF32x4Abs:
case kAVXF32x4Abs:
case kSSEF32x4Neg:
case kAVXF32x4Neg:
+ case kIA32F32x4RecipApprox:
+ case kIA32F32x4RecipSqrtApprox:
case kSSEF32x4Add:
case kAVXF32x4Add:
+ case kSSEF32x4AddHoriz:
+ case kAVXF32x4AddHoriz:
case kSSEF32x4Sub:
case kAVXF32x4Sub:
case kSSEF32x4Mul:
@@ -136,6 +143,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXI32x4ShrS:
case kSSEI32x4Add:
case kAVXI32x4Add:
+ case kSSEI32x4AddHoriz:
+ case kAVXI32x4AddHoriz:
case kSSEI32x4Sub:
case kAVXI32x4Sub:
case kSSEI32x4Mul:
@@ -175,6 +184,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXI16x8Add:
case kSSEI16x8AddSaturateS:
case kAVXI16x8AddSaturateS:
+ case kSSEI16x8AddHoriz:
+ case kAVXI16x8AddHoriz:
case kSSEI16x8Sub:
case kAVXI16x8Sub:
case kSSEI16x8SubSaturateS:
@@ -263,6 +274,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXS128Xor:
case kSSES128Select:
case kAVXS128Select:
+ case kIA32S8x16Shuffle:
+ case kIA32S32x4Swizzle:
return (instr->addressing_mode() == kMode_None)
? kNoOpcodeFlags
: kIsLoadOperation | kHasSideEffect;
diff --git a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
index aa6e9fd607..f49935d8e0 100644
--- a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
@@ -74,7 +74,7 @@ class IA32OperandGenerator final : public OperandGenerator {
#if 0
// Constants in new space cannot be used as immediates in V8 because
// the GC does not scan code objects when collecting the new generation.
- Handle<HeapObject> value = OpParameter<Handle<HeapObject>>(node);
+ Handle<HeapObject> value = HeapConstantOf(node->op());
Isolate* isolate = value->GetIsolate();
return !isolate->heap()->InNewSpace(*value);
#else
@@ -94,13 +94,13 @@ class IA32OperandGenerator final : public OperandGenerator {
AddressingMode mode = kMode_MRI;
int32_t displacement = (displacement_node == nullptr)
? 0
- : OpParameter<int32_t>(displacement_node);
+ : OpParameter<int32_t>(displacement_node->op());
if (displacement_mode == kNegativeDisplacement) {
displacement = -displacement;
}
if (base != nullptr) {
if (base->opcode() == IrOpcode::kInt32Constant) {
- displacement += OpParameter<int32_t>(base);
+ displacement += OpParameter<int32_t>(base->op());
base = nullptr;
}
}
@@ -214,7 +214,8 @@ void VisitRRISimd(InstructionSelector* selector, Node* node,
ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
IA32OperandGenerator g(selector);
InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
- InstructionOperand operand1 = g.UseImmediate(OpParameter<int32_t>(node));
+ InstructionOperand operand1 =
+ g.UseImmediate(OpParameter<int32_t>(node->op()));
InstructionOperand temps[] = {g.TempSimd128Register()};
if (selector->IsSupported(AVX)) {
selector->Emit(avx_opcode, g.DefineAsRegister(node), operand0, operand1,
@@ -288,7 +289,7 @@ void InstructionSelector::VisitLoad(Node* node) {
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
InstructionCode code = opcode | AddressingModeField::encode(mode);
if (node->opcode() == IrOpcode::kPoisonedLoad) {
- CHECK_EQ(load_poisoning_, LoadPoisoning::kDoPoison);
+ CHECK_EQ(poisoning_enabled_, PoisoningMitigationLevel::kOn);
code |= MiscField::encode(kMemoryAccessPoisoned);
}
Emit(code, 1, outputs, input_count, inputs);
@@ -421,7 +422,7 @@ void VisitBinop(InstructionSelector* selector, Node* node,
Node* right = m.right().node();
InstructionOperand inputs[6];
size_t input_count = 0;
- InstructionOperand outputs[2];
+ InstructionOperand outputs[1];
size_t output_count = 0;
// TODO(turbofan): match complex addressing modes.
@@ -462,29 +463,15 @@ void VisitBinop(InstructionSelector* selector, Node* node,
}
}
- if (cont->IsBranch()) {
- inputs[input_count++] = g.Label(cont->true_block());
- inputs[input_count++] = g.Label(cont->false_block());
- }
-
outputs[output_count++] = g.DefineSameAsFirst(node);
- if (cont->IsSet()) {
- outputs[output_count++] = g.DefineAsByteRegister(cont->result());
- }
DCHECK_NE(0u, input_count);
- DCHECK_NE(0u, output_count);
+ DCHECK_EQ(1u, output_count);
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
- opcode = cont->Encode(opcode);
- if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->feedback(),
- cont->frame_state());
- } else {
- selector->Emit(opcode, output_count, outputs, input_count, inputs);
- }
+ selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
+ inputs, cont);
}
@@ -1080,51 +1067,20 @@ void VisitCompareWithMemoryOperand(InstructionSelector* selector,
DCHECK_EQ(IrOpcode::kLoad, left->opcode());
IA32OperandGenerator g(selector);
size_t input_count = 0;
- InstructionOperand inputs[6];
+ InstructionOperand inputs[4];
AddressingMode addressing_mode =
g.GetEffectiveAddressMemoryOperand(left, inputs, &input_count);
opcode |= AddressingModeField::encode(addressing_mode);
- opcode = cont->Encode(opcode);
inputs[input_count++] = right;
- if (cont->IsBranch()) {
- inputs[input_count++] = g.Label(cont->true_block());
- inputs[input_count++] = g.Label(cont->false_block());
- selector->Emit(opcode, 0, nullptr, input_count, inputs);
- } else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
- cont->kind(), cont->reason(), cont->feedback(),
- cont->frame_state());
- } else if (cont->IsSet()) {
- InstructionOperand output = g.DefineAsRegister(cont->result());
- selector->Emit(opcode, 1, &output, input_count, inputs);
- } else {
- DCHECK(cont->IsTrap());
- inputs[input_count++] = g.UseImmediate(cont->trap_id());
- selector->Emit(opcode, 0, nullptr, input_count, inputs);
- }
+ selector->EmitWithContinuation(opcode, 0, nullptr, input_count, inputs, cont);
}
// Shared routine for multiple compare operations.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
InstructionOperand left, InstructionOperand right,
FlagsContinuation* cont) {
- IA32OperandGenerator g(selector);
- opcode = cont->Encode(opcode);
- if (cont->IsBranch()) {
- selector->Emit(opcode, g.NoOutput(), left, right,
- g.Label(cont->true_block()), g.Label(cont->false_block()));
- } else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
- cont->reason(), cont->feedback(),
- cont->frame_state());
- } else if (cont->IsSet()) {
- selector->Emit(opcode, g.DefineAsByteRegister(cont->result()), left, right);
- } else {
- DCHECK(cont->IsTrap());
- selector->Emit(opcode, g.NoOutput(), left, right,
- g.UseImmediate(cont->trap_id()));
- }
+ selector->EmitWithContinuation(opcode, left, right, cont);
}
@@ -1145,8 +1101,8 @@ MachineType MachineTypeForNarrow(Node* node, Node* hint_node) {
if (node->opcode() == IrOpcode::kInt32Constant ||
node->opcode() == IrOpcode::kInt64Constant) {
int64_t constant = node->opcode() == IrOpcode::kInt32Constant
- ? OpParameter<int32_t>(node)
- : OpParameter<int64_t>(node);
+ ? OpParameter<int32_t>(node->op())
+ : OpParameter<int64_t>(node->op());
if (hint == MachineType::Int8()) {
if (constant >= std::numeric_limits<int8_t>::min() &&
constant <= std::numeric_limits<int8_t>::max()) {
@@ -1299,8 +1255,7 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
InstructionCode opcode = cont->Encode(kIA32StackCheck);
CHECK(cont->IsBranch());
- selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
- g.Label(cont->false_block()));
+ selector->EmitWithContinuation(opcode, cont);
return;
}
}
@@ -1406,20 +1361,20 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
// Emit either ArchTableSwitch or ArchLookupSwitch.
if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
static const size_t kMaxTableSwitchValueRange = 2 << 16;
- size_t table_space_cost = 4 + sw.value_range;
+ size_t table_space_cost = 4 + sw.value_range();
size_t table_time_cost = 3;
- size_t lookup_space_cost = 3 + 2 * sw.case_count;
- size_t lookup_time_cost = sw.case_count;
- if (sw.case_count > 4 &&
+ size_t lookup_space_cost = 3 + 2 * sw.case_count();
+ size_t lookup_time_cost = sw.case_count();
+ if (sw.case_count() > 4 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min() &&
- sw.value_range <= kMaxTableSwitchValueRange) {
+ sw.min_value() > std::numeric_limits<int32_t>::min() &&
+ sw.value_range() <= kMaxTableSwitchValueRange) {
InstructionOperand index_operand = value_operand;
- if (sw.min_value) {
+ if (sw.min_value()) {
index_operand = g.TempRegister();
Emit(kIA32Lea | AddressingModeField::encode(kMode_MRI), index_operand,
- value_operand, g.TempImmediate(-sw.min_value));
+ value_operand, g.TempImmediate(-sw.min_value()));
}
// Generate a table lookup.
return EmitTableSwitch(sw, index_operand);
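A worked instance of the heuristic above (illustrative numbers only): with case_count() == 5 and value_range() == 8, the table variant costs 4 + 8 = 12 space and 3 time units (12 + 3*3 = 21) against the lookup variant's 3 + 2*5 = 13 space and 5 time units (13 + 3*5 = 28), so the jump table is emitted.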
@@ -1779,6 +1734,7 @@ VISIT_ATOMIC_BINOP(Xor)
#define SIMD_BINOP_LIST(V) \
V(F32x4Add) \
+ V(F32x4AddHoriz) \
V(F32x4Sub) \
V(F32x4Mul) \
V(F32x4Min) \
@@ -1788,6 +1744,7 @@ VISIT_ATOMIC_BINOP(Xor)
V(F32x4Lt) \
V(F32x4Le) \
V(I32x4Add) \
+ V(I32x4AddHoriz) \
V(I32x4Sub) \
V(I32x4Mul) \
V(I32x4MinS) \
@@ -1802,6 +1759,7 @@ VISIT_ATOMIC_BINOP(Xor)
V(I32x4GeU) \
V(I16x8Add) \
V(I16x8AddSaturateS) \
+ V(I16x8AddHoriz) \
V(I16x8Sub) \
V(I16x8SubSaturateS) \
V(I16x8Mul) \
@@ -1837,14 +1795,17 @@ VISIT_ATOMIC_BINOP(Xor)
V(S128Or) \
V(S128Xor)
-#define SIMD_INT_UNOP_LIST(V) \
- V(I32x4Neg) \
- V(I16x8Neg) \
+#define SIMD_UNOP_LIST(V) \
+ V(F32x4SConvertI32x4) \
+ V(F32x4RecipApprox) \
+ V(F32x4RecipSqrtApprox) \
+ V(I32x4Neg) \
+ V(I16x8Neg) \
V(I8x16Neg)
-#define SIMD_OTHER_UNOP_LIST(V) \
- V(F32x4Abs) \
- V(F32x4Neg) \
+#define SIMD_UNOP_PREFIX_LIST(V) \
+ V(F32x4Abs) \
+ V(F32x4Neg) \
V(S128Not)
#define SIMD_SHIFT_OPCODES(V) \
@@ -1868,7 +1829,8 @@ void InstructionSelector::VisitF32x4Splat(Node* node) {
void InstructionSelector::VisitF32x4ExtractLane(Node* node) {
IA32OperandGenerator g(this);
InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
- InstructionOperand operand1 = g.UseImmediate(OpParameter<int32_t>(node));
+ InstructionOperand operand1 =
+ g.UseImmediate(OpParameter<int32_t>(node->op()));
if (IsSupported(AVX)) {
Emit(kAVXF32x4ExtractLane, g.DefineAsRegister(node), operand0, operand1);
} else {
@@ -1876,6 +1838,16 @@ void InstructionSelector::VisitF32x4ExtractLane(Node* node) {
}
}
+void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
+ IA32OperandGenerator g(this);
+ InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
+ if (IsSupported(AVX)) {
+ Emit(kAVXF32x4UConvertI32x4, g.DefineAsRegister(node), operand0);
+ } else {
+ Emit(kSSEF32x4UConvertI32x4, g.DefineSameAsFirst(node), operand0);
+ }
+}
+
#define SIMD_I8X16_SHIFT_OPCODES(V) \
V(I8x16Shl) \
V(I8x16ShrS) \
@@ -1933,65 +1905,67 @@ SIMD_INT_TYPES(VISIT_SIMD_SPLAT)
#define VISIT_SIMD_EXTRACT_LANE(Type) \
void InstructionSelector::Visit##Type##ExtractLane(Node* node) { \
IA32OperandGenerator g(this); \
- int32_t lane = OpParameter<int32_t>(node); \
+ int32_t lane = OpParameter<int32_t>(node->op()); \
Emit(kIA32##Type##ExtractLane, g.DefineAsRegister(node), \
g.UseRegister(node->InputAt(0)), g.UseImmediate(lane)); \
}
SIMD_INT_TYPES(VISIT_SIMD_EXTRACT_LANE)
#undef VISIT_SIMD_EXTRACT_LANE
-#define VISIT_SIMD_REPLACE_LANE(Type) \
- void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
- IA32OperandGenerator g(this); \
- InstructionOperand operand0 = g.UseRegister(node->InputAt(0)); \
- InstructionOperand operand1 = g.UseImmediate(OpParameter<int32_t>(node)); \
- InstructionOperand operand2 = g.Use(node->InputAt(1)); \
- if (IsSupported(AVX)) { \
- Emit(kAVX##Type##ReplaceLane, g.DefineAsRegister(node), operand0, \
- operand1, operand2); \
- } else { \
- Emit(kSSE##Type##ReplaceLane, g.DefineSameAsFirst(node), operand0, \
- operand1, operand2); \
- } \
+#define VISIT_SIMD_REPLACE_LANE(Type) \
+ void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
+ IA32OperandGenerator g(this); \
+ InstructionOperand operand0 = g.UseRegister(node->InputAt(0)); \
+ InstructionOperand operand1 = \
+ g.UseImmediate(OpParameter<int32_t>(node->op())); \
+ InstructionOperand operand2 = g.Use(node->InputAt(1)); \
+ if (IsSupported(AVX)) { \
+ Emit(kAVX##Type##ReplaceLane, g.DefineAsRegister(node), operand0, \
+ operand1, operand2); \
+ } else { \
+ Emit(kSSE##Type##ReplaceLane, g.DefineSameAsFirst(node), operand0, \
+ operand1, operand2); \
+ } \
}
SIMD_INT_TYPES(VISIT_SIMD_REPLACE_LANE)
VISIT_SIMD_REPLACE_LANE(F32x4)
#undef VISIT_SIMD_REPLACE_LANE
#undef SIMD_INT_TYPES
-#define VISIT_SIMD_SHIFT(Opcode) \
- void InstructionSelector::Visit##Opcode(Node* node) { \
- IA32OperandGenerator g(this); \
- InstructionOperand operand0 = g.UseRegister(node->InputAt(0)); \
- InstructionOperand operand1 = g.UseImmediate(OpParameter<int32_t>(node)); \
- if (IsSupported(AVX)) { \
- Emit(kAVX##Opcode, g.DefineAsRegister(node), operand0, operand1); \
- } else { \
- Emit(kSSE##Opcode, g.DefineSameAsFirst(node), operand0, operand1); \
- } \
+#define VISIT_SIMD_SHIFT(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ IA32OperandGenerator g(this); \
+ InstructionOperand operand0 = g.UseRegister(node->InputAt(0)); \
+ InstructionOperand operand1 = \
+ g.UseImmediate(OpParameter<int32_t>(node->op())); \
+ if (IsSupported(AVX)) { \
+ Emit(kAVX##Opcode, g.DefineAsRegister(node), operand0, operand1); \
+ } else { \
+ Emit(kSSE##Opcode, g.DefineSameAsFirst(node), operand0, operand1); \
+ } \
}
SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT)
#undef VISIT_SIMD_SHIFT
#undef SIMD_SHIFT_OPCODES
-#define VISIT_SIMD_INT_UNOP(Opcode) \
+#define VISIT_SIMD_UNOP(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
IA32OperandGenerator g(this); \
Emit(kIA32##Opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0))); \
}
-SIMD_INT_UNOP_LIST(VISIT_SIMD_INT_UNOP)
-#undef VISIT_SIMD_INT_UNOP
-#undef SIMD_INT_UNOP_LIST
+SIMD_UNOP_LIST(VISIT_SIMD_UNOP)
+#undef VISIT_SIMD_UNOP
+#undef SIMD_UNOP_LIST
-#define VISIT_SIMD_OTHER_UNOP(Opcode) \
+#define VISIT_SIMD_UNOP_PREFIX(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
IA32OperandGenerator g(this); \
InstructionCode opcode = IsSupported(AVX) ? kAVX##Opcode : kSSE##Opcode; \
Emit(opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0))); \
}
-SIMD_OTHER_UNOP_LIST(VISIT_SIMD_OTHER_UNOP)
-#undef VISIT_SIMD_OTHER_UNOP
-#undef SIMD_OTHER_UNOP_LIST
+SIMD_UNOP_PREFIX_LIST(VISIT_SIMD_UNOP_PREFIX)
+#undef VISIT_SIMD_UNOP_PREFIX
+#undef SIMD_UNOP_PREFIX_LIST
#define VISIT_SIMD_BINOP(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
@@ -2009,6 +1983,43 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
UNREACHABLE();
}
+void InstructionSelector::VisitS8x16Shuffle(Node* node) {
+ static const int kMaxSwizzleIndex = 15;
+ static const int kMaxShuffleIndex = 31;
+ const uint8_t* shuffle = OpParameter<uint8_t*>(node->op());
+ uint8_t mask = CanonicalizeShuffle(node);
+ uint8_t shuffle32x4[4];
+ IA32OperandGenerator g(this);
+ InstructionOperand output = g.DefineAsRegister(node);
+ InstructionOperand inputs[6];
+ InstructionOperand temps[1];
+ size_t input_count = 0;
+ Node* input0 = node->InputAt(0);
+ Node* input1 = node->InputAt(1);
+ if (mask == kMaxSwizzleIndex) {
+ if (TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
+ Emit(kIA32S32x4Swizzle, output, g.Use(input0),
+ g.UseImmediate((shuffle32x4[0] & 3) | ((shuffle32x4[1] & 3) << 2) |
+ ((shuffle32x4[2] & 3) << 4) |
+ ((shuffle32x4[3] & 3) << 6)));
+ return;
+ }
+ // TODO(ia32): handle non 32x4 swizzles here
+ inputs[input_count++] = g.Use(input0);
+ } else {
+ DCHECK_EQ(kMaxShuffleIndex, mask);
+ USE(kMaxShuffleIndex);
+ inputs[input_count++] = g.Use(input0);
+ inputs[input_count++] = g.Use(input1);
+ }
+ inputs[input_count++] = g.UseImmediate(Pack4Lanes(shuffle, mask));
+ inputs[input_count++] = g.UseImmediate(Pack4Lanes(shuffle + 4, mask));
+ inputs[input_count++] = g.UseImmediate(Pack4Lanes(shuffle + 8, mask));
+ inputs[input_count++] = g.UseImmediate(Pack4Lanes(shuffle + 12, mask));
+ temps[0] = g.TempRegister();
+ Emit(kIA32S8x16Shuffle, 1, &output, input_count, inputs, 1, temps);
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
@@ -2039,9 +2050,6 @@ InstructionSelector::AlignmentRequirements() {
FullUnalignedAccessSupport();
}
-// static
-bool InstructionSelector::SupportsSpeculationPoisoning() { return false; }
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/instruction-codes.h b/deps/v8/src/compiler/instruction-codes.h
index 035833af0f..d5820faa5e 100644
--- a/deps/v8/src/compiler/instruction-codes.h
+++ b/deps/v8/src/compiler/instruction-codes.h
@@ -66,9 +66,11 @@ enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };
V(ArchStackPointer) \
V(ArchFramePointer) \
V(ArchParentFramePointer) \
+ V(ArchRootsPointer) \
V(ArchTruncateDoubleToI) \
V(ArchStoreWithWriteBarrier) \
V(ArchStackSlot) \
+ V(ArchPoisonOnSpeculationWord) \
V(Word32AtomicLoadInt8) \
V(Word32AtomicLoadUint8) \
V(Word32AtomicLoadInt16) \
diff --git a/deps/v8/src/compiler/instruction-scheduler.cc b/deps/v8/src/compiler/instruction-scheduler.cc
index 905ae4e6f0..9219e9a520 100644
--- a/deps/v8/src/compiler/instruction-scheduler.cc
+++ b/deps/v8/src/compiler/instruction-scheduler.cc
@@ -6,6 +6,7 @@
#include "src/base/adapters.h"
#include "src/base/utils/random-number-generator.h"
+#include "src/isolate.h"
namespace v8 {
namespace internal {
@@ -114,17 +115,24 @@ void InstructionScheduler::EndBlock(RpoNumber rpo) {
operands_map_.clear();
}
+void InstructionScheduler::AddTerminator(Instruction* instr) {
+ ScheduleGraphNode* new_node = new (zone()) ScheduleGraphNode(zone(), instr);
+ // Make sure that basic block terminators are not moved by adding them
+ // as successor of every instruction.
+ for (ScheduleGraphNode* node : graph_) {
+ node->AddSuccessor(new_node);
+ }
+ graph_.push_back(new_node);
+}
void InstructionScheduler::AddInstruction(Instruction* instr) {
ScheduleGraphNode* new_node = new (zone()) ScheduleGraphNode(zone(), instr);
- if (IsBlockTerminator(instr)) {
- // Make sure that basic block terminators are not moved by adding them
- // as successor of every instruction.
- for (ScheduleGraphNode* node : graph_) {
- node->AddSuccessor(new_node);
- }
- } else if (IsFixedRegisterParameter(instr)) {
+ // We should not have branches in the middle of a block.
+ DCHECK_NE(instr->flags_mode(), kFlags_branch);
+ DCHECK_NE(instr->flags_mode(), kFlags_branch_and_poison);
+
+ if (IsFixedRegisterParameter(instr)) {
if (last_live_in_reg_marker_ != nullptr) {
last_live_in_reg_marker_->AddSuccessor(new_node);
}
@@ -240,10 +248,17 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
case kArchNop:
case kArchFramePointer:
case kArchParentFramePointer:
+ case kArchRootsPointer:
case kArchStackSlot: // Despite its name this opcode will produce a
// reference to a frame slot, so it is not affected
// by the arm64 dual stack issues mentioned below.
case kArchComment:
+ case kArchDeoptimize:
+ case kArchJmp:
+ case kArchLookupSwitch:
+ case kArchRet:
+ case kArchTableSwitch:
+ case kArchThrowTerminator:
return kNoOpcodeFlags;
case kArchTruncateDoubleToI:
@@ -275,6 +290,11 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
// must not be reordered with instruction with side effects.
return kIsLoadOperation;
+ case kArchPoisonOnSpeculationWord:
+ // While poisoning operations have no side effect, they must not be
+ // reordered relative to branches.
+ return kHasSideEffect;
+
case kArchPrepareCallCFunction:
case kArchSaveCallerRegisters:
case kArchRestoreCallerRegisters:
@@ -283,23 +303,13 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
case kArchCallCodeObject:
case kArchCallJSFunction:
case kArchCallWasmFunction:
- return kHasSideEffect;
-
case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject:
case kArchTailCallAddress:
case kArchTailCallWasm:
- return kHasSideEffect | kIsBlockTerminator;
-
- case kArchDeoptimize:
- case kArchJmp:
- case kArchLookupSwitch:
- case kArchTableSwitch:
- case kArchRet:
case kArchDebugAbort:
case kArchDebugBreak:
- case kArchThrowTerminator:
- return kIsBlockTerminator;
+ return kHasSideEffect;
case kArchStoreWithWriteBarrier:
return kHasSideEffect;
@@ -362,14 +372,6 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
UNREACHABLE();
}
-
-bool InstructionScheduler::IsBlockTerminator(const Instruction* instr) const {
- return ((GetInstructionFlags(instr) & kIsBlockTerminator) ||
- (instr->flags_mode() == kFlags_branch) ||
- (instr->flags_mode() == kFlags_branch_and_poison));
-}
-
-
void InstructionScheduler::ComputeTotalLatencies() {
for (ScheduleGraphNode* node : base::Reversed(graph_)) {
int max_latency = 0;
diff --git a/deps/v8/src/compiler/instruction-scheduler.h b/deps/v8/src/compiler/instruction-scheduler.h
index 3d7b88f8b6..87d41d30d6 100644
--- a/deps/v8/src/compiler/instruction-scheduler.h
+++ b/deps/v8/src/compiler/instruction-scheduler.h
@@ -16,12 +16,10 @@ namespace compiler {
// scheduler is aware of dependencies between instructions.
enum ArchOpcodeFlags {
kNoOpcodeFlags = 0,
- kIsBlockTerminator = 1, // The instruction marks the end of a basic block
- // e.g.: jump and return instructions.
- kHasSideEffect = 2, // The instruction has some side effects (memory
- // store, function call...)
- kIsLoadOperation = 4, // The instruction is a memory load.
- kMayNeedDeoptOrTrapCheck = 8, // The instruction may be associated with a
+ kHasSideEffect = 1, // The instruction has some side effects (memory
+ // store, function call...)
+ kIsLoadOperation = 2, // The instruction is a memory load.
+ kMayNeedDeoptOrTrapCheck = 4, // The instruction may be associated with a
// deopt or trap check which must be run before
// instruction e.g. div on Intel platform which
// will raise an exception when the divisor is
@@ -36,6 +34,7 @@ class InstructionScheduler final : public ZoneObject {
void EndBlock(RpoNumber rpo);
void AddInstruction(Instruction* instr);
+ void AddTerminator(Instruction* instr);
static bool SchedulerSupported();
@@ -153,9 +152,6 @@ class InstructionScheduler final : public ZoneObject {
int GetInstructionFlags(const Instruction* instr) const;
int GetTargetInstructionFlags(const Instruction* instr) const;
- // Return true if the instruction is a basic block terminator.
- bool IsBlockTerminator(const Instruction* instr) const;
-
// Check whether the given instruction has side effects (e.g. function call,
// memory store).
bool HasSideEffect(const Instruction* instr) const {
@@ -208,6 +204,8 @@ class InstructionScheduler final : public ZoneObject {
InstructionSequence* sequence_;
ZoneVector<ScheduleGraphNode*> graph_;
+ friend class InstructionSchedulerTester;
+
// Last side effect instruction encountered while building the graph.
ScheduleGraphNode* last_side_effect_instr_;
diff --git a/deps/v8/src/compiler/instruction-selector-impl.h b/deps/v8/src/compiler/instruction-selector-impl.h
index 56ccd9fc64..824b6a5376 100644
--- a/deps/v8/src/compiler/instruction-selector-impl.h
+++ b/deps/v8/src/compiler/instruction-selector-impl.h
@@ -15,15 +15,52 @@ namespace v8 {
namespace internal {
namespace compiler {
+struct CaseInfo {
+ int32_t value; // The case value.
+ int32_t order; // The order for lowering to comparisons (less means earlier).
+ BasicBlock* branch; // The basic block corresponding to the case value.
+};
+
+inline bool operator<(const CaseInfo& l, const CaseInfo& r) {
+ return l.order < r.order;
+}
+
// Helper struct containing data about a table or lookup switch.
-struct SwitchInfo {
- int32_t min_value; // minimum value of {case_values}
- int32_t max_value; // maximum value of {case_values}
- size_t value_range; // |max_value - min_value| + 1
- size_t case_count; // number of cases
- int32_t* case_values; // actual case values, unsorted
- BasicBlock** case_branches; // basic blocks corresponding to case values
- BasicBlock* default_branch; // default branch target
+class SwitchInfo {
+ public:
+ SwitchInfo(ZoneVector<CaseInfo>& cases, int32_t min_value, int32_t max_value,
+ BasicBlock* default_branch)
+ : cases_(cases),
+ min_value_(min_value),
+ max_value_(max_value),
+ default_branch_(default_branch) {
+ if (cases.size() != 0) {
+ DCHECK_LE(min_value, max_value);
+ // Note that {value_range} can be 0 if {min_value} is -2^31 and
+ // {max_value} is 2^31-1, so don't assume that it's non-zero below.
+ value_range_ =
+ 1u + bit_cast<uint32_t>(max_value) - bit_cast<uint32_t>(min_value);
+ } else {
+ value_range_ = 0;
+ }
+ }
+
+ int32_t min_value() const { return min_value_; }
+ int32_t max_value() const { return max_value_; }
+ size_t value_range() const { return value_range_; }
+ size_t case_count() const { return cases_.size(); }
+ const CaseInfo& GetCase(size_t i) const {
+ DCHECK_LT(i, cases_.size());
+ return cases_[i];
+ }
+ BasicBlock* default_branch() const { return default_branch_; }
+
+ private:
+ const ZoneVector<CaseInfo>& cases_;
+ int32_t min_value_; // minimum value of {cases_}
+ int32_t max_value_; // maximum value of {cases_}
+ size_t value_range_; // |max_value - min_value| + 1
+ BasicBlock* default_branch_;
};
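A quick check of the wrap-around the constructor comment warns about: with min_value == INT32_MIN (0x80000000) and max_value == INT32_MAX (0x7FFFFFFF), value_range_ computes to 1u + 0x7FFFFFFFu - 0x80000000u == 0 modulo 2^32, so callers must not assume the range is non-zero.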
// A helper class for the instruction selector that simplifies construction of
@@ -91,17 +128,23 @@ class OperandGenerator {
}
InstructionOperand UseAnyAtEnd(Node* node) {
- return Use(node, UnallocatedOperand(UnallocatedOperand::ANY,
+ return Use(node, UnallocatedOperand(UnallocatedOperand::REGISTER_OR_SLOT,
UnallocatedOperand::USED_AT_END,
GetVReg(node)));
}
InstructionOperand UseAny(Node* node) {
- return Use(node, UnallocatedOperand(UnallocatedOperand::ANY,
+ return Use(node, UnallocatedOperand(UnallocatedOperand::REGISTER_OR_SLOT,
UnallocatedOperand::USED_AT_START,
GetVReg(node)));
}
+ InstructionOperand UseRegisterOrSlotOrConstant(Node* node) {
+ return Use(node, UnallocatedOperand(
+ UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT,
+ UnallocatedOperand::USED_AT_START, GetVReg(node)));
+ }
+
InstructionOperand UseRegister(Node* node) {
return Use(node, UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
UnallocatedOperand::USED_AT_START,
@@ -244,22 +287,22 @@ class OperandGenerator {
static Constant ToConstant(const Node* node) {
switch (node->opcode()) {
case IrOpcode::kInt32Constant:
- return Constant(OpParameter<int32_t>(node));
+ return Constant(OpParameter<int32_t>(node->op()));
case IrOpcode::kInt64Constant:
- return Constant(OpParameter<int64_t>(node));
+ return Constant(OpParameter<int64_t>(node->op()));
case IrOpcode::kFloat32Constant:
- return Constant(OpParameter<float>(node));
+ return Constant(OpParameter<float>(node->op()));
case IrOpcode::kRelocatableInt32Constant:
case IrOpcode::kRelocatableInt64Constant:
- return Constant(OpParameter<RelocatablePtrConstantInfo>(node));
+ return Constant(OpParameter<RelocatablePtrConstantInfo>(node->op()));
case IrOpcode::kFloat64Constant:
case IrOpcode::kNumberConstant:
- return Constant(OpParameter<double>(node));
+ return Constant(OpParameter<double>(node->op()));
case IrOpcode::kExternalConstant:
case IrOpcode::kComment:
- return Constant(OpParameter<ExternalReference>(node));
+ return Constant(OpParameter<ExternalReference>(node->op()));
case IrOpcode::kHeapConstant:
- return Constant(OpParameter<Handle<HeapObject>>(node));
+ return Constant(HeapConstantOf(node->op()));
case IrOpcode::kDeadValue: {
switch (DeadValueRepresentationOf(node->op())) {
case MachineRepresentation::kBit:
@@ -286,9 +329,9 @@ class OperandGenerator {
static Constant ToNegatedConstant(const Node* node) {
switch (node->opcode()) {
case IrOpcode::kInt32Constant:
- return Constant(-OpParameter<int32_t>(node));
+ return Constant(-OpParameter<int32_t>(node->op()));
case IrOpcode::kInt64Constant:
- return Constant(-OpParameter<int64_t>(node));
+ return Constant(-OpParameter<int64_t>(node->op()));
default:
break;
}
@@ -350,210 +393,6 @@ class OperandGenerator {
InstructionSelector* selector_;
};
-
-// The flags continuation is a way to combine a branch or a materialization
-// of a boolean value with an instruction that sets the flags register.
-// The whole instruction is treated as a unit by the register allocator, and
-// thus no spills or moves can be introduced between the flags-setting
-// instruction and the branch or set it should be combined with.
-class FlagsContinuation final {
- public:
- FlagsContinuation() : mode_(kFlags_none) {}
-
- // Creates a new flags continuation from the given condition and true/false
- // blocks.
- static FlagsContinuation ForBranch(FlagsCondition condition,
- BasicBlock* true_block,
- BasicBlock* false_block,
- LoadPoisoning masking) {
- FlagsMode mode = masking == LoadPoisoning::kDoPoison
- ? kFlags_branch_and_poison
- : kFlags_branch;
- return FlagsContinuation(mode, condition, true_block, false_block);
- }
-
- static FlagsContinuation ForBranchAndPoison(FlagsCondition condition,
- BasicBlock* true_block,
- BasicBlock* false_block) {
- return FlagsContinuation(kFlags_branch_and_poison, condition, true_block,
- false_block);
- }
-
- // Creates a new flags continuation for an eager deoptimization exit.
- static FlagsContinuation ForDeoptimize(FlagsCondition condition,
- DeoptimizeKind kind,
- DeoptimizeReason reason,
- VectorSlotPair const& feedback,
- Node* frame_state,
- LoadPoisoning masking) {
- FlagsMode mode = masking == LoadPoisoning::kDoPoison
- ? kFlags_deoptimize_and_poison
- : kFlags_deoptimize;
- return FlagsContinuation(mode, condition, kind, reason, feedback,
- frame_state);
- }
-
- // Creates a new flags continuation for a boolean value.
- static FlagsContinuation ForSet(FlagsCondition condition, Node* result) {
- return FlagsContinuation(condition, result);
- }
-
- // Creates a new flags continuation for a wasm trap.
- static FlagsContinuation ForTrap(FlagsCondition condition,
- Runtime::FunctionId trap_id, Node* result) {
- return FlagsContinuation(condition, trap_id, result);
- }
-
- bool IsNone() const { return mode_ == kFlags_none; }
- bool IsBranch() const {
- return mode_ == kFlags_branch || mode_ == kFlags_branch_and_poison;
- }
- bool IsDeoptimize() const {
- return mode_ == kFlags_deoptimize || mode_ == kFlags_deoptimize_and_poison;
- }
- bool IsPoisoned() const {
- return mode_ == kFlags_branch_and_poison ||
- mode_ == kFlags_deoptimize_and_poison;
- }
- bool IsSet() const { return mode_ == kFlags_set; }
- bool IsTrap() const { return mode_ == kFlags_trap; }
- FlagsCondition condition() const {
- DCHECK(!IsNone());
- return condition_;
- }
- DeoptimizeKind kind() const {
- DCHECK(IsDeoptimize());
- return kind_;
- }
- DeoptimizeReason reason() const {
- DCHECK(IsDeoptimize());
- return reason_;
- }
- VectorSlotPair const& feedback() const {
- DCHECK(IsDeoptimize());
- return feedback_;
- }
- Node* frame_state() const {
- DCHECK(IsDeoptimize());
- return frame_state_or_result_;
- }
- Node* result() const {
- DCHECK(IsSet());
- return frame_state_or_result_;
- }
- Runtime::FunctionId trap_id() const {
- DCHECK(IsTrap());
- return trap_id_;
- }
- BasicBlock* true_block() const {
- DCHECK(IsBranch());
- return true_block_;
- }
- BasicBlock* false_block() const {
- DCHECK(IsBranch());
- return false_block_;
- }
-
- void Negate() {
- DCHECK(!IsNone());
- condition_ = NegateFlagsCondition(condition_);
- }
-
- void Commute() {
- DCHECK(!IsNone());
- condition_ = CommuteFlagsCondition(condition_);
- }
-
- void Overwrite(FlagsCondition condition) { condition_ = condition; }
-
- void OverwriteAndNegateIfEqual(FlagsCondition condition) {
- DCHECK(condition_ == kEqual || condition_ == kNotEqual);
- bool negate = condition_ == kEqual;
- condition_ = condition;
- if (negate) Negate();
- }
-
- void OverwriteUnsignedIfSigned() {
- switch (condition_) {
- case kSignedLessThan:
- condition_ = kUnsignedLessThan;
- break;
- case kSignedLessThanOrEqual:
- condition_ = kUnsignedLessThanOrEqual;
- break;
- case kSignedGreaterThan:
- condition_ = kUnsignedGreaterThan;
- break;
- case kSignedGreaterThanOrEqual:
- condition_ = kUnsignedGreaterThanOrEqual;
- break;
- default:
- break;
- }
- }
-
- // Encodes this flags continuation into the given opcode.
- InstructionCode Encode(InstructionCode opcode) {
- opcode |= FlagsModeField::encode(mode_);
- if (mode_ != kFlags_none) {
- opcode |= FlagsConditionField::encode(condition_);
- }
- return opcode;
- }
-
- private:
- FlagsContinuation(FlagsMode mode, FlagsCondition condition,
- BasicBlock* true_block, BasicBlock* false_block)
- : mode_(mode),
- condition_(condition),
- true_block_(true_block),
- false_block_(false_block) {
- DCHECK(mode == kFlags_branch || mode == kFlags_branch_and_poison);
- DCHECK_NOT_NULL(true_block);
- DCHECK_NOT_NULL(false_block);
- }
-
- FlagsContinuation(FlagsMode mode, FlagsCondition condition,
- DeoptimizeKind kind, DeoptimizeReason reason,
- VectorSlotPair const& feedback, Node* frame_state)
- : mode_(mode),
- condition_(condition),
- kind_(kind),
- reason_(reason),
- feedback_(feedback),
- frame_state_or_result_(frame_state) {
- DCHECK(mode == kFlags_deoptimize || mode == kFlags_deoptimize_and_poison);
- DCHECK_NOT_NULL(frame_state);
- }
-
- FlagsContinuation(FlagsCondition condition, Node* result)
- : mode_(kFlags_set),
- condition_(condition),
- frame_state_or_result_(result) {
- DCHECK_NOT_NULL(result);
- }
-
- FlagsContinuation(FlagsCondition condition, Runtime::FunctionId trap_id,
- Node* result)
- : mode_(kFlags_trap),
- condition_(condition),
- frame_state_or_result_(result),
- trap_id_(trap_id) {
- DCHECK_NOT_NULL(result);
- }
-
- FlagsMode const mode_;
- FlagsCondition condition_;
- DeoptimizeKind kind_; // Only valid if mode_ == kFlags_deoptimize*
- DeoptimizeReason reason_; // Only valid if mode_ == kFlags_deoptimize*
- VectorSlotPair feedback_; // Only valid if mode_ == kFlags_deoptimize*
- Node* frame_state_or_result_; // Only valid if mode_ == kFlags_deoptimize*
- // or mode_ == kFlags_set.
- BasicBlock* true_block_; // Only valid if mode_ == kFlags_branch*.
- BasicBlock* false_block_; // Only valid if mode_ == kFlags_branch*.
- Runtime::FunctionId trap_id_; // Only valid if mode_ == kFlags_trap.
-};
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/instruction-selector.cc b/deps/v8/src/compiler/instruction-selector.cc
index 954a1fc272..6fce9ea470 100644
--- a/deps/v8/src/compiler/instruction-selector.cc
+++ b/deps/v8/src/compiler/instruction-selector.cc
@@ -25,10 +25,10 @@ InstructionSelector::InstructionSelector(
InstructionSequence* sequence, Schedule* schedule,
SourcePositionTable* source_positions, Frame* frame,
EnableSwitchJumpTable enable_switch_jump_table,
- EnableSpeculationPoison enable_speculation_poison,
SourcePositionMode source_position_mode, Features features,
EnableScheduling enable_scheduling,
- EnableSerialization enable_serialization, LoadPoisoning load_poisoning)
+ EnableSerialization enable_serialization,
+ PoisoningMitigationLevel poisoning_enabled)
: zone_(zone),
linkage_(linkage),
sequence_(sequence),
@@ -38,6 +38,8 @@ InstructionSelector::InstructionSelector(
schedule_(schedule),
current_block_(nullptr),
instructions_(zone),
+ continuation_inputs_(sequence->zone()),
+ continuation_outputs_(sequence->zone()),
defined_(node_count, false, zone),
used_(node_count, false, zone),
effect_level_(node_count, 0, zone),
@@ -48,11 +50,12 @@ InstructionSelector::InstructionSelector(
enable_scheduling_(enable_scheduling),
enable_serialization_(enable_serialization),
enable_switch_jump_table_(enable_switch_jump_table),
- enable_speculation_poison_(enable_speculation_poison),
- load_poisoning_(load_poisoning),
+ poisoning_enabled_(poisoning_enabled),
frame_(frame),
instruction_selection_failed_(false) {
instructions_.reserve(node_count);
+ continuation_inputs_.reserve(5);
+ continuation_outputs_.reserve(2);
}
bool InstructionSelector::SelectInstructions() {
@@ -92,9 +95,13 @@ bool InstructionSelector::SelectInstructions() {
size_t start = instruction_block->code_start();
DCHECK_LE(end, start);
StartBlock(RpoNumber::FromInt(block->rpo_number()));
- while (start-- > end) {
- UpdateRenames(instructions_[start]);
- AddInstruction(instructions_[start]);
+ if (end != start) {
+ while (start-- > end + 1) {
+ UpdateRenames(instructions_[start]);
+ AddInstruction(instructions_[start]);
+ }
+ UpdateRenames(instructions_[end]);
+ AddTerminator(instructions_[end]);
}
EndBlock(RpoNumber::FromInt(block->rpo_number()));
}
@@ -123,6 +130,14 @@ void InstructionSelector::EndBlock(RpoNumber rpo) {
}
}
+void InstructionSelector::AddTerminator(Instruction* instr) {
+ if (UseInstructionScheduling()) {
+ DCHECK_NOT_NULL(scheduler_);
+ scheduler_->AddTerminator(instr);
+ } else {
+ sequence()->AddInstruction(instr);
+ }
+}
void InstructionSelector::AddInstruction(Instruction* instr) {
if (UseInstructionScheduling()) {
@@ -448,7 +463,7 @@ InstructionOperand OperandForDeopt(Isolate* isolate, OperandGenerator* g,
return InstructionOperand();
}
- Handle<HeapObject> constant = OpParameter<Handle<HeapObject>>(input);
+ Handle<HeapObject> constant = HeapConstantOf(input->op());
Heap::RootListIndex root_index;
if (isolate->heap()->IsRootHandle(constant, &root_index) &&
root_index == Heap::kOptimizedOutRootIndex) {
@@ -651,6 +666,105 @@ size_t InstructionSelector::AddInputsToFrameStateDescriptor(
return entries;
}
+Instruction* InstructionSelector::EmitWithContinuation(
+ InstructionCode opcode, FlagsContinuation* cont) {
+ return EmitWithContinuation(opcode, 0, nullptr, 0, nullptr, cont);
+}
+
+Instruction* InstructionSelector::EmitWithContinuation(
+ InstructionCode opcode, InstructionOperand a, FlagsContinuation* cont) {
+ return EmitWithContinuation(opcode, 0, nullptr, 1, &a, cont);
+}
+
+Instruction* InstructionSelector::EmitWithContinuation(
+ InstructionCode opcode, InstructionOperand a, InstructionOperand b,
+ FlagsContinuation* cont) {
+ InstructionOperand inputs[] = {a, b};
+ return EmitWithContinuation(opcode, 0, nullptr, arraysize(inputs), inputs,
+ cont);
+}
+
+Instruction* InstructionSelector::EmitWithContinuation(
+ InstructionCode opcode, InstructionOperand a, InstructionOperand b,
+ InstructionOperand c, FlagsContinuation* cont) {
+ InstructionOperand inputs[] = {a, b, c};
+ return EmitWithContinuation(opcode, 0, nullptr, arraysize(inputs), inputs,
+ cont);
+}
+
+Instruction* InstructionSelector::EmitWithContinuation(
+ InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
+ size_t input_count, InstructionOperand* inputs, FlagsContinuation* cont) {
+ OperandGenerator g(this);
+
+ opcode = cont->Encode(opcode);
+
+ continuation_inputs_.resize(0);
+ for (size_t i = 0; i < input_count; i++) {
+ continuation_inputs_.push_back(inputs[i]);
+ }
+
+ continuation_outputs_.resize(0);
+ for (size_t i = 0; i < output_count; i++) {
+ continuation_outputs_.push_back(outputs[i]);
+ }
+
+ if (cont->IsBranch()) {
+ continuation_inputs_.push_back(g.Label(cont->true_block()));
+ continuation_inputs_.push_back(g.Label(cont->false_block()));
+ } else if (cont->IsDeoptimize()) {
+ opcode |= MiscField::encode(static_cast<int>(input_count));
+ AppendDeoptimizeArguments(&continuation_inputs_, cont->kind(),
+ cont->reason(), cont->feedback(),
+ cont->frame_state());
+ } else if (cont->IsSet()) {
+ continuation_outputs_.push_back(g.DefineAsRegister(cont->result()));
+ } else if (cont->IsTrap()) {
+ continuation_inputs_.push_back(g.UseImmediate(cont->trap_id()));
+ } else {
+ DCHECK(cont->IsNone());
+ }
+
+ size_t const emit_inputs_size = continuation_inputs_.size();
+ auto* emit_inputs =
+ emit_inputs_size ? &continuation_inputs_.front() : nullptr;
+ size_t const emit_outputs_size = continuation_outputs_.size();
+ auto* emit_outputs =
+ emit_outputs_size ? &continuation_outputs_.front() : nullptr;
+ return Emit(opcode, emit_outputs_size, emit_outputs, emit_inputs_size,
+ emit_inputs, 0, nullptr);
+}
+
+void InstructionSelector::AppendDeoptimizeArguments(
+ InstructionOperandVector* args, DeoptimizeKind kind,
+ DeoptimizeReason reason, VectorSlotPair const& feedback,
+ Node* frame_state) {
+ OperandGenerator g(this);
+ FrameStateDescriptor* const descriptor = GetFrameStateDescriptor(frame_state);
+ DCHECK_NE(DeoptimizeKind::kLazy, kind);
+ int const state_id =
+ sequence()->AddDeoptimizationEntry(descriptor, kind, reason, feedback);
+ args->push_back(g.TempImmediate(state_id));
+ StateObjectDeduplicator deduplicator(instruction_zone());
+ AddInputsToFrameStateDescriptor(descriptor, frame_state, &g, &deduplicator,
+ args, FrameStateInputKind::kAny,
+ instruction_zone());
+}
+
+Instruction* InstructionSelector::EmitDeoptimize(
+ InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
+ size_t input_count, InstructionOperand* inputs, DeoptimizeKind kind,
+ DeoptimizeReason reason, VectorSlotPair const& feedback,
+ Node* frame_state) {
+ InstructionOperandVector args(instruction_zone());
+ for (size_t i = 0; i < input_count; ++i) {
+ args.push_back(inputs[i]);
+ }
+ opcode |= MiscField::encode(static_cast<int>(input_count));
+ AppendDeoptimizeArguments(&args, kind, reason, feedback, frame_state);
+ return Emit(opcode, output_count, outputs, args.size(), &args.front(), 0,
+ nullptr);
+}
// An internal helper class for generating the operands to calls.
// TODO(bmeurer): Get rid of the CallBuffer business and make
@@ -776,12 +890,8 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
? g.UseImmediate(callee)
: call_use_fixed_target_reg
? g.UseFixed(callee, kJavaScriptCallCodeStartRegister)
-#ifdef V8_EMBEDDED_BUILTINS
: is_tail_call ? g.UseUniqueRegister(callee)
: g.UseRegister(callee));
-#else
- : g.UseRegister(callee));
-#endif
break;
case CallDescriptor::kCallAddress:
buffer->instruction_args.push_back(
@@ -809,10 +919,17 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
}
DCHECK_EQ(1u, buffer->instruction_args.size());
+ // Argument 1 is used for the poison-alias index (encoded in a word-sized
+ // immediate). This is the index of the operand that aliases with the poison
+ // register, or -1 if there is no aliasing.
+ buffer->instruction_args.push_back(g.TempImmediate(-1));
+ const size_t poison_alias_index = 1;
+ DCHECK_EQ(buffer->instruction_args.size() - 1, poison_alias_index);
+
// If the call needs a frame state, we insert the state information as
// follows (n is the number of value inputs to the frame state):
- // arg 1 : deoptimization id.
- // arg 2 - arg (n + 1) : value inputs to the frame state.
+ // arg 2 : deoptimization id.
+ // arg 3 - arg (n + 2) : value inputs to the frame state.
size_t frame_state_entries = 0;
USE(frame_state_entries); // frame_state_entries is only used for debug.
if (buffer->frame_state_descriptor != nullptr) {
@@ -848,7 +965,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
&buffer->instruction_args, FrameStateInputKind::kStackSlot,
instruction_zone());
- DCHECK_EQ(1 + frame_state_entries, buffer->instruction_args.size());
+ DCHECK_EQ(2 + frame_state_entries, buffer->instruction_args.size());
}
size_t input_count = static_cast<size_t>(buffer->input_count());
@@ -871,8 +988,9 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
location, stack_param_delta);
}
InstructionOperand op = g.UseLocation(*iter, location);
- if (UnallocatedOperand::cast(op).HasFixedSlotPolicy() && !call_tail) {
- int stack_index = -UnallocatedOperand::cast(op).fixed_slot_index() - 1;
+ UnallocatedOperand unallocated = UnallocatedOperand::cast(op);
+ if (unallocated.HasFixedSlotPolicy() && !call_tail) {
+ int stack_index = -unallocated.fixed_slot_index() - 1;
if (static_cast<size_t>(stack_index) >= buffer->pushed_nodes.size()) {
buffer->pushed_nodes.resize(stack_index + 1);
}
@@ -880,11 +998,23 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
buffer->pushed_nodes[stack_index] = param;
pushed_count++;
} else {
+ // If we do load poisoning and the linkage uses the poisoning register,
+ // then we request the input in a memory location, and during code
+ // generation, we move the input to the register.
+ if (poisoning_enabled_ != PoisoningMitigationLevel::kOff &&
+ unallocated.HasFixedRegisterPolicy()) {
+ int reg = unallocated.fixed_register_index();
+ if (reg == kSpeculationPoisonRegister.code()) {
+ buffer->instruction_args[poison_alias_index] = g.TempImmediate(
+ static_cast<int32_t>(buffer->instruction_args.size()));
+ op = g.UseRegisterOrSlotOrConstant(*iter);
+ }
+ }
buffer->instruction_args.push_back(op);
}
}
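A hedged sketch of the consumer of the poison-alias immediate set up above, i.e. the MoveOperandIfAliasedWithPoisonRegister calls added in the architecture code generators earlier in this diff; treat the body below as an assumption about its shape rather than its literal implementation:

    // Shape of the expected helper (names per the calls added above; details
    // assumed): read the alias immediate and, if it is not -1, move that
    // operand into the poison register before the call is emitted.
    void MoveOperandIfAliasedWithPoisonRegister(Instruction* call,
                                                CodeGenerator* gen) {
      IA32OperandConverter i(gen, call);     // operand converter, assumed type
      int poison_index = i.InputInt32(1);    // the TempImmediate(-1) placeholder
      if (poison_index == -1) return;        // no input aliases the register
      gen->tasm()->mov(kSpeculationPoisonRegister, i.InputOperand(poison_index));
    }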
DCHECK_EQ(input_count, buffer->instruction_args.size() + pushed_count -
- frame_state_entries);
+ frame_state_entries - 1);
if (V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK && call_tail &&
stack_param_delta != 0) {
// For tail calls that change the size of their parameter list and keep
@@ -1023,33 +1153,24 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
}
case BasicBlock::kSwitch: {
DCHECK_EQ(IrOpcode::kSwitch, input->opcode());
- SwitchInfo sw;
- // Last successor must be Default.
- sw.default_branch = block->successors().back();
- DCHECK_EQ(IrOpcode::kIfDefault, sw.default_branch->front()->opcode());
- // All other successors must be cases.
- sw.case_count = block->SuccessorCount() - 1;
- sw.case_branches = &block->successors().front();
- // Determine case values and their min/max.
- sw.case_values = zone()->NewArray<int32_t>(sw.case_count);
- sw.min_value = std::numeric_limits<int32_t>::max();
- sw.max_value = std::numeric_limits<int32_t>::min();
- for (size_t index = 0; index < sw.case_count; ++index) {
- BasicBlock* branch = sw.case_branches[index];
- int32_t value = OpParameter<int32_t>(branch->front()->op());
- sw.case_values[index] = value;
- if (sw.min_value > value) sw.min_value = value;
- if (sw.max_value < value) sw.max_value = value;
- }
- if (sw.case_count != 0) {
- DCHECK_LE(sw.min_value, sw.max_value);
- // Note that {value_range} can be 0 if {min_value} is -2^31 and
- // {max_value} is 2^31-1, so don't assume that it's non-zero below.
- sw.value_range = 1u + bit_cast<uint32_t>(sw.max_value) -
- bit_cast<uint32_t>(sw.min_value);
- } else {
- sw.value_range = 0;
+ // Last successor must be {IfDefault}.
+ BasicBlock* default_branch = block->successors().back();
+ DCHECK_EQ(IrOpcode::kIfDefault, default_branch->front()->opcode());
+ // All other successors must be {IfValue}s.
+ int32_t min_value = std::numeric_limits<int32_t>::max();
+ int32_t max_value = std::numeric_limits<int32_t>::min();
+ size_t case_count = block->SuccessorCount() - 1;
+ ZoneVector<CaseInfo> cases(case_count, zone());
+ for (size_t i = 0; i < case_count; ++i) {
+ BasicBlock* branch = block->SuccessorAt(i);
+ const IfValueParameters& p = IfValueParametersOf(branch->front()->op());
+ cases[i] = CaseInfo{p.value(), p.comparison_order(), branch};
+ if (min_value > p.value()) min_value = p.value();
+ if (max_value < p.value()) max_value = p.value();
}
+ // Ensure that comparison order of if-cascades is preserved.
+ std::stable_sort(cases.begin(), cases.end());
+ SwitchInfo sw(cases, min_value, max_value, default_branch);
return VisitSwitch(input, sw);
}
case BasicBlock::kReturn: {
@@ -1138,7 +1259,7 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kHeapConstant:
return MarkAsReference(node), VisitConstant(node);
case IrOpcode::kNumberConstant: {
- double value = OpParameter<double>(node);
+ double value = OpParameter<double>(node->op());
if (!IsSmiDouble(value)) MarkAsReference(node);
return VisitConstant(node);
}
@@ -1497,8 +1618,11 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsFloat64(node), VisitFloat64InsertLowWord32(node);
case IrOpcode::kFloat64InsertHighWord32:
return MarkAsFloat64(node), VisitFloat64InsertHighWord32(node);
- case IrOpcode::kSpeculationPoison:
- return VisitSpeculationPoison(node);
+ case IrOpcode::kPoisonOnSpeculationTagged:
+ return MarkAsReference(node), VisitPoisonOnSpeculationTagged(node);
+ case IrOpcode::kPoisonOnSpeculationWord:
+ return MarkAsRepresentation(MachineType::PointerRepresentation(), node),
+ VisitPoisonOnSpeculationWord(node);
case IrOpcode::kStackSlot:
return VisitStackSlot(node);
case IrOpcode::kLoadStackPointer:
@@ -1507,9 +1631,10 @@ void InstructionSelector::VisitNode(Node* node) {
return VisitLoadFramePointer(node);
case IrOpcode::kLoadParentFramePointer:
return VisitLoadParentFramePointer(node);
+ case IrOpcode::kLoadRootsPointer:
+ return VisitLoadRootsPointer(node);
case IrOpcode::kUnalignedLoad: {
- UnalignedLoadRepresentation type =
- UnalignedLoadRepresentationOf(node->op());
+ LoadRepresentation type = LoadRepresentationOf(node->op());
MarkAsRepresentation(type.representation(), node);
return VisitUnalignedLoad(node);
}
@@ -1544,21 +1669,35 @@ void InstructionSelector::VisitNode(Node* node) {
MarkAsRepresentation(type.representation(), node);
return VisitWord32AtomicLoad(node);
}
+ case IrOpcode::kWord64AtomicLoad: {
+ LoadRepresentation type = LoadRepresentationOf(node->op());
+ MarkAsRepresentation(type.representation(), node);
+ return VisitWord64AtomicLoad(node);
+ }
case IrOpcode::kWord32AtomicStore:
return VisitWord32AtomicStore(node);
-#define ATOMIC_CASE(name) \
- case IrOpcode::kWord32Atomic##name: { \
+ case IrOpcode::kWord64AtomicStore:
+ return VisitWord64AtomicStore(node);
+#define ATOMIC_CASE(name, rep) \
+ case IrOpcode::k##rep##Atomic##name: { \
MachineType type = AtomicOpRepresentationOf(node->op()); \
MarkAsRepresentation(type.representation(), node); \
- return VisitWord32Atomic##name(node); \
- }
- ATOMIC_CASE(Exchange)
- ATOMIC_CASE(CompareExchange)
- ATOMIC_CASE(Add)
- ATOMIC_CASE(Sub)
- ATOMIC_CASE(And)
- ATOMIC_CASE(Or)
- ATOMIC_CASE(Xor)
+ return Visit##rep##Atomic##name(node); \
+ }
+ ATOMIC_CASE(Add, Word32)
+ ATOMIC_CASE(Add, Word64)
+ ATOMIC_CASE(Sub, Word32)
+ ATOMIC_CASE(Sub, Word64)
+ ATOMIC_CASE(And, Word32)
+ ATOMIC_CASE(And, Word64)
+ ATOMIC_CASE(Or, Word32)
+ ATOMIC_CASE(Or, Word64)
+ ATOMIC_CASE(Xor, Word32)
+ ATOMIC_CASE(Xor, Word64)
+ ATOMIC_CASE(Exchange, Word32)
+ ATOMIC_CASE(Exchange, Word64)
+ ATOMIC_CASE(CompareExchange, Word32)
+ ATOMIC_CASE(CompareExchange, Word64)
#undef ATOMIC_CASE
case IrOpcode::kSpeculationFence:
return VisitSpeculationFence(node);
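
Illustrative aside, not part of this patch: the rewritten ATOMIC_CASE macro above now takes the word width as a second parameter and token-pastes it into both the opcode and the visitor name. A minimal standalone C++ sketch of that token-pasting pattern (toy names, nothing from V8):

  #include <iostream>
  // Same ##/# preprocessor mechanics as ATOMIC_CASE(name, rep) above.
  #define ATOMIC_CASE(name, rep) \
    void Visit##rep##Atomic##name() { std::cout << #rep "Atomic" #name "\n"; }
  ATOMIC_CASE(Add, Word64)  // defines VisitWord64AtomicAdd()
  #undef ATOMIC_CASE
  int main() { VisitWord64AtomicAdd(); }  // prints "Word64AtomicAdd"
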
@@ -1819,12 +1958,20 @@ void InstructionSelector::VisitNode(Node* node) {
}
}
-void InstructionSelector::VisitSpeculationPoison(Node* node) {
- CHECK(enable_speculation_poison_ == kEnableSpeculationPoison);
- OperandGenerator g(this);
- Emit(kArchNop, g.DefineAsLocation(node, LinkageLocation::ForRegister(
- kSpeculationPoisonRegister.code(),
- MachineType::UintPtr())));
+void InstructionSelector::VisitPoisonOnSpeculationWord(Node* node) {
+ if (poisoning_enabled_ != PoisoningMitigationLevel::kOff) {
+ OperandGenerator g(this);
+ Node* input_node = NodeProperties::GetValueInput(node, 0);
+ InstructionOperand input = g.UseRegister(input_node);
+ InstructionOperand output = g.DefineSameAsFirst(node);
+ Emit(kArchPoisonOnSpeculationWord, output, input);
+ } else {
+ EmitIdentity(node);
+ }
+}
+
+void InstructionSelector::VisitPoisonOnSpeculationTagged(Node* node) {
+ VisitPoisonOnSpeculationWord(node);
}
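
Illustrative aside, not part of this patch: kArchPoisonOnSpeculationWord masks a value with the speculation-poison register. Roughly, the poison mask is all-ones on the architecturally correct path and all-zeros once a safety-check branch has been mispredicted, so speculatively loaded values are neutralized before dependent uses. A minimal standalone sketch of the masking idea (the real code computes the mask branch-free from the flags, via the *_and_poison continuations):

  #include <cstdint>
  #include <iostream>

  int main() {
    uint32_t index = 2, length = 4;
    // All-ones if the bounds check passed, all-zeros otherwise.
    uintptr_t poison = (index < length) ? ~static_cast<uintptr_t>(0)
                                        : static_cast<uintptr_t>(0);
    uintptr_t loaded = 0xDEADBEEF;          // value produced under speculation
    uintptr_t sanitized = loaded & poison;  // masked before any dependent use
    std::cout << std::hex << sanitized << '\n';  // deadbeef (check passed)
  }
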
void InstructionSelector::VisitLoadStackPointer(Node* node) {
@@ -1842,6 +1989,11 @@ void InstructionSelector::VisitLoadParentFramePointer(Node* node) {
Emit(kArchParentFramePointer, g.DefineAsRegister(node));
}
+void InstructionSelector::VisitLoadRootsPointer(Node* node) {
+ OperandGenerator g(this);
+ Emit(kArchRootsPointer, g.DefineAsRegister(node));
+}
+
void InstructionSelector::VisitFloat64Acos(Node* node) {
VisitFloat64Ieee754Unop(node, kIeee754Float64Acos);
}
@@ -1929,18 +2081,18 @@ void InstructionSelector::VisitFloat64Tanh(Node* node) {
void InstructionSelector::EmitTableSwitch(const SwitchInfo& sw,
InstructionOperand& index_operand) {
OperandGenerator g(this);
- size_t input_count = 2 + sw.value_range;
- DCHECK_LE(sw.value_range, std::numeric_limits<size_t>::max() - 2);
+ size_t input_count = 2 + sw.value_range();
+ DCHECK_LE(sw.value_range(), std::numeric_limits<size_t>::max() - 2);
auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
inputs[0] = index_operand;
- InstructionOperand default_operand = g.Label(sw.default_branch);
+ InstructionOperand default_operand = g.Label(sw.default_branch());
std::fill(&inputs[1], &inputs[input_count], default_operand);
- for (size_t index = 0; index < sw.case_count; ++index) {
- size_t value = sw.case_values[index] - sw.min_value;
- BasicBlock* branch = sw.case_branches[index];
+ for (size_t index = 0; index < sw.case_count(); ++index) {
+ const CaseInfo& c = sw.GetCase(index);
+ size_t value = c.value - sw.min_value();
DCHECK_LE(0u, value);
DCHECK_LT(value + 2, input_count);
- inputs[value + 2] = g.Label(branch);
+ inputs[value + 2] = g.Label(c.branch);
}
Emit(kArchTableSwitch, 0, nullptr, input_count, inputs, 0, nullptr);
}
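
Illustrative aside, not part of this patch: EmitTableSwitch lays out a dense jump table whose slot 0 is the index operand, every remaining slot defaults to the default label, and each case overwrites the slot at (case value - min value) + 2. A standalone sketch with made-up labels:

  #include <cstdint>
  #include <iostream>
  #include <vector>

  struct Case { int32_t value; int label; };

  int main() {
    std::vector<Case> cases = {{3, 103}, {5, 105}, {4, 104}};
    int32_t min_value = 3, max_value = 5;
    int default_label = 100;

    uint32_t value_range =
        1u + static_cast<uint32_t>(max_value) - static_cast<uint32_t>(min_value);
    std::vector<int> inputs(2 + value_range, default_label);  // default-filled
    inputs[0] = -1;  // stands in for the index operand
    for (const Case& c : cases) {
      size_t slot =
          static_cast<uint32_t>(c.value) - static_cast<uint32_t>(min_value) + 2;
      inputs[slot] = c.label;
    }
    for (int v : inputs) std::cout << v << ' ';  // -1 100 103 104 105
    std::cout << '\n';
  }
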
@@ -1949,16 +2101,15 @@ void InstructionSelector::EmitTableSwitch(const SwitchInfo& sw,
void InstructionSelector::EmitLookupSwitch(const SwitchInfo& sw,
InstructionOperand& value_operand) {
OperandGenerator g(this);
- size_t input_count = 2 + sw.case_count * 2;
- DCHECK_LE(sw.case_count, (std::numeric_limits<size_t>::max() - 2) / 2);
+ size_t input_count = 2 + sw.case_count() * 2;
+ DCHECK_LE(sw.case_count(), (std::numeric_limits<size_t>::max() - 2) / 2);
auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
inputs[0] = value_operand;
- inputs[1] = g.Label(sw.default_branch);
- for (size_t index = 0; index < sw.case_count; ++index) {
- int32_t value = sw.case_values[index];
- BasicBlock* branch = sw.case_branches[index];
- inputs[index * 2 + 2 + 0] = g.TempImmediate(value);
- inputs[index * 2 + 2 + 1] = g.Label(branch);
+ inputs[1] = g.Label(sw.default_branch());
+ for (size_t index = 0; index < sw.case_count(); ++index) {
+ const CaseInfo& c = sw.GetCase(index);
+ inputs[index * 2 + 2 + 0] = g.TempImmediate(c.value);
+ inputs[index * 2 + 2 + 1] = g.Label(c.branch);
}
Emit(kArchLookupSwitch, 0, nullptr, input_count, inputs, 0, nullptr);
}
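
Illustrative aside, not part of this patch: EmitLookupSwitch emits the sparse form instead, with slot 0 holding the compared value, slot 1 the default label, and one (case value, case label) pair per case in the stable-sorted case order. Standalone sketch with made-up labels:

  #include <cstdint>
  #include <iostream>
  #include <vector>

  int main() {
    struct Case { int32_t value; int label; };
    std::vector<Case> cases = {{10, 201}, {-7, 202}};
    int default_label = 200;

    std::vector<int> inputs(2 + cases.size() * 2);
    inputs[0] = -1;  // stands in for the value operand
    inputs[1] = default_label;
    for (size_t i = 0; i < cases.size(); ++i) {
      inputs[i * 2 + 2] = cases[i].value;
      inputs[i * 2 + 3] = cases[i].label;
    }
    for (int v : inputs) std::cout << v << ' ';  // -1 200 10 201 -7 202
    std::cout << '\n';
  }
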
@@ -2155,7 +2306,7 @@ void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
#endif // V8_TARGET_ARCH_64_BIT
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64
+ !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32
void InstructionSelector::VisitF32x4SConvertI32x4(Node* node) {
UNIMPLEMENTED();
}
@@ -2164,19 +2315,37 @@ void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
UNIMPLEMENTED();
}
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
- // && !V8_TARGET_ARCH_MIPS64
+ // && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_X64
-void InstructionSelector::VisitF32x4RecipApprox(Node* node) { UNIMPLEMENTED(); }
+#if !V8_TARGET_ARCH_X64
+void InstructionSelector::VisitWord64AtomicLoad(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF32x4RecipSqrtApprox(Node* node) {
+void InstructionSelector::VisitWord64AtomicStore(Node* node) {
UNIMPLEMENTED();
}
+#endif // !V8_TARGET_ARCH_X64
+
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64
+void InstructionSelector::VisitWord64AtomicAdd(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitWord64AtomicSub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitWord64AtomicAnd(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitWord64AtomicOr(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF32x4AddHoriz(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
- // && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+void InstructionSelector::VisitWord64AtomicXor(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64
+
+#if !V8_TARGET_ARCH_X64
+void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
+ UNIMPLEMENTED();
+}
+#endif // !V8_TARGET_ARCH_X64
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64
@@ -2191,12 +2360,6 @@ void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
// && !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_X64
-void InstructionSelector::VisitI32x4AddHoriz(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
- // && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_X64
-
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI32x4SConvertI16x8Low(Node* node) {
UNIMPLEMENTED();
@@ -2228,12 +2391,6 @@ void InstructionSelector::VisitI16x8SConvertI32x4(Node* node) {
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
// && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
- !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitI16x8AddHoriz(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
- // && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI16x8UConvertI32x4(Node* node) {
@@ -2267,13 +2424,13 @@ void InstructionSelector::VisitI8x16ShrS(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16ShrU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16Mul(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS8x16Shuffle(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
// && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitS8x16Shuffle(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitS1x4AnyTrue(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS1x4AllTrue(Node* node) { UNIMPLEMENTED(); }
@@ -2559,33 +2716,31 @@ void InstructionSelector::VisitReturn(Node* ret) {
void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
BasicBlock* fbranch) {
- LoadPoisoning poisoning =
- IsSafetyCheckOf(branch->op()) == IsSafetyCheck::kSafetyCheck
- ? load_poisoning_
- : LoadPoisoning::kDontPoison;
+ bool update_poison =
+ IsSafetyCheckOf(branch->op()) == IsSafetyCheck::kSafetyCheck &&
+ poisoning_enabled_ == PoisoningMitigationLevel::kOn;
FlagsContinuation cont =
- FlagsContinuation::ForBranch(kNotEqual, tbranch, fbranch, poisoning);
+ FlagsContinuation::ForBranch(kNotEqual, tbranch, fbranch, update_poison);
VisitWordCompareZero(branch, branch->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- LoadPoisoning poisoning = p.is_safety_check() == IsSafetyCheck::kSafetyCheck
- ? load_poisoning_
- : LoadPoisoning::kDontPoison;
+ bool update_poison = p.is_safety_check() == IsSafetyCheck::kSafetyCheck &&
+ poisoning_enabled_ == PoisoningMitigationLevel::kOn;
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1),
- poisoning);
+ update_poison);
VisitWordCompareZero(node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- LoadPoisoning poisoning = p.is_safety_check() == IsSafetyCheck::kSafetyCheck
- ? load_poisoning_
- : LoadPoisoning::kDontPoison;
+ bool update_poison = p.is_safety_check() == IsSafetyCheck::kSafetyCheck &&
+ poisoning_enabled_ == PoisoningMitigationLevel::kOn;
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1), poisoning);
+ kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1),
+ update_poison);
VisitWordCompareZero(node, node->InputAt(0), &cont);
}
@@ -2602,53 +2757,6 @@ void InstructionSelector::VisitTrapUnless(Node* node,
VisitWordCompareZero(node, node->InputAt(0), &cont);
}
-Instruction* InstructionSelector::EmitDeoptimize(
- InstructionCode opcode, InstructionOperand output, InstructionOperand a,
- DeoptimizeKind kind, DeoptimizeReason reason,
- VectorSlotPair const& feedback, Node* frame_state) {
- size_t output_count = output.IsInvalid() ? 0 : 1;
- InstructionOperand inputs[] = {a};
- size_t input_count = arraysize(inputs);
- return EmitDeoptimize(opcode, output_count, &output, input_count, inputs,
- kind, reason, feedback, frame_state);
-}
-
-Instruction* InstructionSelector::EmitDeoptimize(
- InstructionCode opcode, InstructionOperand output, InstructionOperand a,
- InstructionOperand b, DeoptimizeKind kind, DeoptimizeReason reason,
- VectorSlotPair const& feedback, Node* frame_state) {
- size_t output_count = output.IsInvalid() ? 0 : 1;
- InstructionOperand inputs[] = {a, b};
- size_t input_count = arraysize(inputs);
- return EmitDeoptimize(opcode, output_count, &output, input_count, inputs,
- kind, reason, feedback, frame_state);
-}
-
-Instruction* InstructionSelector::EmitDeoptimize(
- InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
- size_t input_count, InstructionOperand* inputs, DeoptimizeKind kind,
- DeoptimizeReason reason, VectorSlotPair const& feedback,
- Node* frame_state) {
- OperandGenerator g(this);
- FrameStateDescriptor* const descriptor = GetFrameStateDescriptor(frame_state);
- InstructionOperandVector args(instruction_zone());
- args.reserve(input_count + 1 + descriptor->GetTotalSize());
- for (size_t i = 0; i < input_count; ++i) {
- args.push_back(inputs[i]);
- }
- opcode |= MiscField::encode(static_cast<int>(input_count));
- DCHECK_NE(DeoptimizeKind::kLazy, kind);
- int const state_id =
- sequence()->AddDeoptimizationEntry(descriptor, kind, reason, feedback);
- args.push_back(g.TempImmediate(state_id));
- StateObjectDeduplicator deduplicator(instruction_zone());
- AddInputsToFrameStateDescriptor(descriptor, frame_state, &g, &deduplicator,
- &args, FrameStateInputKind::kAny,
- instruction_zone());
- return Emit(opcode, output_count, outputs, args.size(), &args.front(), 0,
- nullptr);
-}
-
void InstructionSelector::EmitIdentity(Node* node) {
OperandGenerator g(this);
MarkAsUsed(node->InputAt(0));
@@ -2717,7 +2825,7 @@ FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor(
Node* state) {
DCHECK_EQ(IrOpcode::kFrameState, state->opcode());
DCHECK_EQ(kFrameStateInputCount, state->InputCount());
- FrameStateInfo state_info = OpParameter<FrameStateInfo>(state);
+ FrameStateInfo state_info = FrameStateInfoOf(state->op());
int parameters = static_cast<int>(
StateValuesAccess(state->InputAt(kFrameStateParametersInput)).size());
@@ -2776,7 +2884,7 @@ uint8_t InstructionSelector::CanonicalizeShuffle(Node* node) {
static const int kMaxLaneIndex = 15;
static const int kMaxShuffleIndex = 31;
- const uint8_t* shuffle = OpParameter<uint8_t*>(node);
+ const uint8_t* shuffle = OpParameter<uint8_t*>(node->op());
uint8_t mask = kMaxShuffleIndex;
// If shuffle is unary, set 'mask' to ignore the high bit of the indices.
// Replace any unused source with the other.
diff --git a/deps/v8/src/compiler/instruction-selector.h b/deps/v8/src/compiler/instruction-selector.h
index e30dba0aa0..fc597b5843 100644
--- a/deps/v8/src/compiler/instruction-selector.h
+++ b/deps/v8/src/compiler/instruction-selector.h
@@ -23,12 +23,208 @@ namespace compiler {
// Forward declarations.
class BasicBlock;
struct CallBuffer; // TODO(bmeurer): Remove this.
-class FlagsContinuation;
class Linkage;
class OperandGenerator;
-struct SwitchInfo;
+class SwitchInfo;
class StateObjectDeduplicator;
+// The flags continuation is a way to combine a branch or a materialization
+// of a boolean value with an instruction that sets the flags register.
+// The whole instruction is treated as a unit by the register allocator, and
+// thus no spills or moves can be introduced between the flags-setting
+// instruction and the branch or set it should be combined with.
+class FlagsContinuation final {
+ public:
+ FlagsContinuation() : mode_(kFlags_none) {}
+
+ // Creates a new flags continuation from the given condition and true/false
+ // blocks.
+ static FlagsContinuation ForBranch(FlagsCondition condition,
+ BasicBlock* true_block,
+ BasicBlock* false_block,
+ bool update_poison) {
+ FlagsMode mode = update_poison ? kFlags_branch_and_poison : kFlags_branch;
+ return FlagsContinuation(mode, condition, true_block, false_block);
+ }
+
+ static FlagsContinuation ForBranchAndPoison(FlagsCondition condition,
+ BasicBlock* true_block,
+ BasicBlock* false_block) {
+ return FlagsContinuation(kFlags_branch_and_poison, condition, true_block,
+ false_block);
+ }
+
+ // Creates a new flags continuation for an eager deoptimization exit.
+ static FlagsContinuation ForDeoptimize(
+ FlagsCondition condition, DeoptimizeKind kind, DeoptimizeReason reason,
+ VectorSlotPair const& feedback, Node* frame_state, bool update_poison) {
+ FlagsMode mode =
+ update_poison ? kFlags_deoptimize_and_poison : kFlags_deoptimize;
+ return FlagsContinuation(mode, condition, kind, reason, feedback,
+ frame_state);
+ }
+
+ // Creates a new flags continuation for a boolean value.
+ static FlagsContinuation ForSet(FlagsCondition condition, Node* result) {
+ return FlagsContinuation(condition, result);
+ }
+
+ // Creates a new flags continuation for a wasm trap.
+ static FlagsContinuation ForTrap(FlagsCondition condition,
+ Runtime::FunctionId trap_id, Node* result) {
+ return FlagsContinuation(condition, trap_id, result);
+ }
+
+ bool IsNone() const { return mode_ == kFlags_none; }
+ bool IsBranch() const {
+ return mode_ == kFlags_branch || mode_ == kFlags_branch_and_poison;
+ }
+ bool IsDeoptimize() const {
+ return mode_ == kFlags_deoptimize || mode_ == kFlags_deoptimize_and_poison;
+ }
+ bool IsPoisoned() const {
+ return mode_ == kFlags_branch_and_poison ||
+ mode_ == kFlags_deoptimize_and_poison;
+ }
+ bool IsSet() const { return mode_ == kFlags_set; }
+ bool IsTrap() const { return mode_ == kFlags_trap; }
+ FlagsCondition condition() const {
+ DCHECK(!IsNone());
+ return condition_;
+ }
+ DeoptimizeKind kind() const {
+ DCHECK(IsDeoptimize());
+ return kind_;
+ }
+ DeoptimizeReason reason() const {
+ DCHECK(IsDeoptimize());
+ return reason_;
+ }
+ VectorSlotPair const& feedback() const {
+ DCHECK(IsDeoptimize());
+ return feedback_;
+ }
+ Node* frame_state() const {
+ DCHECK(IsDeoptimize());
+ return frame_state_or_result_;
+ }
+ Node* result() const {
+ DCHECK(IsSet());
+ return frame_state_or_result_;
+ }
+ Runtime::FunctionId trap_id() const {
+ DCHECK(IsTrap());
+ return trap_id_;
+ }
+ BasicBlock* true_block() const {
+ DCHECK(IsBranch());
+ return true_block_;
+ }
+ BasicBlock* false_block() const {
+ DCHECK(IsBranch());
+ return false_block_;
+ }
+
+ void Negate() {
+ DCHECK(!IsNone());
+ condition_ = NegateFlagsCondition(condition_);
+ }
+
+ void Commute() {
+ DCHECK(!IsNone());
+ condition_ = CommuteFlagsCondition(condition_);
+ }
+
+ void Overwrite(FlagsCondition condition) { condition_ = condition; }
+
+ void OverwriteAndNegateIfEqual(FlagsCondition condition) {
+ DCHECK(condition_ == kEqual || condition_ == kNotEqual);
+ bool negate = condition_ == kEqual;
+ condition_ = condition;
+ if (negate) Negate();
+ }
+
+ void OverwriteUnsignedIfSigned() {
+ switch (condition_) {
+ case kSignedLessThan:
+ condition_ = kUnsignedLessThan;
+ break;
+ case kSignedLessThanOrEqual:
+ condition_ = kUnsignedLessThanOrEqual;
+ break;
+ case kSignedGreaterThan:
+ condition_ = kUnsignedGreaterThan;
+ break;
+ case kSignedGreaterThanOrEqual:
+ condition_ = kUnsignedGreaterThanOrEqual;
+ break;
+ default:
+ break;
+ }
+ }
+
+ // Encodes this flags continuation into the given opcode.
+ InstructionCode Encode(InstructionCode opcode) {
+ opcode |= FlagsModeField::encode(mode_);
+ if (mode_ != kFlags_none) {
+ opcode |= FlagsConditionField::encode(condition_);
+ }
+ return opcode;
+ }
+
+ private:
+ FlagsContinuation(FlagsMode mode, FlagsCondition condition,
+ BasicBlock* true_block, BasicBlock* false_block)
+ : mode_(mode),
+ condition_(condition),
+ true_block_(true_block),
+ false_block_(false_block) {
+ DCHECK(mode == kFlags_branch || mode == kFlags_branch_and_poison);
+ DCHECK_NOT_NULL(true_block);
+ DCHECK_NOT_NULL(false_block);
+ }
+
+ FlagsContinuation(FlagsMode mode, FlagsCondition condition,
+ DeoptimizeKind kind, DeoptimizeReason reason,
+ VectorSlotPair const& feedback, Node* frame_state)
+ : mode_(mode),
+ condition_(condition),
+ kind_(kind),
+ reason_(reason),
+ feedback_(feedback),
+ frame_state_or_result_(frame_state) {
+ DCHECK(mode == kFlags_deoptimize || mode == kFlags_deoptimize_and_poison);
+ DCHECK_NOT_NULL(frame_state);
+ }
+
+ FlagsContinuation(FlagsCondition condition, Node* result)
+ : mode_(kFlags_set),
+ condition_(condition),
+ frame_state_or_result_(result) {
+ DCHECK_NOT_NULL(result);
+ }
+
+ FlagsContinuation(FlagsCondition condition, Runtime::FunctionId trap_id,
+ Node* result)
+ : mode_(kFlags_trap),
+ condition_(condition),
+ frame_state_or_result_(result),
+ trap_id_(trap_id) {
+ DCHECK_NOT_NULL(result);
+ }
+
+ FlagsMode const mode_;
+ FlagsCondition condition_;
+ DeoptimizeKind kind_; // Only valid if mode_ == kFlags_deoptimize*
+ DeoptimizeReason reason_; // Only valid if mode_ == kFlags_deoptimize*
+ VectorSlotPair feedback_; // Only valid if mode_ == kFlags_deoptimize*
+ Node* frame_state_or_result_; // Only valid if mode_ == kFlags_deoptimize*
+ // or mode_ == kFlags_set.
+ BasicBlock* true_block_; // Only valid if mode_ == kFlags_branch*.
+ BasicBlock* false_block_; // Only valid if mode_ == kFlags_branch*.
+ Runtime::FunctionId trap_id_; // Only valid if mode_ == kFlags_trap.
+};
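
Illustrative aside, not part of this patch: Encode() above folds the continuation's mode and condition into the instruction opcode word through the FlagsModeField / FlagsConditionField bit fields. The field offsets and enum values in this standalone sketch are invented for illustration only and do not match V8's actual layout:

  #include <cstdint>
  #include <iostream>

  enum FlagsMode { kFlags_none = 0, kFlags_branch = 1, kFlags_branch_and_poison = 2 };
  enum FlagsCondition { kEqual = 0, kNotEqual = 1 };

  uint32_t Encode(uint32_t opcode, FlagsMode mode, FlagsCondition cond) {
    opcode |= static_cast<uint32_t>(mode) << 8;  // illustrative field offsets
    if (mode != kFlags_none) opcode |= static_cast<uint32_t>(cond) << 12;
    return opcode;
  }

  int main() {
    uint32_t op = Encode(/*base opcode*/ 0x01, kFlags_branch, kNotEqual);
    std::cout << std::hex << op << '\n';  // 1101
  }
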
+
// This struct connects nodes of parameters which are going to be pushed on the
// call stack with their parameter index in the call descriptor of the callee.
struct PushParameter {
@@ -55,24 +251,20 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
kDisableSwitchJumpTable,
kEnableSwitchJumpTable
};
- enum EnableSpeculationPoison {
- kDisableSpeculationPoison,
- kEnableSpeculationPoison
- };
InstructionSelector(
Zone* zone, size_t node_count, Linkage* linkage,
InstructionSequence* sequence, Schedule* schedule,
SourcePositionTable* source_positions, Frame* frame,
EnableSwitchJumpTable enable_switch_jump_table,
- EnableSpeculationPoison enable_speculation_poison,
SourcePositionMode source_position_mode = kCallSourcePositions,
Features features = SupportedFeatures(),
EnableScheduling enable_scheduling = FLAG_turbo_instruction_scheduling
? kEnableScheduling
: kDisableScheduling,
EnableSerialization enable_serialization = kDisableSerialization,
- LoadPoisoning poisoning = LoadPoisoning::kDontPoison);
+ PoisoningMitigationLevel poisoning_enabled =
+ PoisoningMitigationLevel::kOff);
// Visit code for the entire graph with the included schedule.
bool SelectInstructions();
@@ -80,6 +272,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
void StartBlock(RpoNumber rpo);
void EndBlock(RpoNumber rpo);
void AddInstruction(Instruction* instr);
+ void AddTerminator(Instruction* instr);
// ===========================================================================
// ============= Architecture-independent code emission methods. =============
@@ -117,20 +310,29 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
InstructionOperand* temps = nullptr);
Instruction* Emit(Instruction* instr);
+ // Instructions with [0-3] operands and no output; they use labels for the
+ // true and false blocks of the continuation.

+ Instruction* EmitWithContinuation(InstructionCode opcode,
+ FlagsContinuation* cont);
+ Instruction* EmitWithContinuation(InstructionCode opcode,
+ InstructionOperand a,
+ FlagsContinuation* cont);
+ Instruction* EmitWithContinuation(InstructionCode opcode,
+ InstructionOperand a, InstructionOperand b,
+ FlagsContinuation* cont);
+ Instruction* EmitWithContinuation(InstructionCode opcode,
+ InstructionOperand a, InstructionOperand b,
+ InstructionOperand c,
+ FlagsContinuation* cont);
+ Instruction* EmitWithContinuation(InstructionCode opcode, size_t output_count,
+ InstructionOperand* outputs,
+ size_t input_count,
+ InstructionOperand* inputs,
+ FlagsContinuation* cont);
+
// ===========================================================================
// ===== Architecture-independent deoptimization exit emission methods. ======
// ===========================================================================
-
- Instruction* EmitDeoptimize(InstructionCode opcode, InstructionOperand output,
- InstructionOperand a, DeoptimizeKind kind,
- DeoptimizeReason reason,
- VectorSlotPair const& feedback,
- Node* frame_state);
- Instruction* EmitDeoptimize(InstructionCode opcode, InstructionOperand output,
- InstructionOperand a, InstructionOperand b,
- DeoptimizeKind kind, DeoptimizeReason reason,
- VectorSlotPair const& feedback,
- Node* frame_state);
Instruction* EmitDeoptimize(InstructionCode opcode, size_t output_count,
InstructionOperand* outputs, size_t input_count,
InstructionOperand* inputs, DeoptimizeKind kind,
@@ -169,9 +371,6 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
static MachineOperatorBuilder::AlignmentRequirements AlignmentRequirements();
- // TODO(jarin) This is temporary until the poisoning is universally supported.
- static bool SupportsSpeculationPoisoning();
-
// ===========================================================================
// ============ Architecture-independent graph covering methods. =============
// ===========================================================================
@@ -241,6 +440,11 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
InstructionScheduler::SchedulerSupported();
}
+ void AppendDeoptimizeArguments(InstructionOperandVector* args,
+ DeoptimizeKind kind, DeoptimizeReason reason,
+ VectorSlotPair const& feedback,
+ Node* frame_state);
+
void EmitTableSwitch(const SwitchInfo& sw, InstructionOperand& index_operand);
void EmitLookupSwitch(const SwitchInfo& sw,
InstructionOperand& value_operand);
@@ -442,6 +646,10 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
void VisitAtomicBinaryOperation(Node* node, ArchOpcode int8_op,
ArchOpcode uint8_op, ArchOpcode int16_op,
ArchOpcode uint16_op, ArchOpcode word32_op);
+ void VisitWord64AtomicBinaryOperation(Node* node, ArchOpcode uint8_op,
+ ArchOpcode uint16_op,
+ ArchOpcode uint32_op,
+ ArchOpcode uint64_op);
// ===========================================================================
@@ -454,6 +662,8 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
Schedule* const schedule_;
BasicBlock* current_block_;
ZoneVector<Instruction*> instructions_;
+ InstructionOperandVector continuation_inputs_;
+ InstructionOperandVector continuation_outputs_;
BoolVector defined_;
BoolVector used_;
IntVector effect_level_;
@@ -463,8 +673,8 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
EnableScheduling enable_scheduling_;
EnableSerialization enable_serialization_;
EnableSwitchJumpTable enable_switch_jump_table_;
- EnableSpeculationPoison enable_speculation_poison_;
- LoadPoisoning load_poisoning_;
+
+ PoisoningMitigationLevel poisoning_enabled_;
Frame* frame_;
bool instruction_selection_failed_;
};
diff --git a/deps/v8/src/compiler/instruction.cc b/deps/v8/src/compiler/instruction.cc
index 85d4533d25..68f0ae2a9c 100644
--- a/deps/v8/src/compiler/instruction.cc
+++ b/deps/v8/src/compiler/instruction.cc
@@ -156,8 +156,10 @@ std::ostream& operator<<(std::ostream& os,
return os << "(S)";
case UnallocatedOperand::SAME_AS_FIRST_INPUT:
return os << "(1)";
- case UnallocatedOperand::ANY:
+ case UnallocatedOperand::REGISTER_OR_SLOT:
return os << "(-)";
+ case UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT:
+ return os << "(*)";
}
}
case InstructionOperand::CONSTANT:
diff --git a/deps/v8/src/compiler/instruction.h b/deps/v8/src/compiler/instruction.h
index 11da39aacb..803f3e0c1d 100644
--- a/deps/v8/src/compiler/instruction.h
+++ b/deps/v8/src/compiler/instruction.h
@@ -170,7 +170,8 @@ class UnallocatedOperand final : public InstructionOperand {
enum ExtendedPolicy {
NONE,
- ANY,
+ REGISTER_OR_SLOT,
+ REGISTER_OR_SLOT_OR_CONSTANT,
FIXED_REGISTER,
FIXED_FP_REGISTER,
MUST_HAVE_REGISTER,
@@ -236,8 +237,13 @@ class UnallocatedOperand final : public InstructionOperand {
}
// Predicates for the operand policy.
- bool HasAnyPolicy() const {
- return basic_policy() == EXTENDED_POLICY && extended_policy() == ANY;
+ bool HasRegisterOrSlotPolicy() const {
+ return basic_policy() == EXTENDED_POLICY &&
+ extended_policy() == REGISTER_OR_SLOT;
+ }
+ bool HasRegisterOrSlotOrConstantPolicy() const {
+ return basic_policy() == EXTENDED_POLICY &&
+ extended_policy() == REGISTER_OR_SLOT_OR_CONSTANT;
}
bool HasFixedPolicy() const {
return basic_policy() == FIXED_SLOT ||
diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc
index ca1bf399b0..8248715876 100644
--- a/deps/v8/src/compiler/int64-lowering.cc
+++ b/deps/v8/src/compiler/int64-lowering.cc
@@ -149,7 +149,7 @@ void Int64Lowering::GetIndexNodes(Node* index, Node*& index_low,
void Int64Lowering::LowerNode(Node* node) {
switch (node->opcode()) {
case IrOpcode::kInt64Constant: {
- int64_t value = OpParameter<int64_t>(node);
+ int64_t value = OpParameter<int64_t>(node->op());
Node* low_node = graph()->NewNode(
common()->Int32Constant(static_cast<int32_t>(value & 0xFFFFFFFF)));
Node* high_node = graph()->NewNode(
@@ -164,7 +164,7 @@ void Int64Lowering::LowerNode(Node* node) {
rep = LoadRepresentationOf(node->op()).representation();
} else {
DCHECK_EQ(IrOpcode::kUnalignedLoad, node->opcode());
- rep = UnalignedLoadRepresentationOf(node->op()).representation();
+ rep = LoadRepresentationOf(node->op()).representation();
}
if (rep == MachineRepresentation::kWord64) {
@@ -281,15 +281,15 @@ void Int64Lowering::LowerNode(Node* node) {
static_cast<int>(signature()->parameter_count())) {
int old_index = ParameterIndexOf(node->op());
// TODO(wasm): Make this part not wasm specific.
- // Prevent special lowering of the WasmContext parameter.
- if (old_index == kWasmContextParameterIndex) {
+ // Prevent special lowering of the instance parameter.
+ if (old_index == kWasmInstanceParameterIndex) {
DefaultLowering(node);
break;
}
// Adjust old_index to be compliant with the signature.
--old_index;
int new_index = GetParameterIndexAfterLowering(signature(), old_index);
- // Adjust new_index to consider the WasmContext parameter.
+ // Adjust new_index to consider the instance parameter.
++new_index;
NodeProperties::ChangeOp(node, common()->Parameter(new_index));
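
Illustrative aside, not part of this patch: the Int64Constant lowering earlier in this file splits a 64-bit constant into two Int32Constants holding its low and high halves. Standalone sketch of that split:

  #include <cstdint>
  #include <iostream>

  int main() {
    int64_t value = 0x11223344AABBCCDDLL;
    int32_t low = static_cast<int32_t>(value & 0xFFFFFFFF);
    int32_t high = static_cast<int32_t>(value >> 32);
    std::cout << std::hex << static_cast<uint32_t>(low) << ' '
              << static_cast<uint32_t>(high) << '\n';  // aabbccdd 11223344
  }
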
diff --git a/deps/v8/src/compiler/js-builtin-reducer.cc b/deps/v8/src/compiler/js-builtin-reducer.cc
index a6d98586ad..c3483092d2 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.cc
+++ b/deps/v8/src/compiler/js-builtin-reducer.cc
@@ -115,653 +115,6 @@ JSBuiltinReducer::JSBuiltinReducer(Editor* editor, JSGraph* jsgraph,
native_context_(native_context),
type_cache_(TypeCache::Get()) {}
-namespace {
-
-Maybe<InstanceType> GetInstanceTypeWitness(Node* node) {
- ZoneHandleSet<Map> maps;
- Node* receiver = NodeProperties::GetValueInput(node, 1);
- Node* effect = NodeProperties::GetEffectInput(node);
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(receiver, effect, &maps);
-
- if (result == NodeProperties::kNoReceiverMaps || maps.size() == 0) {
- return Nothing<InstanceType>();
- }
-
- InstanceType first_type = maps[0]->instance_type();
- for (const Handle<Map>& map : maps) {
- if (map->instance_type() != first_type) return Nothing<InstanceType>();
- }
- return Just(first_type);
-}
-
-bool CanInlineJSArrayIteration(Handle<Map> receiver_map) {
- Isolate* const isolate = receiver_map->GetIsolate();
- // Ensure that the [[Prototype]] is actually an exotic Array
- if (!receiver_map->prototype()->IsJSArray()) return false;
-
- // Don't inline JSArrays with slow elements of any kind
- if (!IsFastElementsKind(receiver_map->elements_kind())) return false;
-
- // If the receiver map has packed elements, no need to check the prototype.
- // This requires a MapCheck where this is used.
- if (!IsHoleyElementsKind(receiver_map->elements_kind())) return true;
-
- Handle<JSArray> receiver_prototype(JSArray::cast(receiver_map->prototype()),
- isolate);
- // Ensure all prototypes of the {receiver} are stable.
- for (PrototypeIterator it(isolate, receiver_prototype, kStartAtReceiver);
- !it.IsAtEnd(); it.Advance()) {
- Handle<JSReceiver> current = PrototypeIterator::GetCurrent<JSReceiver>(it);
- if (!current->map()->is_stable()) return false;
- }
-
- // For holey Arrays, ensure that the no_elements_protector cell is valid (must
- // be a CompilationDependency), and the JSArray prototype has not been
- // altered.
- return receiver_map->instance_type() == JS_ARRAY_TYPE &&
- (!receiver_map->is_dictionary_map() || receiver_map->is_stable()) &&
- isolate->IsNoElementsProtectorIntact() &&
- isolate->IsAnyInitialArrayPrototype(receiver_prototype);
-}
-
-} // namespace
-
-Reduction JSBuiltinReducer::ReduceArrayIterator(Node* node,
- IterationKind kind) {
- Handle<Map> receiver_map;
- if (NodeProperties::GetMapWitness(node).ToHandle(&receiver_map)) {
- return ReduceArrayIterator(receiver_map, node, kind,
- ArrayIteratorKind::kArray);
- }
- return NoChange();
-}
-
-Reduction JSBuiltinReducer::ReduceTypedArrayIterator(Node* node,
- IterationKind kind) {
- Handle<Map> receiver_map;
- if (NodeProperties::GetMapWitness(node).ToHandle(&receiver_map) &&
- receiver_map->instance_type() == JS_TYPED_ARRAY_TYPE) {
- return ReduceArrayIterator(receiver_map, node, kind,
- ArrayIteratorKind::kTypedArray);
- }
- return NoChange();
-}
-
-Reduction JSBuiltinReducer::ReduceArrayIterator(Handle<Map> receiver_map,
- Node* node, IterationKind kind,
- ArrayIteratorKind iter_kind) {
- Node* receiver = NodeProperties::GetValueInput(node, 1);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
-
- if (iter_kind == ArrayIteratorKind::kTypedArray) {
- // See if we can skip the neutering check.
- if (isolate()->IsArrayBufferNeuteringIntact()) {
- // Add a code dependency so we are deoptimized in case an ArrayBuffer
- // gets neutered.
- dependencies()->AssumePropertyCell(
- factory()->array_buffer_neutering_protector());
- } else {
- // For JSTypedArray iterator methods, deopt if the buffer is neutered.
- // This is potentially a deopt loop, but should be extremely unlikely.
- DCHECK_EQ(JS_TYPED_ARRAY_TYPE, receiver_map->instance_type());
- Node* buffer = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
- receiver, effect, control);
-
- // Deoptimize if the {buffer} has been neutered.
- Node* check = effect = graph()->NewNode(
- simplified()->ArrayBufferWasNeutered(), buffer, effect, control);
- check = graph()->NewNode(simplified()->BooleanNot(), check);
- effect = graph()->NewNode(
- simplified()->CheckIf(DeoptimizeReason::kArrayBufferWasNeutered),
- check, effect, control);
- }
- }
-
- int map_index = -1;
- Node* object_map = jsgraph()->UndefinedConstant();
- switch (receiver_map->instance_type()) {
- case JS_ARRAY_TYPE:
- if (kind == IterationKind::kKeys) {
- map_index = Context::FAST_ARRAY_KEY_ITERATOR_MAP_INDEX;
- } else {
- map_index = kind == IterationKind::kValues
- ? Context::FAST_SMI_ARRAY_VALUE_ITERATOR_MAP_INDEX
- : Context::FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX;
-
- if (CanInlineJSArrayIteration(receiver_map)) {
- // Use `generic` elements for holey arrays if there may be elements
- // on the prototype chain.
- map_index += static_cast<int>(receiver_map->elements_kind());
- object_map = jsgraph()->Constant(receiver_map);
- if (IsHoleyElementsKind(receiver_map->elements_kind())) {
- Handle<JSObject> initial_array_prototype(
- native_context()->initial_array_prototype(), isolate());
- dependencies()->AssumePrototypeMapsStable(receiver_map,
- initial_array_prototype);
- }
- } else {
- map_index += (Context::GENERIC_ARRAY_VALUE_ITERATOR_MAP_INDEX -
- Context::FAST_SMI_ARRAY_VALUE_ITERATOR_MAP_INDEX);
- }
- }
- break;
- case JS_TYPED_ARRAY_TYPE:
- if (kind == IterationKind::kKeys) {
- map_index = Context::TYPED_ARRAY_KEY_ITERATOR_MAP_INDEX;
- } else {
- DCHECK_GE(receiver_map->elements_kind(), UINT8_ELEMENTS);
- DCHECK_LE(receiver_map->elements_kind(), BIGINT64_ELEMENTS);
- map_index = (kind == IterationKind::kValues
- ? Context::UINT8_ARRAY_VALUE_ITERATOR_MAP_INDEX
- : Context::UINT8_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX) +
- (receiver_map->elements_kind() - UINT8_ELEMENTS);
- }
- break;
- default:
- if (kind == IterationKind::kKeys) {
- map_index = Context::GENERIC_ARRAY_KEY_ITERATOR_MAP_INDEX;
- } else if (kind == IterationKind::kValues) {
- map_index = Context::GENERIC_ARRAY_VALUE_ITERATOR_MAP_INDEX;
- } else {
- map_index = Context::GENERIC_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX;
- }
- break;
- }
-
- DCHECK_GE(map_index, Context::TYPED_ARRAY_KEY_ITERATOR_MAP_INDEX);
- DCHECK_LE(map_index, Context::GENERIC_ARRAY_VALUE_ITERATOR_MAP_INDEX);
-
- Handle<Map> map(Map::cast(native_context()->get(map_index)), isolate());
-
- // Allocate new iterator and attach the iterator to this object.
- AllocationBuilder a(jsgraph(), effect, control);
- a.Allocate(JSArrayIterator::kSize, NOT_TENURED, Type::OtherObject());
- a.Store(AccessBuilder::ForMap(), map);
- a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
- jsgraph()->EmptyFixedArrayConstant());
- a.Store(AccessBuilder::ForJSObjectElements(),
- jsgraph()->EmptyFixedArrayConstant());
- a.Store(AccessBuilder::ForJSArrayIteratorObject(), receiver);
- a.Store(AccessBuilder::ForJSArrayIteratorIndex(), jsgraph()->ZeroConstant());
- a.Store(AccessBuilder::ForJSArrayIteratorObjectMap(), object_map);
- Node* value = effect = a.Finish();
-
- // Replace it.
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
-}
-
-Reduction JSBuiltinReducer::ReduceFastArrayIteratorNext(InstanceType type,
- Node* node,
- IterationKind kind) {
- Node* iterator = NodeProperties::GetValueInput(node, 1);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- Node* context = NodeProperties::GetContextInput(node);
-
- if (kind != IterationKind::kKeys &&
- !isolate()->IsFastArrayIterationIntact()) {
- // Avoid deopt loops for non-key iteration if the
- // fast_array_iteration_protector cell has been invalidated.
- return NoChange();
- }
-
- ElementsKind elements_kind =
- JSArrayIterator::ElementsKindForInstanceType(type);
-
- if (IsHoleyElementsKind(elements_kind)) {
- if (!isolate()->IsNoElementsProtectorIntact()) {
- return NoChange();
- } else {
- Handle<JSObject> initial_array_prototype(
- native_context()->initial_array_prototype(), isolate());
- dependencies()->AssumePropertyCell(factory()->no_elements_protector());
- }
- }
-
- Node* array = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayIteratorObject()),
- iterator, effect, control);
- Node* check0 = graph()->NewNode(simplified()->ReferenceEqual(), array,
- jsgraph()->UndefinedConstant());
- Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control);
-
- Node* vdone_false0;
- Node* vfalse0;
- Node* efalse0 = effect;
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- {
- // iterator.[[IteratedObject]] !== undefined, continue iterating.
- Node* index = efalse0 = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayIteratorIndex(
- JS_ARRAY_TYPE, elements_kind)),
- iterator, efalse0, if_false0);
-
- Node* length = efalse0 = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayLength(elements_kind)),
- array, efalse0, if_false0);
- Node* check1 =
- graph()->NewNode(simplified()->NumberLessThan(), index, length);
- Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check1, if_false0);
-
- Node* vdone_true1;
- Node* vtrue1;
- Node* etrue1 = efalse0;
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- {
- // iterator.[[NextIndex]] < array.length, continue iterating
- vdone_true1 = jsgraph()->FalseConstant();
- if (kind == IterationKind::kKeys) {
- vtrue1 = index;
- } else {
- // For value/entry iteration, first step is a mapcheck to ensure
- // inlining is still valid.
- Node* array_map = etrue1 =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- array, etrue1, if_true1);
- Node* orig_map = etrue1 =
- graph()->NewNode(simplified()->LoadField(
- AccessBuilder::ForJSArrayIteratorObjectMap()),
- iterator, etrue1, if_true1);
- Node* check_map = graph()->NewNode(simplified()->ReferenceEqual(),
- array_map, orig_map);
- etrue1 =
- graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kWrongMap),
- check_map, etrue1, if_true1);
- }
-
- if (kind != IterationKind::kKeys) {
- Node* elements = etrue1 = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
- array, etrue1, if_true1);
- Node* value = etrue1 = graph()->NewNode(
- simplified()->LoadElement(
- AccessBuilder::ForFixedArrayElement(elements_kind)),
- elements, index, etrue1, if_true1);
-
- // Convert hole to undefined if needed.
- if (elements_kind == HOLEY_ELEMENTS ||
- elements_kind == HOLEY_SMI_ELEMENTS) {
- value = graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(),
- value);
- } else if (elements_kind == HOLEY_DOUBLE_ELEMENTS) {
- // TODO(6587): avoid deopt if not all uses of value are truncated.
- CheckFloat64HoleMode mode = CheckFloat64HoleMode::kAllowReturnHole;
- value = etrue1 = graph()->NewNode(
- simplified()->CheckFloat64Hole(mode), value, etrue1, if_true1);
- }
-
- if (kind == IterationKind::kEntries) {
- // Allocate elements for key/value pair
- vtrue1 = etrue1 =
- graph()->NewNode(javascript()->CreateKeyValueArray(), index,
- value, context, etrue1);
- } else {
- DCHECK_EQ(kind, IterationKind::kValues);
- vtrue1 = value;
- }
- }
-
- Node* next_index = graph()->NewNode(simplified()->NumberAdd(), index,
- jsgraph()->OneConstant());
- next_index = graph()->NewNode(simplified()->NumberToUint32(), next_index);
-
- etrue1 = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForJSArrayIteratorIndex(
- JS_ARRAY_TYPE, elements_kind)),
- iterator, next_index, etrue1, if_true1);
- }
-
- Node* vdone_false1;
- Node* vfalse1;
- Node* efalse1 = efalse0;
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- {
- // iterator.[[NextIndex]] >= array.length, stop iterating.
- vdone_false1 = jsgraph()->TrueConstant();
- vfalse1 = jsgraph()->UndefinedConstant();
- efalse1 = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForJSArrayIteratorObject()),
- iterator, vfalse1, efalse1, if_false1);
- }
-
- if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- efalse0 =
- graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
- vfalse0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue1, vfalse1, if_false0);
- vdone_false0 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vdone_true1, vdone_false1, if_false0);
- }
-
- Node* vdone_true0;
- Node* vtrue0;
- Node* etrue0 = effect;
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- {
- // iterator.[[IteratedObject]] === undefined, the iterator is done.
- vdone_true0 = jsgraph()->TrueConstant();
- vtrue0 = jsgraph()->UndefinedConstant();
- }
-
- control = graph()->NewNode(common()->Merge(2), if_false0, if_true0);
- effect = graph()->NewNode(common()->EffectPhi(2), efalse0, etrue0, control);
- Node* value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vfalse0, vtrue0, control);
- Node* done =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vdone_false0, vdone_true0, control);
-
- // Create IteratorResult object.
- value = effect = graph()->NewNode(javascript()->CreateIterResultObject(),
- value, done, context, effect);
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
-}
-
-Reduction JSBuiltinReducer::ReduceTypedArrayIteratorNext(InstanceType type,
- Node* node,
- IterationKind kind) {
- Node* iterator = NodeProperties::GetValueInput(node, 1);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- Node* context = NodeProperties::GetContextInput(node);
-
- ElementsKind elements_kind =
- JSArrayIterator::ElementsKindForInstanceType(type);
-
- Node* array = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayIteratorObject()),
- iterator, effect, control);
- Node* check0 = graph()->NewNode(simplified()->ReferenceEqual(), array,
- jsgraph()->UndefinedConstant());
- Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control);
-
- Node* vdone_false0;
- Node* vfalse0;
- Node* efalse0 = effect;
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- {
- // iterator.[[IteratedObject]] !== undefined, continue iterating.
- Node* index = efalse0 = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayIteratorIndex(
- JS_TYPED_ARRAY_TYPE, elements_kind)),
- iterator, efalse0, if_false0);
-
- // typedarray.[[ViewedArrayBuffer]]
- Node* buffer = efalse0 = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
- array, efalse0, if_false0);
-
- // See if we can skip the neutering check.
- if (isolate()->IsArrayBufferNeuteringIntact()) {
- // Add a code dependency so we are deoptimized in case an ArrayBuffer
- // gets neutered.
- dependencies()->AssumePropertyCell(
- factory()->array_buffer_neutering_protector());
- } else {
- // Deoptimize if the array buffer was neutered.
- Node* check1 = efalse0 = graph()->NewNode(
- simplified()->ArrayBufferWasNeutered(), buffer, efalse0, if_false0);
- check1 = graph()->NewNode(simplified()->BooleanNot(), check1);
- efalse0 = graph()->NewNode(
- simplified()->CheckIf(DeoptimizeReason::kArrayBufferWasNeutered),
- check1, efalse0, if_false0);
- }
-
- Node* length = efalse0 = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSTypedArrayLength()), array,
- efalse0, if_false0);
-
- Node* check2 =
- graph()->NewNode(simplified()->NumberLessThan(), index, length);
- Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check2, if_false0);
-
- Node* vdone_true2;
- Node* vtrue2;
- Node* etrue2 = efalse0;
- Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
- {
- // iterator.[[NextIndex]] < array.length, continue iterating
- vdone_true2 = jsgraph()->FalseConstant();
- if (kind == IterationKind::kKeys) {
- vtrue2 = index;
- }
-
- Node* next_index = graph()->NewNode(simplified()->NumberAdd(), index,
- jsgraph()->OneConstant());
- next_index = graph()->NewNode(simplified()->NumberToUint32(), next_index);
-
- etrue2 = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForJSArrayIteratorIndex(
- JS_TYPED_ARRAY_TYPE, elements_kind)),
- iterator, next_index, etrue2, if_true2);
-
- if (kind != IterationKind::kKeys) {
- Node* elements = etrue2 = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
- array, etrue2, if_true2);
- Node* base_ptr = etrue2 = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForFixedTypedArrayBaseBasePointer()),
- elements, etrue2, if_true2);
- Node* external_ptr = etrue2 = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForFixedTypedArrayBaseExternalPointer()),
- elements, etrue2, if_true2);
-
- ExternalArrayType array_type = kExternalInt8Array;
- switch (elements_kind) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case TYPE##_ELEMENTS: \
- array_type = kExternal##Type##Array; \
- break;
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
- default:
- UNREACHABLE();
-#undef TYPED_ARRAY_CASE
- }
-
- Node* value = etrue2 =
- graph()->NewNode(simplified()->LoadTypedElement(array_type), buffer,
- base_ptr, external_ptr, index, etrue2, if_true2);
-
- if (kind == IterationKind::kEntries) {
- // Allocate elements for key/value pair
- vtrue2 = etrue2 =
- graph()->NewNode(javascript()->CreateKeyValueArray(), index,
- value, context, etrue2);
- } else {
- DCHECK_EQ(IterationKind::kValues, kind);
- vtrue2 = value;
- }
- }
- }
-
- Node* vdone_false2;
- Node* vfalse2;
- Node* efalse2 = efalse0;
- Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
- {
- // iterator.[[NextIndex]] >= array.length, stop iterating.
- vdone_false2 = jsgraph()->TrueConstant();
- vfalse2 = jsgraph()->UndefinedConstant();
- efalse2 = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForJSArrayIteratorObject()),
- iterator, vfalse2, efalse2, if_false2);
- }
-
- if_false0 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
- efalse0 =
- graph()->NewNode(common()->EffectPhi(2), etrue2, efalse2, if_false0);
- vfalse0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue2, vfalse2, if_false0);
- vdone_false0 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vdone_true2, vdone_false2, if_false0);
- }
-
- Node* vdone_true0;
- Node* vtrue0;
- Node* etrue0 = effect;
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- {
- // iterator.[[IteratedObject]] === undefined, the iterator is done.
- vdone_true0 = jsgraph()->TrueConstant();
- vtrue0 = jsgraph()->UndefinedConstant();
- }
-
- control = graph()->NewNode(common()->Merge(2), if_false0, if_true0);
- effect = graph()->NewNode(common()->EffectPhi(2), efalse0, etrue0, control);
- Node* value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vfalse0, vtrue0, control);
- Node* done =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vdone_false0, vdone_true0, control);
-
- // Create IteratorResult object.
- value = effect = graph()->NewNode(javascript()->CreateIterResultObject(),
- value, done, context, effect);
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
-}
-
-// ES #sec-get-%typedarray%.prototype-@@tostringtag
-Reduction JSBuiltinReducer::ReduceTypedArrayToStringTag(Node* node) {
- Node* receiver = NodeProperties::GetValueInput(node, 1);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
-
- NodeVector values(graph()->zone());
- NodeVector effects(graph()->zone());
- NodeVector controls(graph()->zone());
-
- Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), receiver);
- control =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
-
- values.push_back(jsgraph()->UndefinedConstant());
- effects.push_back(effect);
- controls.push_back(graph()->NewNode(common()->IfTrue(), control));
-
- control = graph()->NewNode(common()->IfFalse(), control);
- Node* receiver_map = effect =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- receiver, effect, control);
- Node* receiver_bit_field2 = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapBitField2()), receiver_map,
- effect, control);
- Node* receiver_elements_kind = graph()->NewNode(
- simplified()->NumberShiftRightLogical(),
- graph()->NewNode(simplified()->NumberBitwiseAnd(), receiver_bit_field2,
- jsgraph()->Constant(Map::ElementsKindBits::kMask)),
- jsgraph()->Constant(Map::ElementsKindBits::kShift));
-
- // Offset the elements kind by FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND,
- // so that the branch cascade below is turned into a simple table
- // switch by the ControlFlowOptimizer later.
- receiver_elements_kind = graph()->NewNode(
- simplified()->NumberSubtract(), receiver_elements_kind,
- jsgraph()->Constant(FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND));
-
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- do { \
- Node* check = graph()->NewNode( \
- simplified()->NumberEqual(), receiver_elements_kind, \
- jsgraph()->Constant(TYPE##_ELEMENTS - \
- FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND)); \
- control = graph()->NewNode(common()->Branch(), check, control); \
- values.push_back(jsgraph()->HeapConstant( \
- factory()->InternalizeUtf8String(#Type "Array"))); \
- effects.push_back(effect); \
- controls.push_back(graph()->NewNode(common()->IfTrue(), control)); \
- control = graph()->NewNode(common()->IfFalse(), control); \
- } while (false);
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
-
- values.push_back(jsgraph()->UndefinedConstant());
- effects.push_back(effect);
- controls.push_back(control);
-
- int const count = static_cast<int>(controls.size());
- control = graph()->NewNode(common()->Merge(count), count, &controls.front());
- effects.push_back(control);
- effect =
- graph()->NewNode(common()->EffectPhi(count), count + 1, &effects.front());
- values.push_back(control);
- Node* value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, count),
- count + 1, &values.front());
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
-}
-
-Reduction JSBuiltinReducer::ReduceArrayIteratorNext(Node* node) {
- Maybe<InstanceType> maybe_type = GetInstanceTypeWitness(node);
- if (!maybe_type.IsJust()) return NoChange();
- InstanceType type = maybe_type.FromJust();
- switch (type) {
- case JS_TYPED_ARRAY_KEY_ITERATOR_TYPE:
- return ReduceTypedArrayIteratorNext(type, node, IterationKind::kKeys);
-
- case JS_FAST_ARRAY_KEY_ITERATOR_TYPE:
- return ReduceFastArrayIteratorNext(type, node, IterationKind::kKeys);
-
- case JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- return ReduceTypedArrayIteratorNext(type, node, IterationKind::kEntries);
-
- case JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- return ReduceFastArrayIteratorNext(type, node, IterationKind::kEntries);
-
- case JS_INT8_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_INT16_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_INT32_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE:
- return ReduceTypedArrayIteratorNext(type, node, IterationKind::kValues);
-
- case JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FAST_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
- return ReduceFastArrayIteratorNext(type, node, IterationKind::kValues);
-
- default:
- // Slow array iterators are not reduced
- return NoChange();
- }
-}
-
// ES6 section 22.1.2.2 Array.isArray ( arg )
Reduction JSBuiltinReducer::ReduceArrayIsArray(Node* node) {
// We certainly know that undefined is not an array.
@@ -872,343 +225,6 @@ Reduction JSBuiltinReducer::ReduceArrayIsArray(Node* node) {
return Replace(value);
}
-Reduction JSBuiltinReducer::ReduceCollectionIterator(
- Node* node, InstanceType collection_instance_type,
- int collection_iterator_map_index) {
- DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
- Node* receiver = NodeProperties::GetValueInput(node, 1);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- if (NodeProperties::HasInstanceTypeWitness(receiver, effect,
- collection_instance_type)) {
- // Figure out the proper collection iterator map.
- Handle<Map> collection_iterator_map(
- Map::cast(native_context()->get(collection_iterator_map_index)),
- isolate());
-
- // Load the OrderedHashTable from the {receiver}.
- Node* table = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSCollectionTable()),
- receiver, effect, control);
-
- // Create the JSCollectionIterator result.
- AllocationBuilder a(jsgraph(), effect, control);
- a.Allocate(JSCollectionIterator::kSize, NOT_TENURED, Type::OtherObject());
- a.Store(AccessBuilder::ForMap(), collection_iterator_map);
- a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
- jsgraph()->EmptyFixedArrayConstant());
- a.Store(AccessBuilder::ForJSObjectElements(),
- jsgraph()->EmptyFixedArrayConstant());
- a.Store(AccessBuilder::ForJSCollectionIteratorTable(), table);
- a.Store(AccessBuilder::ForJSCollectionIteratorIndex(),
- jsgraph()->ZeroConstant());
- Node* value = effect = a.Finish();
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
- return NoChange();
-}
-
-Reduction JSBuiltinReducer::ReduceCollectionSize(
- Node* node, InstanceType collection_instance_type) {
- DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
- Node* receiver = NodeProperties::GetValueInput(node, 1);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- if (NodeProperties::HasInstanceTypeWitness(receiver, effect,
- collection_instance_type)) {
- Node* table = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSCollectionTable()),
- receiver, effect, control);
- Node* value = effect = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForOrderedHashTableBaseNumberOfElements()),
- table, effect, control);
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
- return NoChange();
-}
-
-Reduction JSBuiltinReducer::ReduceCollectionIteratorNext(
- Node* node, int entry_size, Handle<HeapObject> empty_collection,
- InstanceType collection_iterator_instance_type_first,
- InstanceType collection_iterator_instance_type_last) {
- DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
- Node* receiver = NodeProperties::GetValueInput(node, 1);
- Node* context = NodeProperties::GetContextInput(node);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
-
- // A word of warning to begin with: This whole method might look a bit
- // strange at times, but that's mostly because it was carefully handcrafted
- // to allow for full escape analysis and scalar replacement of both the
- // collection iterator object and the iterator results, including the
- // key-value arrays in case of Set/Map entry iteration.
- //
- // TODO(turbofan): Currently the escape analysis (and the store-load
- // forwarding) is unable to eliminate the allocations for the key-value
- // arrays in case of Set/Map entry iteration, and we should investigate
- // how to update the escape analysis / arrange the graph in a way that
- // this becomes possible.
-
- // Infer the {receiver} instance type.
- InstanceType receiver_instance_type;
- ZoneHandleSet<Map> receiver_maps;
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
- if (result == NodeProperties::kNoReceiverMaps) return NoChange();
- DCHECK_NE(0, receiver_maps.size());
- receiver_instance_type = receiver_maps[0]->instance_type();
- for (size_t i = 1; i < receiver_maps.size(); ++i) {
- if (receiver_maps[i]->instance_type() != receiver_instance_type) {
- return NoChange();
- }
- }
- if (receiver_instance_type < collection_iterator_instance_type_first ||
- receiver_instance_type > collection_iterator_instance_type_last) {
- return NoChange();
- }
-
- // Transition the JSCollectionIterator {receiver} if necessary
- // (i.e. there were certain mutations while we're iterating).
- {
- Node* done_loop;
- Node* done_eloop;
- Node* loop = control =
- graph()->NewNode(common()->Loop(2), control, control);
- Node* eloop = effect =
- graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
- Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
- NodeProperties::MergeControlToEnd(graph(), common(), terminate);
-
- // Check if reached the final table of the {receiver}.
- Node* table = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSCollectionIteratorTable()),
- receiver, effect, control);
- Node* next_table = effect =
- graph()->NewNode(simplified()->LoadField(
- AccessBuilder::ForOrderedHashTableBaseNextTable()),
- table, effect, control);
- Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), next_table);
- control =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-
- // Abort the {loop} when we reach the final table.
- done_loop = graph()->NewNode(common()->IfTrue(), control);
- done_eloop = effect;
-
- // Migrate to the {next_table} otherwise.
- control = graph()->NewNode(common()->IfFalse(), control);
-
- // Self-heal the {receiver}s index.
- Node* index = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSCollectionIteratorIndex()),
- receiver, effect, control);
- Callable const callable =
- Builtins::CallableFor(isolate(), Builtins::kOrderedHashTableHealIndex);
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNoFlags, Operator::kEliminatable);
- index = effect =
- graph()->NewNode(common()->Call(call_descriptor),
- jsgraph()->HeapConstant(callable.code()), table, index,
- jsgraph()->NoContextConstant(), effect);
- NodeProperties::SetType(index, type_cache_.kFixedArrayLengthType);
-
- // Update the {index} and {table} on the {receiver}.
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForJSCollectionIteratorIndex()),
- receiver, index, effect, control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForJSCollectionIteratorTable()),
- receiver, next_table, effect, control);
-
- // Tie the knot.
- loop->ReplaceInput(1, control);
- eloop->ReplaceInput(1, effect);
-
- control = done_loop;
- effect = done_eloop;
- }
-
- // Get current index and table from the JSCollectionIterator {receiver}.
- Node* index = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSCollectionIteratorIndex()),
- receiver, effect, control);
- Node* table = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSCollectionIteratorTable()),
- receiver, effect, control);
-
- // Create the {JSIteratorResult} first to ensure that we always have
- // a dominating Allocate node for the allocation folding phase.
- Node* iterator_result = effect = graph()->NewNode(
- javascript()->CreateIterResultObject(), jsgraph()->UndefinedConstant(),
- jsgraph()->TrueConstant(), context, effect);
-
- // Look for the next non-holey key, starting from {index} in the {table}.
- Node* controls[2];
- Node* effects[3];
- {
- // Compute the currently used capacity.
- Node* number_of_buckets = effect = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForOrderedHashTableBaseNumberOfBuckets()),
- table, effect, control);
- Node* number_of_elements = effect = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForOrderedHashTableBaseNumberOfElements()),
- table, effect, control);
- Node* number_of_deleted_elements = effect = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForOrderedHashTableBaseNumberOfDeletedElements()),
- table, effect, control);
- Node* used_capacity =
- graph()->NewNode(simplified()->NumberAdd(), number_of_elements,
- number_of_deleted_elements);
-
- // Skip holes and update the {index}.
- Node* loop = graph()->NewNode(common()->Loop(2), control, control);
- Node* eloop =
- graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
- Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
- NodeProperties::MergeControlToEnd(graph(), common(), terminate);
- Node* iloop = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, 2), index, index, loop);
- NodeProperties::SetType(iloop, type_cache_.kFixedArrayLengthType);
- {
- Node* check0 = graph()->NewNode(simplified()->NumberLessThan(), iloop,
- used_capacity);
- Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, loop);
-
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* efalse0 = eloop;
- {
- // Mark the {receiver} as exhausted.
- efalse0 = graph()->NewNode(
- simplified()->StoreField(
- AccessBuilder::ForJSCollectionIteratorTable()),
- receiver, jsgraph()->HeapConstant(empty_collection), efalse0,
- if_false0);
-
- controls[0] = if_false0;
- effects[0] = efalse0;
- }
-
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* etrue0 = eloop;
- {
- // Load the key of the entry.
- Node* entry_start_position = graph()->NewNode(
- simplified()->NumberAdd(),
- graph()->NewNode(
- simplified()->NumberAdd(),
- graph()->NewNode(simplified()->NumberMultiply(), iloop,
- jsgraph()->Constant(entry_size)),
- number_of_buckets),
- jsgraph()->Constant(OrderedHashTableBase::kHashTableStartIndex));
- Node* entry_key = etrue0 = graph()->NewNode(
- simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()),
- table, entry_start_position, etrue0, if_true0);
-
- // Advance the index.
- Node* index = graph()->NewNode(simplified()->NumberAdd(), iloop,
- jsgraph()->OneConstant());
-
- Node* check1 =
- graph()->NewNode(simplified()->ReferenceEqual(), entry_key,
- jsgraph()->TheHoleConstant());
- Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check1, if_true0);
-
- {
- // Abort loop with resulting value.
- Node* control = graph()->NewNode(common()->IfFalse(), branch1);
- Node* effect = etrue0;
- Node* value = effect =
- graph()->NewNode(common()->TypeGuard(Type::NonInternal()),
- entry_key, effect, control);
- Node* done = jsgraph()->FalseConstant();
-
- // Advance the index on the {receiver}.
- effect = graph()->NewNode(
- simplified()->StoreField(
- AccessBuilder::ForJSCollectionIteratorIndex()),
- receiver, index, effect, control);
-
- // The actual {value} depends on the {receiver} iteration type.
- switch (receiver_instance_type) {
- case JS_MAP_KEY_ITERATOR_TYPE:
- case JS_SET_VALUE_ITERATOR_TYPE:
- break;
-
- case JS_SET_KEY_VALUE_ITERATOR_TYPE:
- value = effect =
- graph()->NewNode(javascript()->CreateKeyValueArray(), value,
- value, context, effect);
- break;
-
- case JS_MAP_VALUE_ITERATOR_TYPE:
- value = effect = graph()->NewNode(
- simplified()->LoadElement(
- AccessBuilder::ForFixedArrayElement()),
- table,
- graph()->NewNode(
- simplified()->NumberAdd(), entry_start_position,
- jsgraph()->Constant(OrderedHashMap::kValueOffset)),
- effect, control);
- break;
-
- case JS_MAP_KEY_VALUE_ITERATOR_TYPE:
- value = effect = graph()->NewNode(
- simplified()->LoadElement(
- AccessBuilder::ForFixedArrayElement()),
- table,
- graph()->NewNode(
- simplified()->NumberAdd(), entry_start_position,
- jsgraph()->Constant(OrderedHashMap::kValueOffset)),
- effect, control);
- value = effect =
- graph()->NewNode(javascript()->CreateKeyValueArray(),
- entry_key, value, context, effect);
- break;
-
- default:
- UNREACHABLE();
- break;
- }
-
- // Store final {value} and {done} into the {iterator_result}.
- effect =
- graph()->NewNode(simplified()->StoreField(
- AccessBuilder::ForJSIteratorResultValue()),
- iterator_result, value, effect, control);
- effect =
- graph()->NewNode(simplified()->StoreField(
- AccessBuilder::ForJSIteratorResultDone()),
- iterator_result, done, effect, control);
-
- controls[1] = control;
- effects[1] = effect;
- }
-
- // Continue with next loop index.
- loop->ReplaceInput(1, graph()->NewNode(common()->IfTrue(), branch1));
- eloop->ReplaceInput(1, etrue0);
- iloop->ReplaceInput(1, index);
- }
- }
-
- control = effects[2] = graph()->NewNode(common()->Merge(2), 2, controls);
- effect = graph()->NewNode(common()->EffectPhi(2), 3, effects);
- }
-
- // Yield the final {iterator_result}.
- ReplaceWithValue(node, iterator_result, effect, control);
- return Replace(iterator_result);
-}
-
// ES6 section 20.3.3.1 Date.now ( )
Reduction JSBuiltinReducer::ReduceDateNow(Node* node) {
NodeProperties::RemoveValueInputs(node);
@@ -1260,131 +276,6 @@ Reduction JSBuiltinReducer::ReduceGlobalIsNaN(Node* node) {
return NoChange();
}
-Reduction JSBuiltinReducer::ReduceMapGet(Node* node) {
- // We only optimize if we have target, receiver and key parameters.
- if (node->op()->ValueInputCount() != 3) return NoChange();
- Node* receiver = NodeProperties::GetValueInput(node, 1);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- Node* key = NodeProperties::GetValueInput(node, 2);
-
- if (!NodeProperties::HasInstanceTypeWitness(receiver, effect, JS_MAP_TYPE))
- return NoChange();
-
- Node* table = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSCollectionTable()), receiver,
- effect, control);
-
- Node* entry = effect = graph()->NewNode(
- simplified()->FindOrderedHashMapEntry(), table, key, effect, control);
-
- Node* check = graph()->NewNode(simplified()->NumberEqual(), entry,
- jsgraph()->MinusOneConstant());
-
- Node* branch = graph()->NewNode(common()->Branch(), check, control);
-
- // Key not found.
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue = jsgraph()->UndefinedConstant();
-
- // Key found.
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- Node* vfalse = efalse = graph()->NewNode(
- simplified()->LoadElement(AccessBuilder::ForOrderedHashMapEntryValue()),
- table, entry, efalse, if_false);
-
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* value = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, 2), vtrue, vfalse, control);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
-
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
-}
-
-Reduction JSBuiltinReducer::ReduceMapHas(Node* node) {
- // We only optimize if we have target, receiver and key parameters.
- if (node->op()->ValueInputCount() != 3) return NoChange();
- Node* receiver = NodeProperties::GetValueInput(node, 1);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- Node* key = NodeProperties::GetValueInput(node, 2);
-
- if (!NodeProperties::HasInstanceTypeWitness(receiver, effect, JS_MAP_TYPE))
- return NoChange();
-
- Node* table = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSCollectionTable()), receiver,
- effect, control);
-
- Node* index = effect = graph()->NewNode(
- simplified()->FindOrderedHashMapEntry(), table, key, effect, control);
-
- Node* value = graph()->NewNode(simplified()->NumberEqual(), index,
- jsgraph()->MinusOneConstant());
- value = graph()->NewNode(simplified()->BooleanNot(), value);
-
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
-}
-
-// ES6 section 20.1.2.2 Number.isFinite ( number )
-Reduction JSBuiltinReducer::ReduceNumberIsFinite(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::Number())) {
- // Number.isFinite(a:number) -> NumberEqual(a', a')
- // where a' = NumberSubtract(a, a)
- Node* input = r.GetJSCallInput(0);
- Node* diff = graph()->NewNode(simplified()->NumberSubtract(), input, input);
- Node* value = graph()->NewNode(simplified()->NumberEqual(), diff, diff);
- return Replace(value);
- }
- return NoChange();
-}
-
-// ES6 section 20.1.2.3 Number.isInteger ( number )
-Reduction JSBuiltinReducer::ReduceNumberIsInteger(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::Number())) {
- // Number.isInteger(x:number) -> NumberEqual(NumberSubtract(x, x'), #0)
- // where x' = NumberTrunc(x)
- Node* input = r.GetJSCallInput(0);
- Node* trunc = graph()->NewNode(simplified()->NumberTrunc(), input);
- Node* diff = graph()->NewNode(simplified()->NumberSubtract(), input, trunc);
- Node* value = graph()->NewNode(simplified()->NumberEqual(), diff,
- jsgraph()->ZeroConstant());
- return Replace(value);
- }
- return NoChange();
-}
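// Illustrative sketch (not part of the patch): the scalar identities that the
// removed ReduceNumberIsFinite/ReduceNumberIsInteger lowerings above rely on,
// written as plain C++. The function names are invented for illustration.

#include <cmath>

// Number.isFinite(x) lowers to NumberEqual(x - x, x - x): for finite x the
// difference is 0 and compares equal to itself; for NaN or +/-Infinity the
// difference is NaN, which compares unequal to itself.
bool IsFiniteViaSelfSubtraction(double x) {
  double diff = x - x;
  return diff == diff;
}

// Number.isInteger(x) lowers to NumberEqual(x - trunc(x), 0): the fractional
// part is exactly 0 for finite integers, and NaN (never 0) for NaN/Infinity.
bool IsIntegerViaTrunc(double x) {
  return (x - std::trunc(x)) == 0.0;
}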
-
-// ES6 section 20.1.2.4 Number.isNaN ( number )
-Reduction JSBuiltinReducer::ReduceNumberIsNaN(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchZero()) {
- // Number.isNaN() -> #false
- Node* value = jsgraph()->FalseConstant();
- return Replace(value);
- }
- // Number.isNaN(a:number) -> ObjectIsNaN(a)
- Node* input = r.GetJSCallInput(0);
- Node* value = graph()->NewNode(simplified()->ObjectIsNaN(), input);
- return Replace(value);
-}
-
-// ES6 section 20.1.2.5 Number.isSafeInteger ( number )
-Reduction JSBuiltinReducer::ReduceNumberIsSafeInteger(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(type_cache_.kSafeInteger)) {
- // Number.isInteger(x:safe-integer) -> #true
- Node* value = jsgraph()->TrueConstant();
- return Replace(value);
- }
- return NoChange();
-}
-
// ES6 section 20.1.2.13 Number.parseInt ( string, radix )
Reduction JSBuiltinReducer::ReduceNumberParseInt(Node* node) {
JSCallReduction r(node);
@@ -1483,168 +374,6 @@ Reduction JSBuiltinReducer::ReduceObjectCreate(Node* node) {
return Replace(value);
}
-namespace {
-
-Node* GetStringWitness(Node* node) {
- Node* receiver = NodeProperties::GetValueInput(node, 1);
- Type* receiver_type = NodeProperties::GetType(receiver);
- Node* effect = NodeProperties::GetEffectInput(node);
- if (receiver_type->Is(Type::String())) return receiver;
- // Check if the {node} is dominated by a CheckString renaming for
- // it's {receiver}, and if so use that renaming as {receiver} for
- // the lowering below.
- for (Node* dominator = effect;;) {
- if ((dominator->opcode() == IrOpcode::kCheckString ||
- dominator->opcode() == IrOpcode::kCheckInternalizedString ||
- dominator->opcode() == IrOpcode::kCheckSeqString) &&
- NodeProperties::IsSame(dominator->InputAt(0), receiver)) {
- return dominator;
- }
- if (dominator->op()->EffectInputCount() != 1) {
- // Didn't find any appropriate CheckString node.
- return nullptr;
- }
- dominator = NodeProperties::GetEffectInput(dominator);
- }
-}
-
-} // namespace
-
-// ES6 String.prototype.concat(...args)
-// #sec-string.prototype.concat
-Reduction JSBuiltinReducer::ReduceStringConcat(Node* node) {
- if (Node* receiver = GetStringWitness(node)) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::PlainPrimitive())) {
- // String.prototype.concat(lhs:string, rhs:plain-primitive)
- // -> Call[StringAddStub](lhs, rhs)
- StringAddFlags flags = r.InputsMatchOne(Type::String())
- ? STRING_ADD_CHECK_NONE
- : STRING_ADD_CONVERT_RIGHT;
- // TODO(turbofan): Massage the FrameState of the {node} here once we
- // have an artificial builtin frame type, so that it looks like the
- // exception from StringAdd overflow came from String.prototype.concat
- // builtin instead of the calling function.
- Callable const callable =
- CodeFactory::StringAdd(isolate(), flags, NOT_TENURED);
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNeedsFrameState,
- Operator::kNoDeopt | Operator::kNoWrite);
- node->ReplaceInput(0, jsgraph()->HeapConstant(callable.code()));
- node->ReplaceInput(1, receiver);
- NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
- return Changed(node);
- }
- }
-
- return NoChange();
-}
-
-// ES section #sec-string.prototype.slice
-Reduction JSBuiltinReducer::ReduceStringSlice(Node* node) {
- if (Node* receiver = GetStringWitness(node)) {
- Node* start = node->op()->ValueInputCount() >= 3
- ? NodeProperties::GetValueInput(node, 2)
- : jsgraph()->UndefinedConstant();
- Type* start_type = NodeProperties::GetType(start);
- Node* end = node->op()->ValueInputCount() >= 4
- ? NodeProperties::GetValueInput(node, 3)
- : jsgraph()->UndefinedConstant();
- Type* end_type = NodeProperties::GetType(end);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
-
- if (start_type->Is(type_cache_.kSingletonMinusOne) &&
- end_type->Is(Type::Undefined())) {
- Node* receiver_length =
- graph()->NewNode(simplified()->StringLength(), receiver);
-
- Node* check =
- graph()->NewNode(simplified()->NumberEqual(), receiver_length,
- jsgraph()->ZeroConstant());
- Node* branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check, control);
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* vtrue = jsgraph()->EmptyStringConstant();
-
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* vfalse;
- Node* efalse;
- {
- // We need to convince TurboFan that {receiver_length}-1 is a valid
- // Unsigned32 value, so we just apply NumberToUint32 to the result
- // of the subtraction, which is a no-op and merely acts as a marker.
- Node* index =
- graph()->NewNode(simplified()->NumberSubtract(), receiver_length,
- jsgraph()->OneConstant());
- index = graph()->NewNode(simplified()->NumberToUint32(), index);
- vfalse = efalse = graph()->NewNode(simplified()->StringCharAt(),
- receiver, index, effect, if_false);
- }
-
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue, vfalse, control);
- effect =
- graph()->NewNode(common()->EffectPhi(2), effect, efalse, control);
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
- }
- return NoChange();
-}
-
-Reduction JSBuiltinReducer::ReduceArrayBufferIsView(Node* node) {
- Node* value = node->op()->ValueInputCount() >= 3
- ? NodeProperties::GetValueInput(node, 2)
- : jsgraph()->UndefinedConstant();
- RelaxEffectsAndControls(node);
- node->ReplaceInput(0, value);
- node->TrimInputCount(1);
- NodeProperties::ChangeOp(node, simplified()->ObjectIsArrayBufferView());
- return Changed(node);
-}
-
-Reduction JSBuiltinReducer::ReduceArrayBufferViewAccessor(
- Node* node, InstanceType instance_type, FieldAccess const& access) {
- Node* receiver = NodeProperties::GetValueInput(node, 1);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- if (NodeProperties::HasInstanceTypeWitness(receiver, effect, instance_type)) {
- // Load the {receiver}s field.
- Node* value = effect = graph()->NewNode(simplified()->LoadField(access),
- receiver, effect, control);
-
- // See if we can skip the neutering check.
- if (isolate()->IsArrayBufferNeuteringIntact()) {
- // Add a code dependency so we are deoptimized in case an ArrayBuffer
- // gets neutered.
- dependencies()->AssumePropertyCell(
- factory()->array_buffer_neutering_protector());
- } else {
- // Check if the {receiver}s buffer was neutered.
- Node* receiver_buffer = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
- receiver, effect, control);
- Node* check = effect =
- graph()->NewNode(simplified()->ArrayBufferWasNeutered(),
- receiver_buffer, effect, control);
-
- // Default to zero if the {receiver}s buffer was neutered.
- value = graph()->NewNode(
- common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
- check, jsgraph()->ZeroConstant(), value);
- }
-
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
- return NoChange();
-}
-
Reduction JSBuiltinReducer::Reduce(Node* node) {
Reduction reduction = NoChange();
JSCallReduction r(node);
@@ -1653,14 +382,6 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
if (!r.HasBuiltinFunctionId()) return NoChange();
if (!r.BuiltinCanBeInlined()) return NoChange();
switch (r.GetBuiltinFunctionId()) {
- case kArrayEntries:
- return ReduceArrayIterator(node, IterationKind::kEntries);
- case kArrayKeys:
- return ReduceArrayIterator(node, IterationKind::kKeys);
- case kArrayValues:
- return ReduceArrayIterator(node, IterationKind::kValues);
- case kArrayIteratorNext:
- return ReduceArrayIteratorNext(node);
case kArrayIsArray:
return ReduceArrayIsArray(node);
case kDateNow:
@@ -1673,90 +394,12 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
case kGlobalIsNaN:
reduction = ReduceGlobalIsNaN(node);
break;
- case kMapEntries:
- return ReduceCollectionIterator(
- node, JS_MAP_TYPE, Context::MAP_KEY_VALUE_ITERATOR_MAP_INDEX);
- case kMapGet:
- reduction = ReduceMapGet(node);
- break;
- case kMapHas:
- reduction = ReduceMapHas(node);
- break;
- case kMapKeys:
- return ReduceCollectionIterator(node, JS_MAP_TYPE,
- Context::MAP_KEY_ITERATOR_MAP_INDEX);
- case kMapSize:
- return ReduceCollectionSize(node, JS_MAP_TYPE);
- case kMapValues:
- return ReduceCollectionIterator(node, JS_MAP_TYPE,
- Context::MAP_VALUE_ITERATOR_MAP_INDEX);
- case kMapIteratorNext:
- return ReduceCollectionIteratorNext(
- node, OrderedHashMap::kEntrySize, factory()->empty_ordered_hash_map(),
- FIRST_MAP_ITERATOR_TYPE, LAST_MAP_ITERATOR_TYPE);
- case kNumberIsFinite:
- reduction = ReduceNumberIsFinite(node);
- break;
- case kNumberIsInteger:
- reduction = ReduceNumberIsInteger(node);
- break;
- case kNumberIsNaN:
- reduction = ReduceNumberIsNaN(node);
- break;
- case kNumberIsSafeInteger:
- reduction = ReduceNumberIsSafeInteger(node);
- break;
case kNumberParseInt:
reduction = ReduceNumberParseInt(node);
break;
case kObjectCreate:
reduction = ReduceObjectCreate(node);
break;
- case kSetEntries:
- return ReduceCollectionIterator(
- node, JS_SET_TYPE, Context::SET_KEY_VALUE_ITERATOR_MAP_INDEX);
- case kSetSize:
- return ReduceCollectionSize(node, JS_SET_TYPE);
- case kSetValues:
- return ReduceCollectionIterator(node, JS_SET_TYPE,
- Context::SET_VALUE_ITERATOR_MAP_INDEX);
- case kSetIteratorNext:
- return ReduceCollectionIteratorNext(
- node, OrderedHashSet::kEntrySize, factory()->empty_ordered_hash_set(),
- FIRST_SET_ITERATOR_TYPE, LAST_SET_ITERATOR_TYPE);
- case kStringConcat:
- return ReduceStringConcat(node);
- case kStringSlice:
- return ReduceStringSlice(node);
- case kArrayBufferIsView:
- return ReduceArrayBufferIsView(node);
- case kDataViewByteLength:
- return ReduceArrayBufferViewAccessor(
- node, JS_DATA_VIEW_TYPE,
- AccessBuilder::ForJSArrayBufferViewByteLength());
- case kDataViewByteOffset:
- return ReduceArrayBufferViewAccessor(
- node, JS_DATA_VIEW_TYPE,
- AccessBuilder::ForJSArrayBufferViewByteOffset());
- case kTypedArrayByteLength:
- return ReduceArrayBufferViewAccessor(
- node, JS_TYPED_ARRAY_TYPE,
- AccessBuilder::ForJSArrayBufferViewByteLength());
- case kTypedArrayByteOffset:
- return ReduceArrayBufferViewAccessor(
- node, JS_TYPED_ARRAY_TYPE,
- AccessBuilder::ForJSArrayBufferViewByteOffset());
- case kTypedArrayLength:
- return ReduceArrayBufferViewAccessor(
- node, JS_TYPED_ARRAY_TYPE, AccessBuilder::ForJSTypedArrayLength());
- case kTypedArrayEntries:
- return ReduceTypedArrayIterator(node, IterationKind::kEntries);
- case kTypedArrayKeys:
- return ReduceTypedArrayIterator(node, IterationKind::kKeys);
- case kTypedArrayValues:
- return ReduceTypedArrayIterator(node, IterationKind::kValues);
- case kTypedArrayToStringTag:
- return ReduceTypedArrayToStringTag(node);
default:
break;
}
diff --git a/deps/v8/src/compiler/js-builtin-reducer.h b/deps/v8/src/compiler/js-builtin-reducer.h
index d24bcc9746..eff40060bb 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.h
+++ b/deps/v8/src/compiler/js-builtin-reducer.h
@@ -39,49 +39,14 @@ class V8_EXPORT_PRIVATE JSBuiltinReducer final
Reduction Reduce(Node* node) final;
private:
- enum class ArrayIteratorKind { kArray, kTypedArray };
-
- Reduction ReduceArrayIterator(Node* node, IterationKind kind);
- Reduction ReduceTypedArrayIterator(Node* node, IterationKind kind);
- Reduction ReduceArrayIterator(Handle<Map> receiver_map, Node* node,
- IterationKind kind,
- ArrayIteratorKind iter_kind);
- Reduction ReduceArrayIteratorNext(Node* node);
- Reduction ReduceFastArrayIteratorNext(InstanceType type, Node* node,
- IterationKind kind);
- Reduction ReduceTypedArrayIteratorNext(InstanceType type, Node* node,
- IterationKind kind);
- Reduction ReduceTypedArrayToStringTag(Node* node);
Reduction ReduceArrayIsArray(Node* node);
- Reduction ReduceCollectionIterator(Node* node,
- InstanceType collection_instance_type,
- int collection_iterator_map_index);
- Reduction ReduceCollectionSize(Node* node,
- InstanceType collection_instance_type);
- Reduction ReduceCollectionIteratorNext(
- Node* node, int entry_size, Handle<HeapObject> empty_collection,
- InstanceType collection_iterator_instance_type_first,
- InstanceType collection_iterator_instance_type_last);
Reduction ReduceDateNow(Node* node);
Reduction ReduceDateGetTime(Node* node);
Reduction ReduceGlobalIsFinite(Node* node);
Reduction ReduceGlobalIsNaN(Node* node);
- Reduction ReduceMapHas(Node* node);
- Reduction ReduceMapGet(Node* node);
- Reduction ReduceNumberIsFinite(Node* node);
- Reduction ReduceNumberIsInteger(Node* node);
- Reduction ReduceNumberIsNaN(Node* node);
- Reduction ReduceNumberIsSafeInteger(Node* node);
Reduction ReduceNumberParseInt(Node* node);
Reduction ReduceObjectCreate(Node* node);
- Reduction ReduceStringSlice(Node* node);
- Reduction ReduceStringConcat(Node* node);
- Reduction ReduceArrayBufferIsView(Node* node);
- Reduction ReduceArrayBufferViewAccessor(Node* node,
- InstanceType instance_type,
- FieldAccess const& access);
-
Node* ToNumber(Node* value);
Node* ToUint32(Node* value);
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index 417191c680..451ec80a8d 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -16,6 +16,7 @@
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/simplified-operator.h"
+#include "src/compiler/type-cache.h"
#include "src/feedback-vector-inl.h"
#include "src/ic/call-optimization.h"
#include "src/objects-inl.h"
@@ -54,7 +55,7 @@ Reduction JSCallReducer::ReduceMathBinary(Node* node, const Operator* op) {
if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
return NoChange();
}
- if (node->op()->ValueInputCount() < 4) {
+ if (node->op()->ValueInputCount() < 3) {
Node* value = jsgraph()->NaNConstant();
ReplaceWithValue(node, value);
return Replace(value);
@@ -63,7 +64,9 @@ Reduction JSCallReducer::ReduceMathBinary(Node* node, const Operator* op) {
Node* control = NodeProperties::GetControlInput(node);
Node* left = NodeProperties::GetValueInput(node, 2);
- Node* right = NodeProperties::GetValueInput(node, 3);
+ Node* right = node->op()->ValueInputCount() > 3
+ ? NodeProperties::GetValueInput(node, 3)
+ : jsgraph()->NaNConstant();
left = effect =
graph()->NewNode(simplified()->SpeculativeToNumber(
NumberOperationHint::kNumberOrOddball, p.feedback()),
@@ -83,13 +86,15 @@ Reduction JSCallReducer::ReduceMathImul(Node* node) {
if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
return NoChange();
}
- if (node->op()->ValueInputCount() < 4) {
+ if (node->op()->ValueInputCount() < 3) {
Node* value = jsgraph()->ZeroConstant();
ReplaceWithValue(node, value);
return Replace(value);
}
Node* left = NodeProperties::GetValueInput(node, 2);
- Node* right = NodeProperties::GetValueInput(node, 3);
+ Node* right = node->op()->ValueInputCount() > 3
+ ? NodeProperties::GetValueInput(node, 3)
+ : jsgraph()->ZeroConstant();
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
@@ -1453,7 +1458,7 @@ Reduction JSCallReducer::ReduceArrayMap(Node* node,
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
// Ensure that any changes to the Array species constructor cause deopt.
- if (!isolate()->IsSpeciesLookupChainIntact()) return NoChange();
+ if (!isolate()->IsArraySpeciesLookupChainIntact()) return NoChange();
const ElementsKind kind = receiver_maps[0]->elements_kind();
@@ -1464,7 +1469,7 @@ Reduction JSCallReducer::ReduceArrayMap(Node* node,
if (receiver_map->elements_kind() != kind) return NoChange();
}
- dependencies()->AssumePropertyCell(factory()->species_protector());
+ dependencies()->AssumePropertyCell(factory()->array_species_protector());
Handle<JSFunction> handle_constructor(
JSFunction::cast(
@@ -1653,7 +1658,7 @@ Reduction JSCallReducer::ReduceArrayFilter(Node* node,
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
// And ensure that any changes to the Array species constructor cause deopt.
- if (!isolate()->IsSpeciesLookupChainIntact()) return NoChange();
+ if (!isolate()->IsArraySpeciesLookupChainIntact()) return NoChange();
const ElementsKind kind = receiver_maps[0]->elements_kind();
// The output array is packed (filter doesn't visit holes).
@@ -1668,7 +1673,7 @@ Reduction JSCallReducer::ReduceArrayFilter(Node* node,
if (receiver_map->elements_kind() != kind) return NoChange();
}
- dependencies()->AssumePropertyCell(factory()->species_protector());
+ dependencies()->AssumePropertyCell(factory()->array_species_protector());
Handle<Map> initial_map(
Map::cast(native_context()->GetInitialJSArrayMap(packed_kind)));
@@ -2112,10 +2117,11 @@ Node* JSCallReducer::DoFilterPostCallbackWork(ElementsKind kind, Node** control,
simplified()->LoadField(AccessBuilder::ForJSObjectElements()), a, etrue,
if_true);
- // We know that {to} is in Unsigned31 range here, being smaller than
- // {original_length} at all times.
+ DCHECK(TypeCache::Get().kFixedDoubleArrayLengthType->Is(
+ TypeCache::Get().kFixedArrayLengthType));
Node* checked_to = etrue = graph()->NewNode(
- common()->TypeGuard(Type::Unsigned31()), to, etrue, if_true);
+ common()->TypeGuard(TypeCache::Get().kFixedArrayLengthType), to, etrue,
+ if_true);
Node* elements_length = etrue = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForFixedArrayLength()), elements,
etrue, if_true);
@@ -2245,7 +2251,7 @@ Reduction JSCallReducer::ReduceArrayEvery(Node* node,
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
// And ensure that any changes to the Array species constructor cause deopt.
- if (!isolate()->IsSpeciesLookupChainIntact()) return NoChange();
+ if (!isolate()->IsArraySpeciesLookupChainIntact()) return NoChange();
const ElementsKind kind = receiver_maps[0]->elements_kind();
@@ -2256,7 +2262,7 @@ Reduction JSCallReducer::ReduceArrayEvery(Node* node,
if (receiver_map->elements_kind() != kind) return NoChange();
}
- dependencies()->AssumePropertyCell(factory()->species_protector());
+ dependencies()->AssumePropertyCell(factory()->array_species_protector());
// If we have unreliable maps, we need a map check.
if (result == NodeProperties::kUnreliableReceiverMaps) {
@@ -2438,6 +2444,122 @@ Reduction JSCallReducer::ReduceArrayEvery(Node* node,
return Replace(return_value);
}
+namespace {
+
+// Returns the correct Callable for Array's indexOf based on the receiver's
+// |elements_kind| and |isolate|. Assumes that |elements_kind| is a fast one.
+Callable GetCallableForArrayIndexOf(ElementsKind elements_kind,
+ Isolate* isolate) {
+ switch (elements_kind) {
+ case PACKED_SMI_ELEMENTS:
+ case HOLEY_SMI_ELEMENTS:
+ case PACKED_ELEMENTS:
+ case HOLEY_ELEMENTS:
+ return Builtins::CallableFor(isolate, Builtins::kArrayIndexOfSmiOrObject);
+ case PACKED_DOUBLE_ELEMENTS:
+ return Builtins::CallableFor(isolate,
+ Builtins::kArrayIndexOfPackedDoubles);
+ default:
+ DCHECK_EQ(HOLEY_DOUBLE_ELEMENTS, elements_kind);
+ return Builtins::CallableFor(isolate,
+ Builtins::kArrayIndexOfHoleyDoubles);
+ }
+}
+
+// Returns the correct Callable for Array's includes based on the receiver's
+// |elements_kind| and |isolate|. Assumes that |elements_kind| is a fast one.
+Callable GetCallableForArrayIncludes(ElementsKind elements_kind,
+ Isolate* isolate) {
+ switch (elements_kind) {
+ case PACKED_SMI_ELEMENTS:
+ case HOLEY_SMI_ELEMENTS:
+ case PACKED_ELEMENTS:
+ case HOLEY_ELEMENTS:
+ return Builtins::CallableFor(isolate,
+ Builtins::kArrayIncludesSmiOrObject);
+ case PACKED_DOUBLE_ELEMENTS:
+ return Builtins::CallableFor(isolate,
+ Builtins::kArrayIncludesPackedDoubles);
+ default:
+ DCHECK_EQ(HOLEY_DOUBLE_ELEMENTS, elements_kind);
+ return Builtins::CallableFor(isolate,
+ Builtins::kArrayIncludesHoleyDoubles);
+ }
+}
+
+} // namespace
+
+// For search_variant == kIndexOf:
+// ES6 Array.prototype.indexOf(searchElement[, fromIndex])
+// #sec-array.prototype.indexof
+// For search_variant == kIncludes:
+// ES7 Array.prototype.includes(searchElement[, fromIndex])
+// #sec-array.prototype.includes
+Reduction JSCallReducer::ReduceArrayIndexOfIncludes(
+ SearchVariant search_variant, Node* node) {
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ Handle<Map> receiver_map;
+ if (!NodeProperties::GetMapWitness(node).ToHandle(&receiver_map))
+ return NoChange();
+
+ if (receiver_map->instance_type() != JS_ARRAY_TYPE) return NoChange();
+ if (!IsFastElementsKind(receiver_map->elements_kind())) return NoChange();
+
+ Callable const callable =
+ search_variant == SearchVariant::kIndexOf
+ ? GetCallableForArrayIndexOf(receiver_map->elements_kind(), isolate())
+ : GetCallableForArrayIncludes(receiver_map->elements_kind(),
+ isolate());
+ CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNoFlags, Operator::kEliminatable);
+ // The stub expects the following arguments: the receiver array, its elements,
+ // the search_element, the array length, and the index to start searching
+ // from.
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* elements = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
+ effect, control);
+ Node* search_element = (node->op()->ValueInputCount() >= 3)
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ Node* length = effect = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForJSArrayLength(receiver_map->elements_kind())),
+ receiver, effect, control);
+ Node* new_from_index = jsgraph()->ZeroConstant();
+ if (node->op()->ValueInputCount() >= 4) {
+ Node* from_index = NodeProperties::GetValueInput(node, 3);
+ from_index = effect = graph()->NewNode(simplified()->CheckSmi(p.feedback()),
+ from_index, effect, control);
+    // If the index is negative, it is interpreted as an offset from the end
+    // and therefore needs to be added to the length. If the result is still
+    // negative, it needs to be clamped to 0.
+ new_from_index = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
+ graph()->NewNode(simplified()->NumberLessThan(), from_index,
+ jsgraph()->ZeroConstant()),
+ graph()->NewNode(
+ simplified()->NumberMax(),
+ graph()->NewNode(simplified()->NumberAdd(), length, from_index),
+ jsgraph()->ZeroConstant()),
+ from_index);
+ }
+
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* replacement_node = effect = graph()->NewNode(
+ common()->Call(desc), jsgraph()->HeapConstant(callable.code()), elements,
+ search_element, length, new_from_index, context, effect);
+ ReplaceWithValue(node, replacement_node, effect);
+ return Replace(replacement_node);
+}
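// Illustrative sketch (not part of the patch): the fromIndex normalization
// that ReduceArrayIndexOfIncludes emits above as a Select node, written as
// plain C++. The function name is invented for illustration.

#include <algorithm>

double ClampFromIndex(double from_index, double length) {
  // A negative fromIndex is an offset from the end of the array; if the sum
  // is still negative, searching starts at index 0.
  return from_index < 0 ? std::max(length + from_index, 0.0) : from_index;
}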
+
Reduction JSCallReducer::ReduceArraySome(Node* node,
Handle<SharedFunctionInfo> shared) {
if (!FLAG_turbo_inline_array_builtins) return NoChange();
@@ -2465,7 +2587,7 @@ Reduction JSCallReducer::ReduceArraySome(Node* node,
if (result == NodeProperties::kNoReceiverMaps) return NoChange();
// And ensure that any changes to the Array species constructor cause deopt.
- if (!isolate()->IsSpeciesLookupChainIntact()) return NoChange();
+ if (!isolate()->IsArraySpeciesLookupChainIntact()) return NoChange();
if (receiver_maps.size() == 0) return NoChange();
@@ -2478,7 +2600,7 @@ Reduction JSCallReducer::ReduceArraySome(Node* node,
if (receiver_map->elements_kind() != kind) return NoChange();
}
- dependencies()->AssumePropertyCell(factory()->species_protector());
+ dependencies()->AssumePropertyCell(factory()->array_species_protector());
Node* k = jsgraph()->ZeroConstant();
@@ -2798,7 +2920,7 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
// ensure that it's safe to avoid the actual iteration.
if ((node->opcode() == IrOpcode::kJSCallWithSpread ||
node->opcode() == IrOpcode::kJSConstructWithSpread) &&
- !isolate()->initial_array_iterator_prototype_map()->is_stable()) {
+ !isolate()->IsArrayIteratorLookupChainIntact()) {
return NoChange();
}
@@ -2870,7 +2992,7 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
// some other function (and same for the {arguments_list}).
CreateArgumentsType const type = CreateArgumentsTypeOf(arguments_list->op());
Node* frame_state = NodeProperties::GetFrameStateInput(arguments_list);
- FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
+ FrameStateInfo state_info = FrameStateInfoOf(frame_state->op());
int start_index = 0;
// Determine the formal parameter count;
Handle<SharedFunctionInfo> shared;
@@ -2889,24 +3011,14 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
}
} else if (type == CreateArgumentsType::kRestParameter) {
start_index = formal_parameter_count;
-
- // For spread calls/constructs with rest parameters we need to ensure that
- // the array iterator protector is intact, which guards that the rest
- // parameter iteration is not observable.
- if (node->opcode() == IrOpcode::kJSCallWithSpread ||
- node->opcode() == IrOpcode::kJSConstructWithSpread) {
- if (!isolate()->IsArrayIteratorLookupChainIntact()) return NoChange();
- dependencies()->AssumePropertyCell(factory()->array_iterator_protector());
- }
}
// For call/construct with spread, we need to also install a code
- // dependency on the initial %ArrayIteratorPrototype% map here to
- // ensure that no one messes with the next method.
+ // dependency on the array iterator lookup protector cell to ensure
+ // that no one messed with the %ArrayIteratorPrototype%.next method.
if (node->opcode() == IrOpcode::kJSCallWithSpread ||
node->opcode() == IrOpcode::kJSConstructWithSpread) {
- dependencies()->AssumeMapStable(
- isolate()->initial_array_iterator_prototype_map());
+ dependencies()->AssumePropertyCell(factory()->array_iterator_protector());
}
// Remove the {arguments_list} input from the {node}.
@@ -2926,7 +3038,7 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
// Get to the actual frame state from which to extract the arguments;
// we can only optimize this in case the {node} was already inlined into
// some other function (and same for the {arg_array}).
- FrameStateInfo outer_info = OpParameter<FrameStateInfo>(outer_state);
+ FrameStateInfo outer_info = FrameStateInfoOf(outer_state->op());
if (outer_info.type() == FrameStateType::kArgumentsAdaptor) {
// Need to take the parameters from the arguments adaptor.
frame_state = outer_state;
@@ -3186,7 +3298,10 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
}
// Check for known builtin functions.
- switch (shared->code()->builtin_index()) {
+
+ int builtin_id =
+ shared->HasBuiltinId() ? shared->builtin_id() : Builtins::kNoBuiltinId;
+ switch (builtin_id) {
case Builtins::kArrayConstructor:
return ReduceArrayConstructor(node);
case Builtins::kBooleanConstructor:
@@ -3237,6 +3352,10 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
return ReduceArrayFind(node, ArrayFindVariant::kFindIndex, shared);
case Builtins::kArrayEvery:
return ReduceArrayEvery(node, shared);
+ case Builtins::kArrayIndexOf:
+ return ReduceArrayIndexOfIncludes(SearchVariant::kIndexOf, node);
+ case Builtins::kArrayIncludes:
+ return ReduceArrayIndexOfIncludes(SearchVariant::kIncludes, node);
case Builtins::kArraySome:
return ReduceArraySome(node, shared);
case Builtins::kArrayPrototypePush:
@@ -3245,6 +3364,37 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
return ReduceArrayPrototypePop(node);
case Builtins::kArrayPrototypeShift:
return ReduceArrayPrototypeShift(node);
+ case Builtins::kArrayPrototypeEntries:
+ return ReduceArrayIterator(node, IterationKind::kEntries);
+ case Builtins::kArrayPrototypeKeys:
+ return ReduceArrayIterator(node, IterationKind::kKeys);
+ case Builtins::kArrayPrototypeValues:
+ return ReduceArrayIterator(node, IterationKind::kValues);
+ case Builtins::kArrayIteratorPrototypeNext:
+ return ReduceArrayIteratorPrototypeNext(node);
+ case Builtins::kArrayBufferIsView:
+ return ReduceArrayBufferIsView(node);
+ case Builtins::kDataViewPrototypeGetByteLength:
+ return ReduceArrayBufferViewAccessor(
+ node, JS_DATA_VIEW_TYPE,
+ AccessBuilder::ForJSArrayBufferViewByteLength());
+ case Builtins::kDataViewPrototypeGetByteOffset:
+ return ReduceArrayBufferViewAccessor(
+ node, JS_DATA_VIEW_TYPE,
+ AccessBuilder::ForJSArrayBufferViewByteOffset());
+ case Builtins::kTypedArrayPrototypeByteLength:
+ return ReduceArrayBufferViewAccessor(
+ node, JS_TYPED_ARRAY_TYPE,
+ AccessBuilder::ForJSArrayBufferViewByteLength());
+ case Builtins::kTypedArrayPrototypeByteOffset:
+ return ReduceArrayBufferViewAccessor(
+ node, JS_TYPED_ARRAY_TYPE,
+ AccessBuilder::ForJSArrayBufferViewByteOffset());
+ case Builtins::kTypedArrayPrototypeLength:
+ return ReduceArrayBufferViewAccessor(
+ node, JS_TYPED_ARRAY_TYPE, AccessBuilder::ForJSTypedArrayLength());
+ case Builtins::kTypedArrayPrototypeToStringTag:
+ return ReduceTypedArrayPrototypeToStringTag(node);
case Builtins::kMathAbs:
return ReduceMathUnary(node, simplified()->NumberAbs());
case Builtins::kMathAcos:
@@ -3313,12 +3463,24 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
case Builtins::kMathMin:
return ReduceMathMinMax(node, simplified()->NumberMin(),
jsgraph()->Constant(V8_INFINITY));
+ case Builtins::kNumberIsFinite:
+ return ReduceNumberIsFinite(node);
+ case Builtins::kNumberIsInteger:
+ return ReduceNumberIsInteger(node);
+ case Builtins::kNumberIsSafeInteger:
+ return ReduceNumberIsSafeInteger(node);
+ case Builtins::kNumberIsNaN:
+ return ReduceNumberIsNaN(node);
+ case Builtins::kMapPrototypeGet:
+ return ReduceMapPrototypeGet(node);
+ case Builtins::kMapPrototypeHas:
+ return ReduceMapPrototypeHas(node);
case Builtins::kReturnReceiver:
return ReduceReturnReceiver(node);
case Builtins::kStringPrototypeIndexOf:
return ReduceStringPrototypeIndexOf(node);
case Builtins::kStringPrototypeCharAt:
- return ReduceStringPrototypeStringAt(simplified()->StringCharAt(), node);
+ return ReduceStringPrototypeCharAt(node);
case Builtins::kStringPrototypeCharCodeAt:
return ReduceStringPrototypeStringAt(simplified()->StringCharCodeAt(),
node);
@@ -3329,6 +3491,8 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
return ReduceStringPrototypeSubstring(node);
case Builtins::kStringPrototypeSlice:
return ReduceStringPrototypeSlice(node);
+ case Builtins::kStringPrototypeSubstr:
+ return ReduceStringPrototypeSubstr(node);
#ifdef V8_INTL_SUPPORT
case Builtins::kStringPrototypeToLowerCaseIntl:
return ReduceStringPrototypeToLowerCaseIntl(node);
@@ -3337,10 +3501,20 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
#endif // V8_INTL_SUPPORT
case Builtins::kStringFromCharCode:
return ReduceStringFromCharCode(node);
+ case Builtins::kStringFromCodePoint:
+ return ReduceStringFromCodePoint(node);
case Builtins::kStringPrototypeIterator:
return ReduceStringPrototypeIterator(node);
case Builtins::kStringIteratorPrototypeNext:
return ReduceStringIteratorPrototypeNext(node);
+ case Builtins::kStringPrototypeConcat:
+ return ReduceStringPrototypeConcat(node, shared);
+ case Builtins::kTypedArrayPrototypeEntries:
+ return ReduceArrayIterator(node, IterationKind::kEntries);
+ case Builtins::kTypedArrayPrototypeKeys:
+ return ReduceArrayIterator(node, IterationKind::kKeys);
+ case Builtins::kTypedArrayPrototypeValues:
+ return ReduceArrayIterator(node, IterationKind::kValues);
case Builtins::kAsyncFunctionPromiseCreate:
return ReduceAsyncFunctionPromiseCreate(node);
case Builtins::kAsyncFunctionPromiseRelease:
@@ -3361,6 +3535,33 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
return ReducePromisePrototypeFinally(node);
case Builtins::kPromisePrototypeThen:
return ReducePromisePrototypeThen(node);
+ case Builtins::kMapPrototypeEntries:
+ return ReduceCollectionIteration(node, CollectionKind::kMap,
+ IterationKind::kEntries);
+ case Builtins::kMapPrototypeKeys:
+ return ReduceCollectionIteration(node, CollectionKind::kMap,
+ IterationKind::kKeys);
+ case Builtins::kMapPrototypeGetSize:
+ return ReduceCollectionPrototypeSize(node, CollectionKind::kMap);
+ case Builtins::kMapPrototypeValues:
+ return ReduceCollectionIteration(node, CollectionKind::kMap,
+ IterationKind::kValues);
+ case Builtins::kMapIteratorPrototypeNext:
+ return ReduceCollectionIteratorPrototypeNext(
+ node, OrderedHashMap::kEntrySize, factory()->empty_ordered_hash_map(),
+ FIRST_MAP_ITERATOR_TYPE, LAST_MAP_ITERATOR_TYPE);
+ case Builtins::kSetPrototypeEntries:
+ return ReduceCollectionIteration(node, CollectionKind::kSet,
+ IterationKind::kEntries);
+ case Builtins::kSetPrototypeGetSize:
+ return ReduceCollectionPrototypeSize(node, CollectionKind::kSet);
+ case Builtins::kSetPrototypeValues:
+ return ReduceCollectionIteration(node, CollectionKind::kSet,
+ IterationKind::kValues);
+ case Builtins::kSetIteratorPrototypeNext:
+ return ReduceCollectionIteratorPrototypeNext(
+ node, OrderedHashSet::kEntrySize, factory()->empty_ordered_hash_set(),
+ FIRST_SET_ITERATOR_TYPE, LAST_SET_ITERATOR_TYPE);
default:
break;
}
@@ -3490,44 +3691,51 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
// Don't inline cross native context.
if (function->native_context() != *native_context()) return NoChange();
- // Check for the ArrayConstructor.
- if (*function == function->native_context()->array_function()) {
- // TODO(bmeurer): Deal with Array subclasses here.
- Handle<AllocationSite> site;
- // Turn the {node} into a {JSCreateArray} call.
- for (int i = arity; i > 0; --i) {
- NodeProperties::ReplaceValueInput(
- node, NodeProperties::GetValueInput(node, i), i + 1);
- }
- NodeProperties::ReplaceValueInput(node, new_target, 1);
- NodeProperties::ChangeOp(node, javascript()->CreateArray(arity, site));
- return Changed(node);
- }
-
- // Check for the ObjectConstructor.
- if (*function == function->native_context()->object_function()) {
- // If no value is passed, we can immediately lower to a simple
- // JSCreate and don't need to do any massaging of the {node}.
- if (arity == 0) {
- NodeProperties::ChangeOp(node, javascript()->Create());
+ // Check for known builtin functions.
+ int builtin_id = function->shared()->HasBuiltinId()
+ ? function->shared()->builtin_id()
+ : Builtins::kNoBuiltinId;
+ switch (builtin_id) {
+ case Builtins::kArrayConstructor: {
+ // TODO(bmeurer): Deal with Array subclasses here.
+ Handle<AllocationSite> site;
+ // Turn the {node} into a {JSCreateArray} call.
+ for (int i = arity; i > 0; --i) {
+ NodeProperties::ReplaceValueInput(
+ node, NodeProperties::GetValueInput(node, i), i + 1);
+ }
+ NodeProperties::ReplaceValueInput(node, new_target, 1);
+ NodeProperties::ChangeOp(node,
+ javascript()->CreateArray(arity, site));
return Changed(node);
}
-
- // Otherwise we can only lower to JSCreate if we know that
- // the value parameter is ignored, which is only the case if
- // the {new_target} and {target} are definitely not identical.
- HeapObjectMatcher mnew_target(new_target);
- if (mnew_target.HasValue() && *mnew_target.Value() != *function) {
- // Drop the value inputs.
- for (int i = arity; i > 0; --i) node->RemoveInput(i);
- NodeProperties::ChangeOp(node, javascript()->Create());
- return Changed(node);
+ case Builtins::kObjectConstructor: {
+ // If no value is passed, we can immediately lower to a simple
+ // JSCreate and don't need to do any massaging of the {node}.
+ if (arity == 0) {
+ NodeProperties::ChangeOp(node, javascript()->Create());
+ return Changed(node);
+ }
+
+ // Otherwise we can only lower to JSCreate if we know that
+ // the value parameter is ignored, which is only the case if
+ // the {new_target} and {target} are definitely not identical.
+ HeapObjectMatcher mnew_target(new_target);
+ if (mnew_target.HasValue() && *mnew_target.Value() != *function) {
+ // Drop the value inputs.
+ for (int i = arity; i > 0; --i) node->RemoveInput(i);
+ NodeProperties::ChangeOp(node, javascript()->Create());
+ return Changed(node);
+ }
+ break;
}
- }
-
- // Check for the PromiseConstructor
- if (*function == function->native_context()->promise_function()) {
- return ReducePromiseConstructor(node);
+ case Builtins::kPromiseConstructor:
+ return ReducePromiseConstructor(node);
+ case Builtins::kTypedArrayConstructor:
+ return ReduceTypedArrayConstructor(
+ node, handle(function->shared(), isolate()));
+ default:
+ break;
}
} else if (m.Value()->IsJSBoundFunction()) {
Handle<JSBoundFunction> function =
@@ -3818,6 +4026,109 @@ Reduction JSCallReducer::ReduceStringPrototypeSlice(Node* node) {
return Replace(result_string);
}
+// ES #sec-string.prototype.substr
+Reduction JSCallReducer::ReduceStringPrototypeSubstr(Node* node) {
+ if (node->op()->ValueInputCount() < 3) return NoChange();
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* start = NodeProperties::GetValueInput(node, 2);
+ Node* end = node->op()->ValueInputCount() > 3
+ ? NodeProperties::GetValueInput(node, 3)
+ : jsgraph()->UndefinedConstant();
+
+ receiver = effect = graph()->NewNode(simplified()->CheckString(p.feedback()),
+ receiver, effect, control);
+
+ start = effect = graph()->NewNode(simplified()->CheckSmi(p.feedback()), start,
+ effect, control);
+
+ Node* length = graph()->NewNode(simplified()->StringLength(), receiver);
+
+ // Replace {end} argument with {length} if it is undefined.
+ {
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(), end,
+ jsgraph()->UndefinedConstant());
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = length;
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse = efalse = graph()->NewNode(
+ simplified()->CheckSmi(p.feedback()), end, efalse, if_false);
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ end = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, control);
+ }
+
+ Node* initStart = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
+ graph()->NewNode(simplified()->NumberLessThan(), start,
+ jsgraph()->ZeroConstant()),
+ graph()->NewNode(
+ simplified()->NumberMax(),
+ graph()->NewNode(simplified()->NumberAdd(), length, start),
+ jsgraph()->ZeroConstant()),
+ start);
+ // The select above guarantees that initStart is non-negative, but
+ // our typer can't figure that out yet.
+ initStart = effect = graph()->NewNode(
+ common()->TypeGuard(Type::UnsignedSmall()), initStart, effect, control);
+
+ Node* resultLength = graph()->NewNode(
+ simplified()->NumberMin(),
+ graph()->NewNode(simplified()->NumberMax(), end,
+ jsgraph()->ZeroConstant()),
+ graph()->NewNode(simplified()->NumberSubtract(), length, initStart));
+
+  // The select below uses {resultLength} only if {resultLength > 0},
+ // but our typer can't figure that out yet.
+ Node* to = effect = graph()->NewNode(
+ common()->TypeGuard(Type::UnsignedSmall()),
+ graph()->NewNode(simplified()->NumberAdd(), initStart, resultLength),
+ effect, control);
+
+ Node* result_string = nullptr;
+  // Return the empty string if {resultLength} is not positive.
+ {
+ Node* check = graph()->NewNode(simplified()->NumberLessThan(),
+ jsgraph()->ZeroConstant(), resultLength);
+
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = etrue =
+ graph()->NewNode(simplified()->StringSubstring(), receiver, initStart,
+ to, etrue, if_true);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse = jsgraph()->EmptyStringConstant();
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ result_string =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, control);
+ }
+
+ ReplaceWithValue(node, result_string, effect, control);
+ return Replace(result_string);
+}
+
Reduction JSCallReducer::ReduceJSConstructWithArrayLike(Node* node) {
DCHECK_EQ(IrOpcode::kJSConstructWithArrayLike, node->opcode());
CallFrequency frequency = CallFrequencyOf(node->op());
@@ -4308,13 +4619,300 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) {
return Replace(value);
}
-// ES6 section 21.1.3.1 String.prototype.charAt ( pos )
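+// Morphs calls to the array iteration methods (e.g. Array.prototype.values)
+// into a JSCreateArrayIterator node with the requested {kind}, once the
+// receiver is known to be a JSReceiver.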
+Reduction JSCallReducer::ReduceArrayIterator(Node* node, IterationKind kind) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Check if we know that {receiver} is a valid JSReceiver.
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+ DCHECK_NE(0, receiver_maps.size());
+ for (Handle<Map> receiver_map : receiver_maps) {
+ if (!receiver_map->IsJSReceiverMap()) return NoChange();
+ }
+
+ // Morph the {node} into a JSCreateArrayIterator with the given {kind}.
+ RelaxControls(node);
+ node->ReplaceInput(0, receiver);
+ node->ReplaceInput(1, context);
+ node->ReplaceInput(2, effect);
+ node->ReplaceInput(3, control);
+ node->TrimInputCount(4);
+ NodeProperties::ChangeOp(node, javascript()->CreateArrayIterator(kind));
+ return Changed(node);
+}
+
+namespace {
+
+bool InferIteratedObjectMaps(Node* iterator,
+ ZoneHandleSet<Map>* iterated_object_maps) {
+ DCHECK_EQ(IrOpcode::kJSCreateArrayIterator, iterator->opcode());
+ Node* iterated_object = NodeProperties::GetValueInput(iterator, 0);
+ Node* effect = NodeProperties::GetEffectInput(iterator);
+
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(iterated_object, effect,
+ iterated_object_maps);
+ return result != NodeProperties::kNoReceiverMaps;
+}
+
+} // namespace
+
+// ES #sec-%arrayiteratorprototype%.next
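+// Inlines %ArrayIteratorPrototype%.next() when the {iterator} provably comes
+// from a JSCreateArrayIterator node: the element is loaded directly from the
+// iterated JSArray or JSTypedArray and [[NextIndex]] is advanced in place.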
+Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ Node* iterator = NodeProperties::GetValueInput(node, 1);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ // Check if the {iterator} is a JSCreateArrayIterator.
+ if (iterator->opcode() != IrOpcode::kJSCreateArrayIterator) return NoChange();
+ IterationKind const iteration_kind =
+ CreateArrayIteratorParametersOf(iterator->op()).kind();
+
+ // Try to infer the [[IteratedObject]] maps from the {iterator}.
+ ZoneHandleSet<Map> iterated_object_maps;
+ if (!InferIteratedObjectMaps(iterator, &iterated_object_maps)) {
+ return NoChange();
+ }
+ DCHECK_NE(0, iterated_object_maps.size());
+
+ // Check that various {iterated_object_maps} have compatible elements kinds.
+ ElementsKind elements_kind = iterated_object_maps[0]->elements_kind();
+ if (IsFixedTypedArrayElementsKind(elements_kind)) {
+ // TurboFan doesn't support loading from BigInt typed arrays yet.
+ if (elements_kind == BIGUINT64_ELEMENTS ||
+ elements_kind == BIGINT64_ELEMENTS) {
+ return NoChange();
+ }
+ for (Handle<Map> iterated_object_map : iterated_object_maps) {
+ if (iterated_object_map->elements_kind() != elements_kind) {
+ return NoChange();
+ }
+ }
+ } else {
+ for (Handle<Map> iterated_object_map : iterated_object_maps) {
+ if (!CanInlineArrayIteratingBuiltin(iterated_object_map)) {
+ return NoChange();
+ }
+ if (!UnionElementsKindUptoSize(&elements_kind,
+ iterated_object_map->elements_kind())) {
+ return NoChange();
+ }
+ }
+ }
+
+ // Install code dependency on the array protector for holey arrays.
+ if (IsHoleyElementsKind(elements_kind)) {
+ dependencies()->AssumePropertyCell(factory()->no_elements_protector());
+ }
+
+ // Load the (current) {iterated_object} from the {iterator}; this might be
+ // either undefined or the JSReceiver that was passed to the JSArrayIterator
+ // creation.
+ Node* iterated_object = effect =
+ graph()->NewNode(simplified()->LoadField(
+ AccessBuilder::ForJSArrayIteratorIteratedObject()),
+ iterator, effect, control);
+
+ // Ensure that the {iterated_object} map didn't change. This also rules
+ // out the undefined that we put as a termination marker into the
+ // iterator.[[IteratedObject]] field once we reach the end.
+ effect = graph()->NewNode(
+ simplified()->CheckMaps(CheckMapsFlag::kNone, iterated_object_maps,
+ p.feedback()),
+ iterated_object, effect, control);
+
+ if (IsFixedTypedArrayElementsKind(elements_kind)) {
+ // See if we can skip the neutering check.
+ if (isolate()->IsArrayBufferNeuteringIntact()) {
+ // Add a code dependency so we are deoptimized in case an ArrayBuffer
+ // gets neutered.
+ dependencies()->AssumePropertyCell(
+ factory()->array_buffer_neutering_protector());
+ } else {
+ // Deoptimize if the array buffer was neutered.
+ Node* buffer = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
+ iterated_object, effect, control);
+
+ Node* check = effect = graph()->NewNode(
+ simplified()->ArrayBufferWasNeutered(), buffer, effect, control);
+ check = graph()->NewNode(simplified()->BooleanNot(), check);
+ // TODO(bmeurer): Pass p.feedback(), or better introduce
+ // CheckArrayBufferNotNeutered?
+ effect = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kArrayBufferWasNeutered),
+ check, effect, control);
+ }
+ }
+
+ // Load the [[NextIndex]] from the {iterator} and leverage the fact
+ // that we definitely know that it's in Unsigned32 range since the
+ // {iterated_object} is either a JSArray or a JSTypedArray. For the
+ // latter case we even know that it's a Smi in UnsignedSmall range.
+ FieldAccess index_access = AccessBuilder::ForJSArrayIteratorNextIndex();
+ if (IsFixedTypedArrayElementsKind(elements_kind)) {
+ index_access.type = TypeCache::Get().kJSTypedArrayLengthType;
+ index_access.machine_type = MachineType::TaggedSigned();
+ index_access.write_barrier_kind = kNoWriteBarrier;
+ } else {
+ index_access.type = TypeCache::Get().kJSArrayLengthType;
+ }
+ Node* index = effect = graph()->NewNode(simplified()->LoadField(index_access),
+ iterator, effect, control);
+
+ // Load the length of the {iterated_object}. Due to the map checks we
+ // already know something about the length here, which we can leverage
+ // to generate Word32 operations below without additional checking.
+ FieldAccess length_access =
+ IsFixedTypedArrayElementsKind(elements_kind)
+ ? AccessBuilder::ForJSTypedArrayLength()
+ : AccessBuilder::ForJSArrayLength(elements_kind);
+ Node* length = effect = graph()->NewNode(
+ simplified()->LoadField(length_access), iterated_object, effect, control);
+
+ // Check whether {index} is within the valid range for the {iterated_object}.
+ Node* check = graph()->NewNode(simplified()->NumberLessThan(), index, length);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ Node* done_true;
+ Node* value_true;
+ Node* etrue = effect;
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ {
+    // We know that the {index} is in range of the {length} now.
+ index = etrue = graph()->NewNode(
+ common()->TypeGuard(
+ Type::Range(0.0, length_access.type->Max() - 1.0, graph()->zone())),
+ index, etrue, if_true);
+
+ done_true = jsgraph()->FalseConstant();
+ if (iteration_kind == IterationKind::kKeys) {
+ // Just return the {index}.
+ value_true = index;
+ } else {
+ DCHECK(iteration_kind == IterationKind::kEntries ||
+ iteration_kind == IterationKind::kValues);
+
+ Node* elements = etrue = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
+ iterated_object, etrue, if_true);
+
+ if (IsFixedTypedArrayElementsKind(elements_kind)) {
+ Node* base_ptr = etrue = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForFixedTypedArrayBaseBasePointer()),
+ elements, etrue, if_true);
+ Node* external_ptr = etrue = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForFixedTypedArrayBaseExternalPointer()),
+ elements, etrue, if_true);
+
+ ExternalArrayType array_type = kExternalInt8Array;
+ switch (elements_kind) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case TYPE##_ELEMENTS: \
+ array_type = kExternal##Type##Array; \
+ break;
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+ default:
+ UNREACHABLE();
+#undef TYPED_ARRAY_CASE
+ }
+
+ Node* buffer = etrue =
+ graph()->NewNode(simplified()->LoadField(
+ AccessBuilder::ForJSArrayBufferViewBuffer()),
+ iterated_object, etrue, if_true);
+
+ value_true = etrue =
+ graph()->NewNode(simplified()->LoadTypedElement(array_type), buffer,
+ base_ptr, external_ptr, index, etrue, if_true);
+ } else {
+ value_true = etrue = graph()->NewNode(
+ simplified()->LoadElement(
+ AccessBuilder::ForFixedArrayElement(elements_kind)),
+ elements, index, etrue, if_true);
+
+ // Convert hole to undefined if needed.
+ if (elements_kind == HOLEY_ELEMENTS ||
+ elements_kind == HOLEY_SMI_ELEMENTS) {
+ value_true = graph()->NewNode(
+ simplified()->ConvertTaggedHoleToUndefined(), value_true);
+ } else if (elements_kind == HOLEY_DOUBLE_ELEMENTS) {
+ // TODO(6587): avoid deopt if not all uses of value are truncated.
+ CheckFloat64HoleMode mode = CheckFloat64HoleMode::kAllowReturnHole;
+ value_true = etrue = graph()->NewNode(
+ simplified()->CheckFloat64Hole(mode), value_true, etrue, if_true);
+ }
+ }
+
+ if (iteration_kind == IterationKind::kEntries) {
+ // Allocate elements for key/value pair
+ value_true = etrue =
+ graph()->NewNode(javascript()->CreateKeyValueArray(), index,
+ value_true, context, etrue);
+ } else {
+ DCHECK_EQ(IterationKind::kValues, iteration_kind);
+ }
+ }
+
+ // Increment the [[NextIndex]] field in the {iterator}. The TypeGuards
+ // above guarantee that the {next_index} is in the UnsignedSmall range.
+ Node* next_index = graph()->NewNode(simplified()->NumberAdd(), index,
+ jsgraph()->OneConstant());
+ etrue = graph()->NewNode(simplified()->StoreField(index_access), iterator,
+ next_index, etrue, if_true);
+ }
+
+ Node* done_false;
+ Node* value_false;
+ Node* efalse = effect;
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ {
+ // iterator.[[NextIndex]] >= array.length, stop iterating.
+ done_false = jsgraph()->TrueConstant();
+ value_false = jsgraph()->UndefinedConstant();
+ efalse =
+ graph()->NewNode(simplified()->StoreField(
+ AccessBuilder::ForJSArrayIteratorIteratedObject()),
+ iterator, value_false, efalse, if_false);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ Node* value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ value_true, value_false, control);
+ Node* done =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ done_true, done_false, control);
+
+ // Create IteratorResult object.
+ value = effect = graph()->NewNode(javascript()->CreateIterResultObject(),
+ value, done, context, effect);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
// ES6 section 21.1.3.2 String.prototype.charCodeAt ( pos )
// ES6 section 21.1.3.3 String.prototype.codePointAt ( pos )
Reduction JSCallReducer::ReduceStringPrototypeStringAt(
const Operator* string_access_operator, Node* node) {
- DCHECK(string_access_operator->opcode() == IrOpcode::kStringCharAt ||
- string_access_operator->opcode() == IrOpcode::kStringCharCodeAt ||
+ DCHECK(string_access_operator->opcode() == IrOpcode::kStringCharCodeAt ||
string_access_operator->opcode() == IrOpcode::kStringCodePointAt);
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
@@ -4351,6 +4949,45 @@ Reduction JSCallReducer::ReduceStringPrototypeStringAt(
return Replace(value);
}
+// ES section 21.1.3.1 String.prototype.charAt ( pos )
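+// For illustration: "abc".charAt(1) === "b". The CheckBounds below speculates
+// on an in-range {index} (deoptimizing otherwise), and the index is
+// additionally masked with the receiver length before the raw character load.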
+Reduction JSCallReducer::ReduceStringPrototypeCharAt(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* index = node->op()->ValueInputCount() >= 3
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->ZeroConstant();
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Ensure that the {receiver} is actually a String.
+ receiver = effect = graph()->NewNode(simplified()->CheckString(p.feedback()),
+ receiver, effect, control);
+
+ // Determine the {receiver} length.
+ Node* receiver_length =
+ graph()->NewNode(simplified()->StringLength(), receiver);
+
+ // Check that the {index} is within range.
+ index = effect = graph()->NewNode(simplified()->CheckBounds(p.feedback()),
+ index, receiver_length, effect, control);
+
+ // Return the character from the {receiver} as single character string.
+ Node* masked_index = graph()->NewNode(simplified()->MaskIndexWithBound(),
+ index, receiver_length);
+ Node* value = effect =
+ graph()->NewNode(simplified()->StringCharCodeAt(), receiver, masked_index,
+ effect, control);
+ value = graph()->NewNode(simplified()->StringFromSingleCharCode(), value);
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
#ifdef V8_INTL_SUPPORT
Reduction JSCallReducer::ReduceStringPrototypeToLowerCaseIntl(Node* node) {
@@ -4399,7 +5036,7 @@ Reduction JSCallReducer::ReduceStringPrototypeToUpperCaseIntl(Node* node) {
#endif // V8_INTL_SUPPORT
-// ES6 section 21.1.2.1 String.fromCharCode ( ...codeUnits )
+// ES #sec-string.fromcharcode
Reduction JSCallReducer::ReduceStringFromCharCode(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
@@ -4416,7 +5053,35 @@ Reduction JSCallReducer::ReduceStringFromCharCode(Node* node) {
p.feedback()),
input, effect, control);
- Node* value = graph()->NewNode(simplified()->StringFromCharCode(), input);
+ Node* value =
+ graph()->NewNode(simplified()->StringFromSingleCharCode(), input);
+ ReplaceWithValue(node, value, effect);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+// ES #sec-string.fromcodepoint
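+// Only the single-argument case is handled. For illustration:
+// String.fromCodePoint(0x1F642) yields a two-code-unit (surrogate pair)
+// string; the CheckBounds below restricts the input to [0, 0x10FFFF] and
+// deoptimizes for anything outside the valid code point range.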
+Reduction JSCallReducer::ReduceStringFromCodePoint(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+ if (node->op()->ValueInputCount() == 3) {
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* input = NodeProperties::GetValueInput(node, 2);
+
+ input = effect = graph()->NewNode(simplified()->CheckSmi(p.feedback()),
+ input, effect, control);
+
+ input = effect =
+ graph()->NewNode(simplified()->CheckBounds(p.feedback()), input,
+ jsgraph()->Constant(0x10FFFF + 1), effect, control);
+
+ Node* value = graph()->NewNode(
+ simplified()->StringFromSingleCodePoint(UnicodeEncoding::UTF32), input);
ReplaceWithValue(node, value, effect);
return Replace(value);
}
@@ -4471,7 +5136,8 @@ Reduction JSCallReducer::ReduceStringIteratorPrototypeNext(Node* node) {
simplified()->StringCodePointAt(UnicodeEncoding::UTF16), string,
index, etrue0, if_true0);
vtrue0 = graph()->NewNode(
- simplified()->StringFromCodePoint(UnicodeEncoding::UTF16), codepoint);
+ simplified()->StringFromSingleCodePoint(UnicodeEncoding::UTF16),
+ codepoint);
// Update iterator.[[NextIndex]]
Node* char_length =
@@ -4508,6 +5174,52 @@ Reduction JSCallReducer::ReduceStringIteratorPrototypeNext(Node* node) {
return NoChange();
}
+// ES #sec-string.prototype.concat
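+// Only the zero- and one-argument cases are handled: "a".concat() just
+// returns the (checked) receiver, and "a".concat(b) becomes a single call
+// to the StringAdd stub.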
+Reduction JSCallReducer::ReduceStringPrototypeConcat(
+ Node* node, Handle<SharedFunctionInfo> shared) {
+ if (node->op()->ValueInputCount() < 2 || node->op()->ValueInputCount() > 3) {
+ return NoChange();
+ }
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* receiver = effect =
+ graph()->NewNode(simplified()->CheckString(p.feedback()),
+ NodeProperties::GetValueInput(node, 1), effect, control);
+
+ if (node->op()->ValueInputCount() < 3) {
+ ReplaceWithValue(node, receiver, effect, control);
+ return Replace(receiver);
+ }
+ Node* argument = effect =
+ graph()->NewNode(simplified()->CheckString(p.feedback()),
+ NodeProperties::GetValueInput(node, 2), effect, control);
+
+ Callable const callable =
+ CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNeedsFrameState,
+ Operator::kNoDeopt | Operator::kNoWrite);
+
+ // TODO(turbofan): Massage the FrameState of the {node} here once we
+ // have an artificial builtin frame type, so that it looks like the
+ // exception from StringAdd overflow came from String.prototype.concat
+ // builtin instead of the calling function.
+ Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
+
+ Node* value = effect = control = graph()->NewNode(
+ common()->Call(call_descriptor), jsgraph()->HeapConstant(callable.code()),
+ receiver, argument, context, outer_frame_state, effect, control);
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
Reduction JSCallReducer::ReduceAsyncFunctionPromiseCreate(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
Node* context = NodeProperties::GetContextInput(node);
@@ -4695,6 +5407,7 @@ Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
if (!FLAG_experimental_inline_promise_constructor) return NoChange();
+ if (!isolate()->IsPromiseHookProtectorIntact()) return NoChange();
// Only handle builtins Promises, not subclasses.
if (target != new_target) return NoChange();
@@ -4714,23 +5427,25 @@ Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
node, outer_frame_state, 1, BailoutId::ConstructStubInvoke(),
FrameStateType::kConstructStub, promise_shared);
- // This frame state doesn't ever call the deopt continuation, it's only
- // necessary to specifiy a continuation in order to handle the exceptional
- // case.
- Node* checkpoint_params[] = {jsgraph()->UndefinedConstant(),
- jsgraph()->UndefinedConstant()};
- const int stack_parameters = arraysize(checkpoint_params);
-
+ // The deopt continuation of this frame state is never called; the frame state
+ // is only necessary to obtain the right stack trace.
+ const std::vector<Node*> checkpoint_parameters({
+ jsgraph()->UndefinedConstant(), /* receiver */
+ jsgraph()->UndefinedConstant(), /* promise */
+ jsgraph()->UndefinedConstant(), /* reject function */
+ jsgraph()->TheHoleConstant() /* exception */
+ });
+ int checkpoint_parameters_size =
+ static_cast<int>(checkpoint_parameters.size());
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
jsgraph(), promise_shared,
Builtins::kPromiseConstructorLazyDeoptContinuation, target, context,
- &checkpoint_params[0], stack_parameters, constructor_frame_state,
- ContinuationFrameStateMode::LAZY);
+ checkpoint_parameters.data(), checkpoint_parameters_size,
+ constructor_frame_state, ContinuationFrameStateMode::LAZY);
// Check if executor is callable
Node* check_fail = nullptr;
Node* check_throw = nullptr;
- // TODO(petermarshall): The frame state is wrong here.
WireInCallbackIsCallableCheck(executor, context, frame_state, effect,
&control, &check_fail, &check_throw);
@@ -4760,7 +5475,7 @@ Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
Node* resolve = effect =
graph()->NewNode(javascript()->CreateClosure(
resolve_shared, factory()->many_closures_cell(),
- handle(resolve_shared->code(), isolate())),
+ handle(resolve_shared->GetCode(), isolate())),
promise_context, effect, control);
// Allocate the closure for the reject case.
@@ -4770,21 +5485,21 @@ Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
Node* reject = effect =
graph()->NewNode(javascript()->CreateClosure(
reject_shared, factory()->many_closures_cell(),
- handle(reject_shared->code(), isolate())),
+ handle(reject_shared->GetCode(), isolate())),
promise_context, effect, control);
- // Re-use the params from above, but actually set the promise parameter now.
- checkpoint_params[1] = promise;
-
- // This simple continuation just returns the created promise.
- // TODO(petermarshall): If the executor function causes lazy deopt, and it
- // also throws an exception, we should catch the exception and call the reject
- // function.
+ const std::vector<Node*> checkpoint_parameters_continuation(
+ {jsgraph()->UndefinedConstant() /* receiver */, promise, reject});
+ int checkpoint_parameters_continuation_size =
+ static_cast<int>(checkpoint_parameters_continuation.size());
+ // This continuation just returns the created promise and takes care of
+ // exceptions thrown by the executor.
frame_state = CreateJavaScriptBuiltinContinuationFrameState(
jsgraph(), promise_shared,
Builtins::kPromiseConstructorLazyDeoptContinuation, target, context,
- &checkpoint_params[0], stack_parameters, constructor_frame_state,
- ContinuationFrameStateMode::LAZY);
+ checkpoint_parameters_continuation.data(),
+ checkpoint_parameters_continuation_size, constructor_frame_state,
+ ContinuationFrameStateMode::LAZY_WITH_CATCH);
// 9. Call executor with both resolving functions
effect = control = graph()->NewNode(
@@ -4997,7 +5712,7 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
// lookup of "constructor" on JSPromise instances, whoch [[Prototype]] is
// the initial %PromisePrototype%, and the Symbol.species lookup on the
// %PromisePrototype%.
- if (!isolate()->IsSpeciesLookupChainIntact()) return NoChange();
+ if (!isolate()->IsPromiseSpeciesLookupChainIntact()) return NoChange();
// Check if we know something about {receiver} already.
ZoneHandleSet<Map> receiver_maps;
@@ -5018,7 +5733,7 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
// Add a code dependency on the necessary protectors.
dependencies()->AssumePropertyCell(factory()->promise_hook_protector());
dependencies()->AssumePropertyCell(factory()->promise_then_protector());
- dependencies()->AssumePropertyCell(factory()->species_protector());
+ dependencies()->AssumePropertyCell(factory()->promise_species_protector());
// If the {receiver_maps} aren't reliable, we need to repeat the
// map check here, guarded by the CALL_IC.
@@ -5065,7 +5780,7 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
catch_true = etrue =
graph()->NewNode(javascript()->CreateClosure(
catch_finally, factory()->many_closures_cell(),
- handle(catch_finally->code(), isolate())),
+ handle(catch_finally->GetCode(), isolate())),
context, etrue, if_true);
// Allocate the closure for the fulfill case.
@@ -5074,7 +5789,7 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
then_true = etrue =
graph()->NewNode(javascript()->CreateClosure(
then_finally, factory()->many_closures_cell(),
- handle(then_finally->code(), isolate())),
+ handle(then_finally->GetCode(), isolate())),
context, etrue, if_true);
}
@@ -5144,7 +5859,7 @@ Reduction JSCallReducer::ReducePromisePrototypeThen(Node* node) {
// guards the "constructor" lookup on all JSPromise instances and the
// initial Promise.prototype, as well as the Symbol.species lookup on
// the Promise constructor.
- if (!isolate()->IsSpeciesLookupChainIntact()) return NoChange();
+ if (!isolate()->IsPromiseSpeciesLookupChainIntact()) return NoChange();
// Check if we know something about {receiver} already.
ZoneHandleSet<Map> receiver_maps;
@@ -5166,7 +5881,7 @@ Reduction JSCallReducer::ReducePromisePrototypeThen(Node* node) {
// Add a code dependency on the necessary protectors.
dependencies()->AssumePropertyCell(factory()->promise_hook_protector());
- dependencies()->AssumePropertyCell(factory()->species_protector());
+ dependencies()->AssumePropertyCell(factory()->promise_species_protector());
// If the {receiver_maps} aren't reliable, we need to repeat the
// map check here, guarded by the CALL_IC.
@@ -5239,6 +5954,626 @@ Reduction JSCallReducer::ReducePromiseResolveTrampoline(Node* node) {
return Changed(node);
}
+// ES #sec-typedarray-constructors
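+// Lowers `new Uint8Array(...)` (and the other typed array constructors) to a
+// JSCreateTypedArray node; the frame states built below make deopts inside
+// the constructor reconstruct the expected builtin frames.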
+Reduction JSCallReducer::ReduceTypedArrayConstructor(
+ Node* node, Handle<SharedFunctionInfo> shared) {
+ DCHECK_EQ(IrOpcode::kJSConstruct, node->opcode());
+ ConstructParameters const& p = ConstructParametersOf(node->op());
+ int arity = static_cast<int>(p.arity() - 2);
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ Node* arg1 = (arity >= 1) ? NodeProperties::GetValueInput(node, 1)
+ : jsgraph()->UndefinedConstant();
+ Node* arg2 = (arity >= 2) ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ Node* arg3 = (arity >= 3) ? NodeProperties::GetValueInput(node, 3)
+ : jsgraph()->UndefinedConstant();
+ Node* new_target = NodeProperties::GetValueInput(node, arity + 1);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Insert a construct stub frame into the chain of frame states. This will
+ // reconstruct the proper frame when deoptimizing within the constructor.
+ frame_state = CreateArtificialFrameState(
+ node, frame_state, arity, BailoutId::ConstructStubInvoke(),
+ FrameStateType::kConstructStub, shared);
+
+ // This continuation just returns the newly created JSTypedArray. We
+ // pass the_hole as the receiver, just like the builtin construct stub
+ // does in this case.
+ Node* const parameters[] = {jsgraph()->TheHoleConstant()};
+ int const num_parameters = static_cast<int>(arraysize(parameters));
+ frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), shared, Builtins::kTypedArrayConstructorLazyDeoptContinuation,
+ target, context, parameters, num_parameters, frame_state,
+ ContinuationFrameStateMode::LAZY);
+
+ Node* result =
+ graph()->NewNode(javascript()->CreateTypedArray(), target, new_target,
+ arg1, arg2, arg3, context, frame_state, effect, control);
+ return Replace(result);
+}
+
+// ES #sec-get-%typedarray%.prototype-@@tostringtag
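+// For illustration: (new Int32Array(1))[Symbol.toStringTag] === "Int32Array".
+// The branch cascade below dispatches on the receiver's elements kind and
+// yields undefined for Smis and non-typed-array receivers.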
+Reduction JSCallReducer::ReduceTypedArrayPrototypeToStringTag(Node* node) {
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ NodeVector values(graph()->zone());
+ NodeVector effects(graph()->zone());
+ NodeVector controls(graph()->zone());
+
+ Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), receiver);
+ control =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+
+ values.push_back(jsgraph()->UndefinedConstant());
+ effects.push_back(effect);
+ controls.push_back(graph()->NewNode(common()->IfTrue(), control));
+
+ control = graph()->NewNode(common()->IfFalse(), control);
+ Node* receiver_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ receiver, effect, control);
+ Node* receiver_bit_field2 = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapBitField2()), receiver_map,
+ effect, control);
+ Node* receiver_elements_kind = graph()->NewNode(
+ simplified()->NumberShiftRightLogical(),
+ graph()->NewNode(simplified()->NumberBitwiseAnd(), receiver_bit_field2,
+ jsgraph()->Constant(Map::ElementsKindBits::kMask)),
+ jsgraph()->Constant(Map::ElementsKindBits::kShift));
+
+ // Offset the elements kind by FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND,
+ // so that the branch cascade below is turned into a simple table
+ // switch by the ControlFlowOptimizer later.
+ receiver_elements_kind = graph()->NewNode(
+ simplified()->NumberSubtract(), receiver_elements_kind,
+ jsgraph()->Constant(FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND));
+
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ do { \
+ Node* check = graph()->NewNode( \
+ simplified()->NumberEqual(), receiver_elements_kind, \
+ jsgraph()->Constant(TYPE##_ELEMENTS - \
+ FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND)); \
+ control = graph()->NewNode(common()->Branch(), check, control); \
+ values.push_back(jsgraph()->HeapConstant( \
+ factory()->InternalizeUtf8String(#Type "Array"))); \
+ effects.push_back(effect); \
+ controls.push_back(graph()->NewNode(common()->IfTrue(), control)); \
+ control = graph()->NewNode(common()->IfFalse(), control); \
+ } while (false);
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
+ values.push_back(jsgraph()->UndefinedConstant());
+ effects.push_back(effect);
+ controls.push_back(control);
+
+ int const count = static_cast<int>(controls.size());
+ control = graph()->NewNode(common()->Merge(count), count, &controls.front());
+ effects.push_back(control);
+ effect =
+ graph()->NewNode(common()->EffectPhi(count), count + 1, &effects.front());
+ values.push_back(control);
+ Node* value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, count),
+ count + 1, &values.front());
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
+// ES #sec-number.isfinite
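+// For illustration: Number.isFinite("1") is false, since no coercion is
+// performed; the call thus lowers to a plain ObjectIsFiniteNumber check on
+// the argument (or the false constant when no argument is given).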
+Reduction JSCallReducer::ReduceNumberIsFinite(Node* node) {
+ if (node->op()->ValueInputCount() < 3) {
+ Node* value = jsgraph()->FalseConstant();
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+ Node* input = NodeProperties::GetValueInput(node, 2);
+ Node* value = graph()->NewNode(simplified()->ObjectIsFiniteNumber(), input);
+ ReplaceWithValue(node, value);
+ return Replace(value);
+}
+
+// ES #sec-number.isinteger
+Reduction JSCallReducer::ReduceNumberIsInteger(Node* node) {
+ if (node->op()->ValueInputCount() < 3) {
+ Node* value = jsgraph()->FalseConstant();
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+ Node* input = NodeProperties::GetValueInput(node, 2);
+ Node* value = graph()->NewNode(simplified()->ObjectIsInteger(), input);
+ ReplaceWithValue(node, value);
+ return Replace(value);
+}
+
+// ES #sec-number.issafeinteger
+Reduction JSCallReducer::ReduceNumberIsSafeInteger(Node* node) {
+ if (node->op()->ValueInputCount() < 3) {
+ Node* value = jsgraph()->FalseConstant();
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+ Node* input = NodeProperties::GetValueInput(node, 2);
+ Node* value = graph()->NewNode(simplified()->ObjectIsSafeInteger(), input);
+ ReplaceWithValue(node, value);
+ return Replace(value);
+}
+
+// ES #sec-number.isnan
+Reduction JSCallReducer::ReduceNumberIsNaN(Node* node) {
+ if (node->op()->ValueInputCount() < 3) {
+ Node* value = jsgraph()->FalseConstant();
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+ Node* input = NodeProperties::GetValueInput(node, 2);
+ Node* value = graph()->NewNode(simplified()->ObjectIsNaN(), input);
+ ReplaceWithValue(node, value);
+ return Replace(value);
+}
+
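+// ES #sec-map.prototype.get
+// For illustration: new Map([[1, "a"]]).get(1) === "a"; a miss in
+// FindOrderedHashMapEntry (entry == -1) yields undefined instead.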
+Reduction JSCallReducer::ReduceMapPrototypeGet(Node* node) {
+ // We only optimize if we have target, receiver and key parameters.
+ if (node->op()->ValueInputCount() != 3) return NoChange();
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* key = NodeProperties::GetValueInput(node, 2);
+
+ if (!NodeProperties::HasInstanceTypeWitness(receiver, effect, JS_MAP_TYPE))
+ return NoChange();
+
+ Node* table = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSCollectionTable()), receiver,
+ effect, control);
+
+ Node* entry = effect = graph()->NewNode(
+ simplified()->FindOrderedHashMapEntry(), table, key, effect, control);
+
+ Node* check = graph()->NewNode(simplified()->NumberEqual(), entry,
+ jsgraph()->MinusOneConstant());
+
+ Node* branch = graph()->NewNode(common()->Branch(), check, control);
+
+ // Key not found.
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = jsgraph()->UndefinedConstant();
+
+ // Key found.
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse = efalse = graph()->NewNode(
+ simplified()->LoadElement(AccessBuilder::ForOrderedHashMapEntryValue()),
+ table, entry, efalse, if_false);
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* value = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), vtrue, vfalse, control);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
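+// ES #sec-map.prototype.has
+// Same lookup as Map.prototype.get above, but the entry index is only
+// compared against -1 to produce a boolean.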
+Reduction JSCallReducer::ReduceMapPrototypeHas(Node* node) {
+ // We only optimize if we have target, receiver and key parameters.
+ if (node->op()->ValueInputCount() != 3) return NoChange();
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* key = NodeProperties::GetValueInput(node, 2);
+
+ if (!NodeProperties::HasInstanceTypeWitness(receiver, effect, JS_MAP_TYPE))
+ return NoChange();
+
+ Node* table = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSCollectionTable()), receiver,
+ effect, control);
+
+ Node* index = effect = graph()->NewNode(
+ simplified()->FindOrderedHashMapEntry(), table, key, effect, control);
+
+ Node* value = graph()->NewNode(simplified()->NumberEqual(), index,
+ jsgraph()->MinusOneConstant());
+ value = graph()->NewNode(simplified()->BooleanNot(), value);
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
+namespace {
+
+InstanceType InstanceTypeForCollectionKind(CollectionKind kind) {
+ switch (kind) {
+ case CollectionKind::kMap:
+ return JS_MAP_TYPE;
+ case CollectionKind::kSet:
+ return JS_SET_TYPE;
+ }
+ UNREACHABLE();
+}
+
+} // namespace
+
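+// Morphs Map/Set prototype iteration methods (e.g. values(), entries(),
+// @@iterator) into a JSCreateCollectionIterator node once the receiver's
+// instance type matches the given {collection_kind}.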
+Reduction JSCallReducer::ReduceCollectionIteration(
+ Node* node, CollectionKind collection_kind, IterationKind iteration_kind) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ if (NodeProperties::HasInstanceTypeWitness(
+ receiver, effect, InstanceTypeForCollectionKind(collection_kind))) {
+ Node* js_create_iterator = effect = graph()->NewNode(
+ javascript()->CreateCollectionIterator(collection_kind, iteration_kind),
+ receiver, context, effect, control);
+ ReplaceWithValue(node, js_create_iterator, effect);
+ return Replace(js_create_iterator);
+ }
+ return NoChange();
+}
+
+Reduction JSCallReducer::ReduceCollectionPrototypeSize(
+ Node* node, CollectionKind collection_kind) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ if (NodeProperties::HasInstanceTypeWitness(
+ receiver, effect, InstanceTypeForCollectionKind(collection_kind))) {
+ Node* table = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSCollectionTable()),
+ receiver, effect, control);
+ Node* value = effect = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForOrderedHashTableBaseNumberOfElements()),
+ table, effect, control);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+Reduction JSCallReducer::ReduceCollectionIteratorPrototypeNext(
+ Node* node, int entry_size, Handle<HeapObject> empty_collection,
+ InstanceType collection_iterator_instance_type_first,
+ InstanceType collection_iterator_instance_type_last) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // A word of warning to begin with: This whole method might look a bit
+ // strange at times, but that's mostly because it was carefully handcrafted
+ // to allow for full escape analysis and scalar replacement of both the
+ // collection iterator object and the iterator results, including the
+ // key-value arrays in case of Set/Map entry iteration.
+ //
+ // TODO(turbofan): Currently the escape analysis (and the store-load
+ // forwarding) is unable to eliminate the allocations for the key-value
+ // arrays in case of Set/Map entry iteration, and we should investigate
+ // how to update the escape analysis / arrange the graph in a way that
+ // this becomes possible.
+
+ // Infer the {receiver} instance type.
+ InstanceType receiver_instance_type;
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+ DCHECK_NE(0, receiver_maps.size());
+ receiver_instance_type = receiver_maps[0]->instance_type();
+ for (size_t i = 1; i < receiver_maps.size(); ++i) {
+ if (receiver_maps[i]->instance_type() != receiver_instance_type) {
+ return NoChange();
+ }
+ }
+ if (receiver_instance_type < collection_iterator_instance_type_first ||
+ receiver_instance_type > collection_iterator_instance_type_last) {
+ return NoChange();
+ }
+
+ // Transition the JSCollectionIterator {receiver} if necessary
+  // (i.e. in case the collection was mutated while we're iterating).
+ {
+ Node* done_loop;
+ Node* done_eloop;
+ Node* loop = control =
+ graph()->NewNode(common()->Loop(2), control, control);
+ Node* eloop = effect =
+ graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
+ Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
+ NodeProperties::MergeControlToEnd(graph(), common(), terminate);
+
+    // Check if we have reached the final table of the {receiver}.
+ Node* table = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSCollectionIteratorTable()),
+ receiver, effect, control);
+ Node* next_table = effect =
+ graph()->NewNode(simplified()->LoadField(
+ AccessBuilder::ForOrderedHashTableBaseNextTable()),
+ table, effect, control);
+ Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), next_table);
+ control =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ // Abort the {loop} when we reach the final table.
+ done_loop = graph()->NewNode(common()->IfTrue(), control);
+ done_eloop = effect;
+
+ // Migrate to the {next_table} otherwise.
+ control = graph()->NewNode(common()->IfFalse(), control);
+
+ // Self-heal the {receiver}s index.
+ Node* index = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSCollectionIteratorIndex()),
+ receiver, effect, control);
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kOrderedHashTableHealIndex);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNoFlags, Operator::kEliminatable);
+ index = effect =
+ graph()->NewNode(common()->Call(call_descriptor),
+ jsgraph()->HeapConstant(callable.code()), table, index,
+ jsgraph()->NoContextConstant(), effect);
+
+ index = effect = graph()->NewNode(
+ common()->TypeGuard(TypeCache::Get().kFixedArrayLengthType), index,
+ effect, control);
+
+ // Update the {index} and {table} on the {receiver}.
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSCollectionIteratorIndex()),
+ receiver, index, effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSCollectionIteratorTable()),
+ receiver, next_table, effect, control);
+
+ // Tie the knot.
+ loop->ReplaceInput(1, control);
+ eloop->ReplaceInput(1, effect);
+
+ control = done_loop;
+ effect = done_eloop;
+ }
+
+ // Get current index and table from the JSCollectionIterator {receiver}.
+ Node* index = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSCollectionIteratorIndex()),
+ receiver, effect, control);
+ Node* table = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSCollectionIteratorTable()),
+ receiver, effect, control);
+
+ // Create the {JSIteratorResult} first to ensure that we always have
+ // a dominating Allocate node for the allocation folding phase.
+ Node* iterator_result = effect = graph()->NewNode(
+ javascript()->CreateIterResultObject(), jsgraph()->UndefinedConstant(),
+ jsgraph()->TrueConstant(), context, effect);
+
+ // Look for the next non-holey key, starting from {index} in the {table}.
+ Node* controls[2];
+ Node* effects[3];
+ {
+ // Compute the currently used capacity.
+ Node* number_of_buckets = effect = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForOrderedHashTableBaseNumberOfBuckets()),
+ table, effect, control);
+ Node* number_of_elements = effect = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForOrderedHashTableBaseNumberOfElements()),
+ table, effect, control);
+ Node* number_of_deleted_elements = effect = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForOrderedHashTableBaseNumberOfDeletedElements()),
+ table, effect, control);
+ Node* used_capacity =
+ graph()->NewNode(simplified()->NumberAdd(), number_of_elements,
+ number_of_deleted_elements);
+
+ // Skip holes and update the {index}.
+ Node* loop = graph()->NewNode(common()->Loop(2), control, control);
+ Node* eloop =
+ graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
+ Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
+ NodeProperties::MergeControlToEnd(graph(), common(), terminate);
+ Node* iloop = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), index, index, loop);
+
+ Node* index = effect = graph()->NewNode(
+ common()->TypeGuard(TypeCache::Get().kFixedArrayLengthType), iloop,
+ eloop, control);
+ {
+ Node* check0 = graph()->NewNode(simplified()->NumberLessThan(), index,
+ used_capacity);
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, loop);
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* efalse0 = effect;
+ {
+ // Mark the {receiver} as exhausted.
+ efalse0 = graph()->NewNode(
+ simplified()->StoreField(
+ AccessBuilder::ForJSCollectionIteratorTable()),
+ receiver, jsgraph()->HeapConstant(empty_collection), efalse0,
+ if_false0);
+
+ controls[0] = if_false0;
+ effects[0] = efalse0;
+ }
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* etrue0 = effect;
+ {
+ // Load the key of the entry.
+ Node* entry_start_position = graph()->NewNode(
+ simplified()->NumberAdd(),
+ graph()->NewNode(
+ simplified()->NumberAdd(),
+ graph()->NewNode(simplified()->NumberMultiply(), index,
+ jsgraph()->Constant(entry_size)),
+ number_of_buckets),
+ jsgraph()->Constant(OrderedHashTableBase::kHashTableStartIndex));
+ Node* entry_key = etrue0 = graph()->NewNode(
+ simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()),
+ table, entry_start_position, etrue0, if_true0);
+
+ // Advance the index.
+ index = graph()->NewNode(simplified()->NumberAdd(), index,
+ jsgraph()->OneConstant());
+
+ Node* check1 =
+ graph()->NewNode(simplified()->ReferenceEqual(), entry_key,
+ jsgraph()->TheHoleConstant());
+ Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check1, if_true0);
+
+ {
+ // Abort loop with resulting value.
+ Node* control = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* effect = etrue0;
+ Node* value = effect =
+ graph()->NewNode(common()->TypeGuard(Type::NonInternal()),
+ entry_key, effect, control);
+ Node* done = jsgraph()->FalseConstant();
+
+ // Advance the index on the {receiver}.
+ effect = graph()->NewNode(
+ simplified()->StoreField(
+ AccessBuilder::ForJSCollectionIteratorIndex()),
+ receiver, index, effect, control);
+
+ // The actual {value} depends on the {receiver} iteration type.
+ switch (receiver_instance_type) {
+ case JS_MAP_KEY_ITERATOR_TYPE:
+ case JS_SET_VALUE_ITERATOR_TYPE:
+ break;
+
+ case JS_SET_KEY_VALUE_ITERATOR_TYPE:
+ value = effect =
+ graph()->NewNode(javascript()->CreateKeyValueArray(), value,
+ value, context, effect);
+ break;
+
+ case JS_MAP_VALUE_ITERATOR_TYPE:
+ value = effect = graph()->NewNode(
+ simplified()->LoadElement(
+ AccessBuilder::ForFixedArrayElement()),
+ table,
+ graph()->NewNode(
+ simplified()->NumberAdd(), entry_start_position,
+ jsgraph()->Constant(OrderedHashMap::kValueOffset)),
+ effect, control);
+ break;
+
+ case JS_MAP_KEY_VALUE_ITERATOR_TYPE:
+ value = effect = graph()->NewNode(
+ simplified()->LoadElement(
+ AccessBuilder::ForFixedArrayElement()),
+ table,
+ graph()->NewNode(
+ simplified()->NumberAdd(), entry_start_position,
+ jsgraph()->Constant(OrderedHashMap::kValueOffset)),
+ effect, control);
+ value = effect =
+ graph()->NewNode(javascript()->CreateKeyValueArray(),
+ entry_key, value, context, effect);
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ // Store final {value} and {done} into the {iterator_result}.
+ effect =
+ graph()->NewNode(simplified()->StoreField(
+ AccessBuilder::ForJSIteratorResultValue()),
+ iterator_result, value, effect, control);
+ effect =
+ graph()->NewNode(simplified()->StoreField(
+ AccessBuilder::ForJSIteratorResultDone()),
+ iterator_result, done, effect, control);
+
+ controls[1] = control;
+ effects[1] = effect;
+ }
+
+ // Continue with next loop index.
+ loop->ReplaceInput(1, graph()->NewNode(common()->IfTrue(), branch1));
+ eloop->ReplaceInput(1, etrue0);
+ iloop->ReplaceInput(1, index);
+ }
+ }
+
+ control = effects[2] = graph()->NewNode(common()->Merge(2), 2, controls);
+ effect = graph()->NewNode(common()->EffectPhi(2), 3, effects);
+ }
+
+ // Yield the final {iterator_result}.
+ ReplaceWithValue(node, iterator_result, effect, control);
+ return Replace(iterator_result);
+}
+
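+// ES #sec-arraybuffer.isview
+// ArrayBuffer.isView(x) is a pure type check, so the call is morphed into an
+// ObjectIsArrayBufferView node on the first argument (undefined if absent).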
+Reduction JSCallReducer::ReduceArrayBufferIsView(Node* node) {
+ Node* value = node->op()->ValueInputCount() >= 3
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ RelaxEffectsAndControls(node);
+ node->ReplaceInput(0, value);
+ node->TrimInputCount(1);
+ NodeProperties::ChangeOp(node, simplified()->ObjectIsArrayBufferView());
+ return Changed(node);
+}
+
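+// Shared lowering for getters on ArrayBuffer views (e.g. byteLength and
+// byteOffset): load the requested field and, unless the neutering protector
+// is intact, fall back to zero when the backing buffer was neutered.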
+Reduction JSCallReducer::ReduceArrayBufferViewAccessor(
+ Node* node, InstanceType instance_type, FieldAccess const& access) {
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ if (NodeProperties::HasInstanceTypeWitness(receiver, effect, instance_type)) {
+ // Load the {receiver}s field.
+ Node* value = effect = graph()->NewNode(simplified()->LoadField(access),
+ receiver, effect, control);
+
+ // See if we can skip the neutering check.
+ if (isolate()->IsArrayBufferNeuteringIntact()) {
+ // Add a code dependency so we are deoptimized in case an ArrayBuffer
+ // gets neutered.
+ dependencies()->AssumePropertyCell(
+ factory()->array_buffer_neutering_protector());
+ } else {
+ // Check if the {receiver}s buffer was neutered.
+ Node* receiver_buffer = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
+ receiver, effect, control);
+ Node* check = effect =
+ graph()->NewNode(simplified()->ArrayBufferWasNeutered(),
+ receiver_buffer, effect, control);
+
+ // Default to zero if the {receiver}s buffer was neutered.
+ value = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
+ check, jsgraph()->ZeroConstant(), value);
+ }
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
Graph* JSCallReducer::graph() const { return jsgraph()->graph(); }
Isolate* JSCallReducer::isolate() const { return jsgraph()->isolate(); }
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
index 675cc6df83..36a5ac2e7a 100644
--- a/deps/v8/src/compiler/js-call-reducer.h
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -23,6 +23,7 @@ namespace compiler {
// Forward declarations.
class CallFrequency;
class CommonOperatorBuilder;
+struct FieldAccess;
class JSGraph;
class JSOperatorBuilder;
class SimplifiedOperatorBuilder;
@@ -83,10 +84,19 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
Reduction ReduceArrayFind(Node* node, ArrayFindVariant variant,
Handle<SharedFunctionInfo> shared);
Reduction ReduceArrayEvery(Node* node, Handle<SharedFunctionInfo> shared);
+ enum class SearchVariant { kIncludes, kIndexOf };
+ Reduction ReduceArrayIndexOfIncludes(SearchVariant search_variant,
+ Node* node);
Reduction ReduceArraySome(Node* node, Handle<SharedFunctionInfo> shared);
Reduction ReduceArrayPrototypePush(Node* node);
Reduction ReduceArrayPrototypePop(Node* node);
Reduction ReduceArrayPrototypeShift(Node* node);
+ enum class ArrayIteratorKind { kArray, kTypedArray };
+ Reduction ReduceArrayIterator(Node* node, IterationKind kind);
+ Reduction ReduceArrayIteratorPrototypeNext(Node* node);
+ Reduction ReduceFastArrayIteratorNext(InstanceType type, Node* node,
+ IterationKind kind);
+
Reduction ReduceCallOrConstructWithArrayLikeOrSpread(
Node* node, int arity, CallFrequency const& frequency,
VectorSlotPair const& feedback);
@@ -101,8 +111,10 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
Reduction ReduceStringPrototypeIndexOf(Node* node);
Reduction ReduceStringPrototypeSubstring(Node* node);
Reduction ReduceStringPrototypeSlice(Node* node);
+ Reduction ReduceStringPrototypeSubstr(Node* node);
Reduction ReduceStringPrototypeStringAt(
const Operator* string_access_operator, Node* node);
+ Reduction ReduceStringPrototypeCharAt(Node* node);
#ifdef V8_INTL_SUPPORT
Reduction ReduceStringPrototypeToLowerCaseIntl(Node* node);
@@ -110,8 +122,11 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
#endif // V8_INTL_SUPPORT
Reduction ReduceStringFromCharCode(Node* node);
+ Reduction ReduceStringFromCodePoint(Node* node);
Reduction ReduceStringPrototypeIterator(Node* node);
Reduction ReduceStringIteratorPrototypeNext(Node* node);
+ Reduction ReduceStringPrototypeConcat(Node* node,
+ Handle<SharedFunctionInfo> shared);
Reduction ReduceAsyncFunctionPromiseCreate(Node* node);
Reduction ReduceAsyncFunctionPromiseRelease(Node* node);
@@ -126,6 +141,10 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
Reduction ReducePromisePrototypeThen(Node* node);
Reduction ReducePromiseResolveTrampoline(Node* node);
+ Reduction ReduceTypedArrayConstructor(Node* node,
+ Handle<SharedFunctionInfo> shared);
+ Reduction ReduceTypedArrayPrototypeToStringTag(Node* node);
+
Reduction ReduceSoftDeoptimize(Node* node, DeoptimizeReason reason);
Reduction ReduceMathUnary(Node* node, const Operator* op);
@@ -134,6 +153,28 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
Reduction ReduceMathClz32(Node* node);
Reduction ReduceMathMinMax(Node* node, const Operator* op, Node* empty_value);
+ Reduction ReduceNumberIsFinite(Node* node);
+ Reduction ReduceNumberIsInteger(Node* node);
+ Reduction ReduceNumberIsSafeInteger(Node* node);
+ Reduction ReduceNumberIsNaN(Node* node);
+
+ Reduction ReduceMapPrototypeHas(Node* node);
+ Reduction ReduceMapPrototypeGet(Node* node);
+ Reduction ReduceCollectionIteration(Node* node,
+ CollectionKind collection_kind,
+ IterationKind iteration_kind);
+ Reduction ReduceCollectionPrototypeSize(Node* node,
+ CollectionKind collection_kind);
+ Reduction ReduceCollectionIteratorPrototypeNext(
+ Node* node, int entry_size, Handle<HeapObject> empty_collection,
+ InstanceType collection_iterator_instance_type_first,
+ InstanceType collection_iterator_instance_type_last);
+
+ Reduction ReduceArrayBufferIsView(Node* node);
+ Reduction ReduceArrayBufferViewAccessor(Node* node,
+ InstanceType instance_type,
+ FieldAccess const& access);
+
// Returns the updated {to} node, and updates control and effect along the
// way.
Node* DoFilterPostCallbackWork(ElementsKind kind, Node** control,
diff --git a/deps/v8/src/compiler/js-context-specialization.cc b/deps/v8/src/compiler/js-context-specialization.cc
index ca7bcdfb66..9f833b60eb 100644
--- a/deps/v8/src/compiler/js-context-specialization.cc
+++ b/deps/v8/src/compiler/js-context-specialization.cc
@@ -103,7 +103,7 @@ MaybeHandle<Context> GetSpecializationContext(Node* node, size_t* distance,
Maybe<OuterContext> maybe_outer) {
switch (node->opcode()) {
case IrOpcode::kHeapConstant: {
- Handle<Object> object = OpParameter<Handle<HeapObject>>(node);
+ Handle<Object> object = HeapConstantOf(node->op());
if (object->IsContext()) return Handle<Context>::cast(object);
break;
}
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index f535b52a27..1db4965f66 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -29,7 +29,7 @@ namespace {
// Retrieves the frame state holding actual argument values.
Node* GetArgumentsFrameState(Node* frame_state) {
Node* const outer_state = NodeProperties::GetFrameStateInput(frame_state);
- FrameStateInfo outer_state_info = OpParameter<FrameStateInfo>(outer_state);
+ FrameStateInfo outer_state_info = FrameStateInfoOf(outer_state->op());
return outer_state_info.type() == FrameStateType::kArgumentsAdaptor
? outer_state
: frame_state;
@@ -137,10 +137,14 @@ Reduction JSCreateLowering::Reduce(Node* node) {
return ReduceJSCreateArguments(node);
case IrOpcode::kJSCreateArray:
return ReduceJSCreateArray(node);
+ case IrOpcode::kJSCreateArrayIterator:
+ return ReduceJSCreateArrayIterator(node);
case IrOpcode::kJSCreateBoundFunction:
return ReduceJSCreateBoundFunction(node);
case IrOpcode::kJSCreateClosure:
return ReduceJSCreateClosure(node);
+ case IrOpcode::kJSCreateCollectionIterator:
+ return ReduceJSCreateCollectionIterator(node);
case IrOpcode::kJSCreateIterResultObject:
return ReduceJSCreateIterResultObject(node);
case IrOpcode::kJSCreateStringIterator:
@@ -232,7 +236,7 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
Node* const frame_state = NodeProperties::GetFrameStateInput(node);
Node* const outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
Node* const control = graph()->start();
- FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
+ FrameStateInfo state_info = FrameStateInfoOf(frame_state->op());
Handle<SharedFunctionInfo> shared =
state_info.shared_info().ToHandleChecked();
@@ -357,7 +361,7 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
// pruned anyway.
return NoChange();
}
- FrameStateInfo args_state_info = OpParameter<FrameStateInfo>(args_state);
+ FrameStateInfo args_state_info = FrameStateInfoOf(args_state->op());
// Prepare element backing store to be used by arguments object.
bool has_aliased_arguments = false;
Node* const elements = AllocateAliasedArguments(
@@ -397,7 +401,7 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
// pruned anyway.
return NoChange();
}
- FrameStateInfo args_state_info = OpParameter<FrameStateInfo>(args_state);
+ FrameStateInfo args_state_info = FrameStateInfoOf(args_state->op());
// Prepare element backing store to be used by arguments object.
Node* const elements = AllocateArguments(effect, control, args_state);
effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
@@ -433,7 +437,7 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
// pruned anyway.
return NoChange();
}
- FrameStateInfo args_state_info = OpParameter<FrameStateInfo>(args_state);
+ FrameStateInfo args_state_info = FrameStateInfoOf(args_state->op());
// Prepare element backing store to be used by the rest array.
Node* const elements =
AllocateRestArguments(effect, control, args_state, start_index);
@@ -491,9 +495,14 @@ Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) {
// Allocate a register file.
DCHECK(js_function->shared()->HasBytecodeArray());
- int size = js_function->shared()->bytecode_array()->register_count();
- Node* register_file = effect =
- AllocateElements(effect, control, HOLEY_ELEMENTS, size, NOT_TENURED);
+ int size = js_function->shared()->GetBytecodeArray()->register_count();
+ AllocationBuilder ab(jsgraph(), effect, control);
+ ab.AllocateArray(size, factory()->fixed_array_map());
+ for (int i = 0; i < size; ++i) {
+ ab.Store(AccessBuilder::ForFixedArraySlot(i),
+ jsgraph()->UndefinedConstant());
+ }
+ Node* register_file = effect = ab.Finish();
// Emit code to allocate the JS[Async]GeneratorObject instance.
AllocationBuilder a(jsgraph(), effect, control);
@@ -877,6 +886,96 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
return ReduceNewArrayToStubCall(node, site);
}
+Reduction JSCreateLowering::ReduceJSCreateArrayIterator(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreateArrayIterator, node->opcode());
+ CreateArrayIteratorParameters const& p =
+ CreateArrayIteratorParametersOf(node->op());
+ Node* iterated_object = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Create the JSArrayIterator result.
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.Allocate(JSArrayIterator::kSize, NOT_TENURED, Type::OtherObject());
+ a.Store(AccessBuilder::ForMap(),
+ handle(native_context()->initial_array_iterator_map(), isolate()));
+ a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
+ jsgraph()->EmptyFixedArrayConstant());
+ a.Store(AccessBuilder::ForJSObjectElements(),
+ jsgraph()->EmptyFixedArrayConstant());
+ a.Store(AccessBuilder::ForJSArrayIteratorIteratedObject(), iterated_object);
+ a.Store(AccessBuilder::ForJSArrayIteratorNextIndex(),
+ jsgraph()->ZeroConstant());
+ a.Store(AccessBuilder::ForJSArrayIteratorKind(),
+ jsgraph()->Constant(static_cast<int>(p.kind())));
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
+}
+
+namespace {
+
+Context::Field ContextFieldForCollectionIterationKind(
+ CollectionKind collection_kind, IterationKind iteration_kind) {
+ switch (collection_kind) {
+ case CollectionKind::kSet:
+ switch (iteration_kind) {
+ case IterationKind::kKeys:
+ UNREACHABLE();
+ case IterationKind::kValues:
+ return Context::SET_VALUE_ITERATOR_MAP_INDEX;
+ case IterationKind::kEntries:
+ return Context::SET_KEY_VALUE_ITERATOR_MAP_INDEX;
+ }
+ break;
+ case CollectionKind::kMap:
+ switch (iteration_kind) {
+ case IterationKind::kKeys:
+ return Context::MAP_KEY_ITERATOR_MAP_INDEX;
+ case IterationKind::kValues:
+ return Context::MAP_VALUE_ITERATOR_MAP_INDEX;
+ case IterationKind::kEntries:
+ return Context::MAP_KEY_VALUE_ITERATOR_MAP_INDEX;
+ }
+ break;
+ }
+ UNREACHABLE();
+}
+
+} // namespace
+
+Reduction JSCreateLowering::ReduceJSCreateCollectionIterator(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreateCollectionIterator, node->opcode());
+ CreateCollectionIteratorParameters const& p =
+ CreateCollectionIteratorParametersOf(node->op());
+ Node* iterated_object = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Load the OrderedHashTable from the {receiver}.
+ Node* table = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSCollectionTable()),
+ iterated_object, effect, control);
+
+ // Create the JSArrayIterator result.
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.Allocate(JSCollectionIterator::kSize, NOT_TENURED, Type::OtherObject());
+ a.Store(AccessBuilder::ForMap(),
+ handle(native_context()->get(ContextFieldForCollectionIterationKind(
+ p.collection_kind(), p.iteration_kind())),
+ isolate()));
+ a.Store(AccessBuilder::ForJSObjectPropertiesOrHash(),
+ jsgraph()->EmptyFixedArrayConstant());
+ a.Store(AccessBuilder::ForJSObjectElements(),
+ jsgraph()->EmptyFixedArrayConstant());
+ a.Store(AccessBuilder::ForJSCollectionIteratorTable(), table);
+ a.Store(AccessBuilder::ForJSCollectionIteratorIndex(),
+ jsgraph()->ZeroConstant());
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
+}
+
Reduction JSCreateLowering::ReduceJSCreateBoundFunction(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateBoundFunction, node->opcode());
CreateBoundFunctionParameters const& p =
@@ -1207,7 +1306,7 @@ Reduction JSCreateLowering::ReduceJSCreateFunctionContext(Node* node) {
default:
UNREACHABLE();
}
- a.AllocateArray(context_length, map);
+ a.AllocateContext(context_length, map);
a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
@@ -1226,7 +1325,7 @@ Reduction JSCreateLowering::ReduceJSCreateFunctionContext(Node* node) {
Reduction JSCreateLowering::ReduceJSCreateWithContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateWithContext, node->opcode());
- Handle<ScopeInfo> scope_info = OpParameter<Handle<ScopeInfo>>(node);
+ Handle<ScopeInfo> scope_info = ScopeInfoOf(node->op());
Node* object = NodeProperties::GetValueInput(node, 0);
Node* closure = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
@@ -1242,7 +1341,7 @@ Reduction JSCreateLowering::ReduceJSCreateWithContext(Node* node) {
AllocationBuilder a(jsgraph(), extension, control);
STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4); // Ensure fully covered.
- a.AllocateArray(Context::MIN_CONTEXT_SLOTS, factory()->with_context_map());
+ a.AllocateContext(Context::MIN_CONTEXT_SLOTS, factory()->with_context_map());
a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
@@ -1274,8 +1373,8 @@ Reduction JSCreateLowering::ReduceJSCreateCatchContext(Node* node) {
AllocationBuilder a(jsgraph(), extension, control);
STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4); // Ensure fully covered.
- a.AllocateArray(Context::MIN_CONTEXT_SLOTS + 1,
- factory()->catch_context_map());
+ a.AllocateContext(Context::MIN_CONTEXT_SLOTS + 1,
+ factory()->catch_context_map());
a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
@@ -1290,7 +1389,7 @@ Reduction JSCreateLowering::ReduceJSCreateCatchContext(Node* node) {
Reduction JSCreateLowering::ReduceJSCreateBlockContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateBlockContext, node->opcode());
- Handle<ScopeInfo> scope_info = OpParameter<Handle<ScopeInfo>>(node);
+ Handle<ScopeInfo> scope_info = ScopeInfoOf(node->op());
int const context_length = scope_info->ContextLength();
Node* const closure = NodeProperties::GetValueInput(node, 0);
@@ -1304,7 +1403,7 @@ Reduction JSCreateLowering::ReduceJSCreateBlockContext(Node* node) {
AllocationBuilder a(jsgraph(), effect, control);
STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4); // Ensure fully covered.
- a.AllocateArray(context_length, factory()->block_context_map());
+ a.AllocateContext(context_length, factory()->block_context_map());
a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
@@ -1325,7 +1424,7 @@ Reduction JSCreateLowering::ReduceJSCreateBlockContext(Node* node) {
// given {frame_state}. Serves as backing store for JSCreateArguments nodes.
Node* JSCreateLowering::AllocateArguments(Node* effect, Node* control,
Node* frame_state) {
- FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
+ FrameStateInfo state_info = FrameStateInfoOf(frame_state->op());
int argument_count = state_info.parameter_count() - 1; // Minus receiver.
if (argument_count == 0) return jsgraph()->EmptyFixedArrayConstant();
@@ -1349,7 +1448,7 @@ Node* JSCreateLowering::AllocateArguments(Node* effect, Node* control,
Node* JSCreateLowering::AllocateRestArguments(Node* effect, Node* control,
Node* frame_state,
int start_index) {
- FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
+ FrameStateInfo state_info = FrameStateInfoOf(frame_state->op());
int argument_count = state_info.parameter_count() - 1; // Minus receiver.
int num_elements = std::max(0, argument_count - start_index);
if (num_elements == 0) return jsgraph()->EmptyFixedArrayConstant();
@@ -1380,7 +1479,7 @@ Node* JSCreateLowering::AllocateRestArguments(Node* effect, Node* control,
Node* JSCreateLowering::AllocateAliasedArguments(
Node* effect, Node* control, Node* frame_state, Node* context,
Handle<SharedFunctionInfo> shared, bool* has_aliased_arguments) {
- FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
+ FrameStateInfo state_info = FrameStateInfoOf(frame_state->op());
int argument_count = state_info.parameter_count() - 1; // Minus receiver.
if (argument_count == 0) return jsgraph()->EmptyFixedArrayConstant();
diff --git a/deps/v8/src/compiler/js-create-lowering.h b/deps/v8/src/compiler/js-create-lowering.h
index 00e8b73459..af245f3f63 100644
--- a/deps/v8/src/compiler/js-create-lowering.h
+++ b/deps/v8/src/compiler/js-create-lowering.h
@@ -50,6 +50,8 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
Reduction ReduceJSCreate(Node* node);
Reduction ReduceJSCreateArguments(Node* node);
Reduction ReduceJSCreateArray(Node* node);
+ Reduction ReduceJSCreateArrayIterator(Node* node);
+ Reduction ReduceJSCreateCollectionIterator(Node* node);
Reduction ReduceJSCreateBoundFunction(Node* node);
Reduction ReduceJSCreateClosure(Node* node);
Reduction ReduceJSCreateIterResultObject(Node* node);
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index aa26b33997..99c905dae3 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -14,6 +14,8 @@
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
+#include "src/feedback-vector.h"
+#include "src/objects/scope-info.h"
namespace v8 {
namespace internal {
@@ -279,6 +281,16 @@ void JSGenericLowering::LowerJSStoreDataPropertyInLiteral(Node* node) {
ReplaceWithRuntimeCall(node, Runtime::kDefineDataPropertyInLiteral);
}
+void JSGenericLowering::LowerJSStoreInArrayLiteral(Node* node) {
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kStoreInArrayLiteralIC);
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ FeedbackParameter const& p = FeedbackParameterOf(node->op());
+ node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
+ node->InsertInput(zone(), 4, jsgraph()->HeapConstant(p.feedback().vector()));
+ ReplaceWithStubCall(node, callable, flags);
+}
+
void JSGenericLowering::LowerJSDeleteProperty(Node* node) {
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
Callable callable =
@@ -365,6 +377,14 @@ void JSGenericLowering::LowerJSCreateArray(Node* node) {
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
}
+void JSGenericLowering::LowerJSCreateArrayIterator(Node* node) {
+ UNREACHABLE(); // Eliminated in typed lowering.
+}
+
+void JSGenericLowering::LowerJSCreateCollectionIterator(Node* node) {
+ UNREACHABLE(); // Eliminated in typed lowering.
+}
+
void JSGenericLowering::LowerJSCreateBoundFunction(Node* node) {
UNREACHABLE(); // Eliminated in typed lowering.
}
@@ -430,6 +450,13 @@ void JSGenericLowering::LowerJSCreatePromise(Node* node) {
UNREACHABLE(); // Eliminated in typed lowering.
}
+void JSGenericLowering::LowerJSCreateTypedArray(Node* node) {
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kCreateTypedArray);
+ ReplaceWithStubCall(node, callable, flags);
+}
+
void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
@@ -508,7 +535,7 @@ void JSGenericLowering::LowerJSCreateWithContext(Node* node) {
}
void JSGenericLowering::LowerJSCreateBlockContext(Node* node) {
- Handle<ScopeInfo> scope_info = OpParameter<Handle<ScopeInfo>>(node);
+ Handle<ScopeInfo> scope_info = ScopeInfoOf(node->op());
node->InsertInput(zone(), 0, jsgraph()->HeapConstant(scope_info));
ReplaceWithRuntimeCall(node, Runtime::kPushBlockContext);
}
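
The new LowerJSStoreInArrayLiteral follows the usual generic-lowering recipe: append the feedback slot index and vector as extra value inputs, then hand the node to ReplaceWithStubCall, which prepends the stub's code object and rewrites the operator into a plain call. ReplaceWithStubCall is not part of this hunk; a rough, hedged sketch of what the existing helper is assumed to do:

    // Sketch only: assumed shape of the pre-existing helper, not part of this patch.
    void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
                                                CallDescriptor::Flags flags) {
      auto* call_descriptor = Linkage::GetStubCallDescriptor(
          isolate(), zone(), callable.descriptor(),
          callable.descriptor().GetStackParameterCount(), flags);
      // The stub code object becomes input 0; the JS operator is replaced by Call.
      node->InsertInput(zone(), 0, jsgraph()->HeapConstant(callable.code()));
      NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
    }

Under that assumption the lowered node carries (code, array, index, value, slot, vector, ...) in the order the StoreInArrayLiteralIC descriptor expects.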
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc
index c9909dcb75..c2c0960082 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.cc
+++ b/deps/v8/src/compiler/js-inlining-heuristic.cc
@@ -4,12 +4,12 @@
#include "src/compiler/js-inlining-heuristic.h"
-#include "src/compilation-info.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/simplified-operator.h"
#include "src/objects-inl.h"
+#include "src/optimized-compilation-info.h"
namespace v8 {
namespace internal {
@@ -62,7 +62,7 @@ bool CanInlineFunction(Handle<SharedFunctionInfo> shared) {
if (!shared->HasBytecodeArray()) return false;
// Quick check on the size of the bytecode to avoid inlining large functions.
- if (shared->bytecode_array()->length() > FLAG_max_inlined_bytecode_size) {
+ if (shared->GetBytecodeArray()->length() > FLAG_max_inlined_bytecode_size) {
return false;
}
@@ -72,7 +72,7 @@ bool CanInlineFunction(Handle<SharedFunctionInfo> shared) {
bool IsSmallInlineFunction(Handle<SharedFunctionInfo> shared) {
// Forcibly inline small functions.
// Don't forcibly inline functions that weren't compiled yet.
- if (shared->HasBytecodeArray() && shared->bytecode_array()->length() <=
+ if (shared->HasBytecodeArray() && shared->GetBytecodeArray()->length() <=
FLAG_max_inlined_bytecode_size_small) {
return true;
}
@@ -107,7 +107,7 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
bool can_inline = false, small_inline = true;
candidate.total_size = 0;
Node* frame_state = NodeProperties::GetFrameStateInput(node);
- FrameStateInfo const& frame_info = OpParameter<FrameStateInfo>(frame_state);
+ FrameStateInfo const& frame_info = FrameStateInfoOf(frame_state->op());
Handle<SharedFunctionInfo> frame_shared_info;
for (int i = 0; i < candidate.num_functions; ++i) {
Handle<SharedFunctionInfo> shared =
@@ -132,7 +132,7 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
}
if (candidate.can_inline_function[i]) {
can_inline = true;
- candidate.total_size += shared->bytecode_array()->length();
+ candidate.total_size += shared->GetBytecodeArray()->length();
}
if (!IsSmallInlineFunction(shared)) {
small_inline = false;
@@ -610,7 +610,7 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate,
: handle(candidate.functions[0]->shared());
Reduction const reduction = inliner_.ReduceJSCall(node);
if (reduction.Changed()) {
- cumulative_count_ += shared->bytecode_array()->length();
+ cumulative_count_ += shared->GetBytecodeArray()->length();
}
return reduction;
}
@@ -679,7 +679,7 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate,
// Killing the call node is not strictly necessary, but it is safer to
// make sure we do not resurrect the node.
node->Kill();
- cumulative_count_ += function->shared()->bytecode_array()->length();
+ cumulative_count_ += function->shared()->GetBytecodeArray()->length();
}
}
}
@@ -720,7 +720,7 @@ void JSInliningHeuristic::PrintCandidates() {
candidate.functions[i].is_null()
? candidate.shared_info
: handle(candidate.functions[i]->shared());
- PrintF(" - size:%d, name: %s\n", shared->bytecode_array()->length(),
+ PrintF(" - size:%d, name: %s\n", shared->GetBytecodeArray()->length(),
shared->DebugName()->ToCString().get());
}
}
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.h b/deps/v8/src/compiler/js-inlining-heuristic.h
index f4f24f41b4..af004011e3 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.h
+++ b/deps/v8/src/compiler/js-inlining-heuristic.h
@@ -15,7 +15,7 @@ class JSInliningHeuristic final : public AdvancedReducer {
public:
enum Mode { kGeneralInlining, kRestrictedInlining, kStressInlining };
JSInliningHeuristic(Editor* editor, Mode mode, Zone* local_zone,
- CompilationInfo* info, JSGraph* jsgraph,
+ OptimizedCompilationInfo* info, JSGraph* jsgraph,
SourcePositionTable* source_positions)
: AdvancedReducer(editor),
mode_(mode),
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index a995b038a8..139fe9afb8 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -5,7 +5,6 @@
#include "src/compiler/js-inlining.h"
#include "src/ast/ast.h"
-#include "src/compilation-info.h"
#include "src/compiler.h"
#include "src/compiler/all-nodes.h"
#include "src/compiler/bytecode-graph-builder.h"
@@ -18,6 +17,7 @@
#include "src/compiler/operator-properties.h"
#include "src/compiler/simplified-operator.h"
#include "src/isolate-inl.h"
+#include "src/optimized-compilation-info.h"
#include "src/parsing/parse-info.h"
namespace v8 {
@@ -261,22 +261,13 @@ namespace {
// TODO(mstarzinger,verwaest): Move this predicate onto SharedFunctionInfo?
bool NeedsImplicitReceiver(Handle<SharedFunctionInfo> shared_info) {
DisallowHeapAllocation no_gc;
- Isolate* const isolate = shared_info->GetIsolate();
- Code* const construct_stub = shared_info->construct_stub();
- if (construct_stub == *isolate->builtins()->JSConstructStubGeneric()) {
+ if (!shared_info->construct_as_builtin()) {
return !IsDerivedConstructor(shared_info->kind());
} else {
return false;
}
}
-bool IsNonConstructible(Handle<SharedFunctionInfo> shared_info) {
- DisallowHeapAllocation no_gc;
- Isolate* const isolate = shared_info->GetIsolate();
- Code* const construct_stub = shared_info->construct_stub();
- return construct_stub == *BUILTIN_CODE(isolate, ConstructedNonConstructable);
-}
-
} // namespace
// Determines whether the call target of the given call {node} is statically
@@ -402,7 +393,7 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
// Constructor must be constructable.
if (node->opcode() == IrOpcode::kJSConstruct &&
- IsNonConstructible(shared_info)) {
+ !IsConstructable(shared_info->kind())) {
TRACE("Not inlining %s into %s because constructor is not constructable.\n",
shared_info->DebugName()->ToCString().get(),
info_->shared_info()->DebugName()->ToCString().get());
@@ -496,7 +487,7 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
BytecodeGraphBuilder graph_builder(
zone(), shared_info, feedback_vector, BailoutId::None(), jsgraph(),
call.frequency(), source_positions_, native_context(), inlining_id,
- flags, false);
+ flags, false, info_->is_analyze_environment_liveness());
graph_builder.CreateGraph();
// Extract the inlinee start/end nodes.
diff --git a/deps/v8/src/compiler/js-inlining.h b/deps/v8/src/compiler/js-inlining.h
index d078413b47..5fca638daf 100644
--- a/deps/v8/src/compiler/js-inlining.h
+++ b/deps/v8/src/compiler/js-inlining.h
@@ -12,7 +12,7 @@ namespace v8 {
namespace internal {
class BailoutId;
-class CompilationInfo;
+class OptimizedCompilationInfo;
namespace compiler {
@@ -23,7 +23,7 @@ class SourcePositionTable;
// heuristics that decide what and how much to inline are beyond its scope.
class JSInliner final : public AdvancedReducer {
public:
- JSInliner(Editor* editor, Zone* local_zone, CompilationInfo* info,
+ JSInliner(Editor* editor, Zone* local_zone, OptimizedCompilationInfo* info,
JSGraph* jsgraph, SourcePositionTable* source_positions)
: AdvancedReducer(editor),
local_zone_(local_zone),
@@ -50,7 +50,7 @@ class JSInliner final : public AdvancedReducer {
Handle<Context> native_context() const;
Zone* const local_zone_;
- CompilationInfo* info_;
+ OptimizedCompilationInfo* info_;
JSGraph* const jsgraph_;
SourcePositionTable* const source_positions_;
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index 35e0a551db..60e47e025f 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -26,6 +26,12 @@ namespace v8 {
namespace internal {
namespace compiler {
+// This is needed for gc_mole which will compile this file without the full set
+// of GN defined macros.
+#ifndef V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP
+#define V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP 64
+#endif
+
namespace {
bool HasNumberMaps(MapHandles const& maps) {
@@ -98,6 +104,8 @@ Reduction JSNativeContextSpecialization::Reduce(Node* node) {
return ReduceJSStoreNamedOwn(node);
case IrOpcode::kJSStoreDataPropertyInLiteral:
return ReduceJSStoreDataPropertyInLiteral(node);
+ case IrOpcode::kJSStoreInArrayLiteral:
+ return ReduceJSStoreInArrayLiteral(node);
default:
break;
}
@@ -1126,7 +1134,8 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
AccessMode access_mode, KeyedAccessLoadMode load_mode,
KeyedAccessStoreMode store_mode) {
DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
- node->opcode() == IrOpcode::kJSStoreProperty);
+ node->opcode() == IrOpcode::kJSStoreProperty ||
+ node->opcode() == IrOpcode::kJSStoreInArrayLiteral);
Node* receiver = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
@@ -1644,7 +1653,7 @@ Node* JSNativeContextSpecialization::InlinePropertyGetterCall(
Node** control, ZoneVector<Node*>* if_exceptions,
PropertyAccessInfo const& access_info) {
Node* target = jsgraph()->Constant(access_info.constant());
- FrameStateInfo const& frame_info = OpParameter<FrameStateInfo>(frame_state);
+ FrameStateInfo const& frame_info = FrameStateInfoOf(frame_state->op());
Handle<SharedFunctionInfo> shared_info =
frame_info.shared_info().ToHandleChecked();
// Introduce the call to the getter function.
@@ -1683,7 +1692,7 @@ void JSNativeContextSpecialization::InlinePropertySetterCall(
Node** effect, Node** control, ZoneVector<Node*>* if_exceptions,
PropertyAccessInfo const& access_info) {
Node* target = jsgraph()->Constant(access_info.constant());
- FrameStateInfo const& frame_info = OpParameter<FrameStateInfo>(frame_state);
+ FrameStateInfo const& frame_info = FrameStateInfoOf(frame_state->op());
Handle<SharedFunctionInfo> shared_info =
frame_info.shared_info().ToHandleChecked();
// Introduce the call to the setter function.
@@ -2091,6 +2100,45 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral(
return Replace(value);
}
+Reduction JSNativeContextSpecialization::ReduceJSStoreInArrayLiteral(
+ Node* node) {
+ DCHECK_EQ(IrOpcode::kJSStoreInArrayLiteral, node->opcode());
+ FeedbackParameter const& p = FeedbackParameterOf(node->op());
+ Node* const receiver = NodeProperties::GetValueInput(node, 0);
+ Node* const index = NodeProperties::GetValueInput(node, 1);
+ Node* const value = NodeProperties::GetValueInput(node, 2);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+
+ // Extract receiver maps from the keyed store IC using the FeedbackNexus.
+ if (!p.feedback().IsValid()) return NoChange();
+ FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
+
+ // Extract the keyed access store mode from the keyed store IC.
+ KeyedAccessStoreMode store_mode = nexus.GetKeyedAccessStoreMode();
+
+ // Extract receiver maps from the {nexus}.
+ MapHandles receiver_maps;
+ if (!ExtractReceiverMaps(receiver, effect, nexus, &receiver_maps)) {
+ return NoChange();
+ } else if (receiver_maps.empty()) {
+ if (flags() & kBailoutOnUninitialized) {
+ return ReduceSoftDeoptimize(
+ node,
+ DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess);
+ }
+ return NoChange();
+ }
+ DCHECK(!nexus.IsUninitialized());
+ DCHECK_EQ(ELEMENT, nexus.GetKeyType());
+
+ if (nexus.ic_state() == MEGAMORPHIC) return NoChange();
+
+ // Try to lower the element access based on the {receiver_maps}.
+ return ReduceElementAccess(node, index, value, receiver_maps,
+ AccessMode::kStoreInLiteral, STANDARD_LOAD,
+ store_mode);
+}
+
namespace {
ExternalArrayType GetArrayTypeFromElementsKind(ElementsKind kind) {
@@ -2113,7 +2161,6 @@ JSNativeContextSpecialization::BuildElementAccess(
Node* receiver, Node* index, Node* value, Node* effect, Node* control,
ElementAccessInfo const& access_info, AccessMode access_mode,
KeyedAccessLoadMode load_mode, KeyedAccessStoreMode store_mode) {
- DCHECK_NE(AccessMode::kStoreInLiteral, access_mode);
// TODO(bmeurer): We currently specialize based on elements kind. We should
// also be able to properly support strings and other JSObjects here.
@@ -2161,11 +2208,21 @@ JSNativeContextSpecialization::BuildElementAccess(
simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
receiver, effect, control);
- // Load the base and external pointer for the {receiver}s {elements}.
- base_pointer = effect = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForFixedTypedArrayBaseBasePointer()),
- elements, effect, control);
+ // Load the base pointer for the {receiver}. This will always be Smi
+ // zero unless we allow on-heap TypedArrays, which is only the case
+ // for Chrome. Node and Electron both set this limit to 0. Setting
+ // the base to Smi zero here allows the EffectControlLinearizer to
+ // optimize away the tricky part of the access later.
+ if (V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP == 0) {
+ base_pointer = jsgraph()->ZeroConstant();
+ } else {
+ base_pointer = effect = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForFixedTypedArrayBaseBasePointer()),
+ elements, effect, control);
+ }
+
+ // Load the external pointer for the {receiver}s {elements}.
external_pointer = effect = graph()->NewNode(
simplified()->LoadField(
AccessBuilder::ForFixedTypedArrayBaseExternalPointer()),
@@ -2189,13 +2246,11 @@ JSNativeContextSpecialization::BuildElementAccess(
if (load_mode == LOAD_IGNORE_OUT_OF_BOUNDS ||
store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
- // Only check that the {index} is in Signed32 range. We do the actual
+ // Only check that the {index} is in SignedSmall range. We do the actual
// bounds check below and just skip the property access if it's out of
// bounds for the {receiver}.
index = effect = graph()->NewNode(
- simplified()->SpeculativeToNumber(NumberOperationHint::kSigned32,
- VectorSlotPair()),
- index, effect, control);
+ simplified()->CheckSmi(VectorSlotPair()), index, effect, control);
// Cast the {index} to Unsigned32 range, so that the bounds checks
// below are performed on unsigned values, which means that all the
@@ -2346,7 +2401,8 @@ JSNativeContextSpecialization::BuildElementAccess(
// Check if we might need to grow the {elements} backing store.
if (IsGrowStoreMode(store_mode)) {
// For growing stores we validate the {index} below.
- DCHECK_EQ(AccessMode::kStore, access_mode);
+ DCHECK(access_mode == AccessMode::kStore ||
+ access_mode == AccessMode::kStoreInLiteral);
} else if (load_mode == LOAD_IGNORE_OUT_OF_BOUNDS &&
CanTreatHoleAsUndefined(receiver_maps)) {
// Check that the {index} is a valid array index, we do the actual
@@ -2478,7 +2534,8 @@ JSNativeContextSpecialization::BuildElementAccess(
}
}
} else {
- DCHECK_EQ(AccessMode::kStore, access_mode);
+ DCHECK(access_mode == AccessMode::kStore ||
+ access_mode == AccessMode::kStoreInLiteral);
if (IsSmiElementsKind(elements_kind)) {
value = effect = graph()->NewNode(
simplified()->CheckSmi(VectorSlotPair()), value, effect, control);
@@ -2606,8 +2663,10 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad(
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* etrue;
- Node* vtrue = etrue = graph()->NewNode(
- simplified()->StringCharAt(), receiver, masked_index, *effect, if_true);
+ Node* vtrue = etrue =
+ graph()->NewNode(simplified()->StringCharCodeAt(), receiver,
+ masked_index, *effect, if_true);
+ vtrue = graph()->NewNode(simplified()->StringFromSingleCharCode(), vtrue);
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
Node* vfalse = jsgraph()->UndefinedConstant();
@@ -2628,8 +2687,9 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad(
// Return the character from the {receiver} as single character string.
Node* value = *effect =
- graph()->NewNode(simplified()->StringCharAt(), receiver, masked_index,
- *effect, *control);
+ graph()->NewNode(simplified()->StringCharCodeAt(), receiver,
+ masked_index, *effect, *control);
+ value = graph()->NewNode(simplified()->StringFromSingleCharCode(), value);
return value;
}
}
@@ -2685,6 +2745,10 @@ Node* JSNativeContextSpecialization::BuildExtendPropertiesBackingStore(
}
Node* new_length_and_hash = graph()->NewNode(
simplified()->NumberBitwiseOr(), jsgraph()->Constant(new_length), hash);
+  // TODO(jarin): Fix the typer to infer tighter bound for NumberBitwiseOr.
+ new_length_and_hash = effect =
+ graph()->NewNode(common()->TypeGuard(Type::SignedSmall()),
+ new_length_and_hash, effect, control);
// Allocate and initialize the new properties.
AllocationBuilder a(jsgraph(), effect, control);
@@ -2739,11 +2803,20 @@ bool JSNativeContextSpecialization::ExtractReceiverMaps(
MapHandles* receiver_maps) {
DCHECK_EQ(0, receiver_maps->size());
if (nexus.IsUninitialized()) return true;
- // See if we can infer a concrete type for the {receiver}.
- if (InferReceiverMaps(receiver, effect, receiver_maps)) {
- // We can assume that the {receiver} still has the inferred {receiver_maps}.
- return true;
+
+ // See if we can infer a concrete type for the {receiver}. Solely relying on
+ // the inference is not safe for keyed stores, because we would potentially
+ // miss out on transitions that need to be performed.
+ {
+ FeedbackSlotKind kind = nexus.kind();
+ bool use_inference =
+ !IsKeyedStoreICKind(kind) && !IsStoreInArrayLiteralICKind(kind);
+ if (use_inference && InferReceiverMaps(receiver, effect, receiver_maps)) {
+ // We can assume that {receiver} still has the inferred {receiver_maps}.
+ return true;
+ }
}
+
// Try to extract some maps from the {nexus}.
if (nexus.ExtractMaps(receiver_maps) != 0) {
// Try to filter impossible candidates based on inferred root map.
@@ -2760,6 +2833,7 @@ bool JSNativeContextSpecialization::ExtractReceiverMaps(
}
return true;
}
+
return false;
}
@@ -2852,6 +2926,8 @@ SimplifiedOperatorBuilder* JSNativeContextSpecialization::simplified() const {
return jsgraph()->simplified();
}
+#undef V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h
index 6df48d6e23..5f357a2924 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.h
+++ b/deps/v8/src/compiler/js-native-context-specialization.h
@@ -73,6 +73,7 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
Reduction ReduceJSStoreProperty(Node* node);
Reduction ReduceJSStoreNamedOwn(Node* node);
Reduction ReduceJSStoreDataPropertyInLiteral(Node* node);
+ Reduction ReduceJSStoreInArrayLiteral(Node* node);
Reduction ReduceElementAccess(Node* node, Node* index, Node* value,
MapHandles const& receiver_maps,
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index 31be6d9979..5159517063 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -258,7 +258,8 @@ std::ostream& operator<<(std::ostream& os, FeedbackParameter const& p) {
FeedbackParameter const& FeedbackParameterOf(const Operator* op) {
DCHECK(op->opcode() == IrOpcode::kJSCreateEmptyLiteralArray ||
op->opcode() == IrOpcode::kJSInstanceOf ||
- op->opcode() == IrOpcode::kJSStoreDataPropertyInLiteral);
+ op->opcode() == IrOpcode::kJSStoreDataPropertyInLiteral ||
+ op->opcode() == IrOpcode::kJSStoreInArrayLiteral);
return OpParameter<FeedbackParameter>(op);
}
@@ -411,12 +412,63 @@ std::ostream& operator<<(std::ostream& os, CreateArrayParameters const& p) {
return os;
}
-
const CreateArrayParameters& CreateArrayParametersOf(const Operator* op) {
DCHECK_EQ(IrOpcode::kJSCreateArray, op->opcode());
return OpParameter<CreateArrayParameters>(op);
}
+bool operator==(CreateArrayIteratorParameters const& lhs,
+ CreateArrayIteratorParameters const& rhs) {
+ return lhs.kind() == rhs.kind();
+}
+
+bool operator!=(CreateArrayIteratorParameters const& lhs,
+ CreateArrayIteratorParameters const& rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(CreateArrayIteratorParameters const& p) {
+ return static_cast<size_t>(p.kind());
+}
+
+std::ostream& operator<<(std::ostream& os,
+ CreateArrayIteratorParameters const& p) {
+ return os << p.kind();
+}
+
+const CreateArrayIteratorParameters& CreateArrayIteratorParametersOf(
+ const Operator* op) {
+ DCHECK_EQ(IrOpcode::kJSCreateArrayIterator, op->opcode());
+ return OpParameter<CreateArrayIteratorParameters>(op);
+}
+
+bool operator==(CreateCollectionIteratorParameters const& lhs,
+ CreateCollectionIteratorParameters const& rhs) {
+ return lhs.collection_kind() == rhs.collection_kind() &&
+ lhs.iteration_kind() == rhs.iteration_kind();
+}
+
+bool operator!=(CreateCollectionIteratorParameters const& lhs,
+ CreateCollectionIteratorParameters const& rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(CreateCollectionIteratorParameters const& p) {
+ return base::hash_combine(static_cast<size_t>(p.collection_kind()),
+ static_cast<size_t>(p.iteration_kind()));
+}
+
+std::ostream& operator<<(std::ostream& os,
+ CreateCollectionIteratorParameters const& p) {
+ return os << p.collection_kind() << " " << p.iteration_kind();
+}
+
+const CreateCollectionIteratorParameters& CreateCollectionIteratorParametersOf(
+ const Operator* op) {
+ DCHECK_EQ(IrOpcode::kJSCreateCollectionIterator, op->opcode());
+ return OpParameter<CreateCollectionIteratorParameters>(op);
+}
+
bool operator==(CreateBoundFunctionParameters const& lhs,
CreateBoundFunctionParameters const& rhs) {
return lhs.arity() == rhs.arity() &&
@@ -573,6 +625,7 @@ CompareOperationHint CompareOperationHintOf(const Operator* op) {
V(CreateStringIterator, Operator::kEliminatable, 1, 1) \
V(CreateKeyValueArray, Operator::kEliminatable, 2, 1) \
V(CreatePromise, Operator::kEliminatable, 0, 1) \
+ V(CreateTypedArray, Operator::kNoProperties, 5, 1) \
V(HasProperty, Operator::kNoProperties, 2, 1) \
V(HasInPrototypeChain, Operator::kNoProperties, 2, 1) \
V(OrdinaryHasInstance, Operator::kNoProperties, 2, 1) \
@@ -745,6 +798,17 @@ const Operator* JSOperatorBuilder::StoreDataPropertyInLiteral(
parameters); // parameter
}
+const Operator* JSOperatorBuilder::StoreInArrayLiteral(
+ const VectorSlotPair& feedback) {
+ FeedbackParameter parameters(feedback);
+ return new (zone()) Operator1<FeedbackParameter>( // --
+ IrOpcode::kJSStoreInArrayLiteral,
+ Operator::kNoThrow, // opcode
+ "JSStoreInArrayLiteral", // name
+ 3, 1, 1, 0, 1, 0, // counts
+ parameters); // parameter
+}
+
const Operator* JSOperatorBuilder::CallForwardVarargs(size_t arity,
uint32_t start_index) {
CallForwardVarargsParameters parameters(arity, start_index);
@@ -911,6 +975,11 @@ const Operator* JSOperatorBuilder::GeneratorStore(int register_count) {
register_count); // parameter
}
+int GeneratorStoreRegisterCountOf(const Operator* op) {
+ DCHECK_EQ(IrOpcode::kJSGeneratorStore, op->opcode());
+ return OpParameter<int>(op);
+}
+
const Operator* JSOperatorBuilder::GeneratorRestoreRegister(int index) {
return new (zone()) Operator1<int>( // --
IrOpcode::kJSGeneratorRestoreRegister, Operator::kNoThrow, // opcode
@@ -919,6 +988,11 @@ const Operator* JSOperatorBuilder::GeneratorRestoreRegister(int index) {
index); // parameter
}
+int RestoreRegisterIndexOf(const Operator* op) {
+ DCHECK_EQ(IrOpcode::kJSGeneratorRestoreRegister, op->opcode());
+ return OpParameter<int>(op);
+}
+
const Operator* JSOperatorBuilder::StoreNamed(LanguageMode language_mode,
Handle<Name> name,
VectorSlotPair const& feedback) {
@@ -1049,6 +1123,24 @@ const Operator* JSOperatorBuilder::CreateArray(size_t arity,
parameters); // parameter
}
+const Operator* JSOperatorBuilder::CreateArrayIterator(IterationKind kind) {
+ CreateArrayIteratorParameters parameters(kind);
+ return new (zone()) Operator1<CreateArrayIteratorParameters>( // --
+ IrOpcode::kJSCreateArrayIterator, Operator::kEliminatable, // opcode
+ "JSCreateArrayIterator", // name
+ 1, 1, 1, 1, 1, 0, // counts
+ parameters); // parameter
+}
+
+const Operator* JSOperatorBuilder::CreateCollectionIterator(
+ CollectionKind collection_kind, IterationKind iteration_kind) {
+ CreateCollectionIteratorParameters parameters(collection_kind,
+ iteration_kind);
+ return new (zone()) Operator1<CreateCollectionIteratorParameters>(
+ IrOpcode::kJSCreateCollectionIterator, Operator::kEliminatable,
+ "JSCreateCollectionIterator", 1, 1, 1, 1, 1, 0, parameters);
+}
+
const Operator* JSOperatorBuilder::CreateBoundFunction(size_t arity,
Handle<Map> map) {
// bound_target_function, bound_this, arg1, ..., argN
@@ -1170,6 +1262,12 @@ const Operator* JSOperatorBuilder::CreateBlockContext(
scope_info); // parameter
}
+Handle<ScopeInfo> ScopeInfoOf(const Operator* op) {
+ DCHECK(IrOpcode::kJSCreateBlockContext == op->opcode() ||
+ IrOpcode::kJSCreateWithContext == op->opcode());
+ return OpParameter<Handle<ScopeInfo>>(op);
+}
+
#undef BINARY_OP_LIST
#undef CACHED_OP_LIST
#undef COMPARE_OP_LIST
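
The three accessors added above give reducers a checked, self-documenting way to read operator parameters. A condensed usage sketch, mirroring the call sites patched in js-generic-lowering.cc and js-typed-lowering.cc elsewhere in this commit:

    // Instead of the old, unchecked OpParameter<int>(node) reads:
    int register_count = GeneratorStoreRegisterCountOf(node->op());
    int index = RestoreRegisterIndexOf(node->op());
    Handle<ScopeInfo> scope_info = ScopeInfoOf(node->op());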
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index 959a83026c..98e67a087c 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -58,7 +58,7 @@ class CallFrequency final {
std::ostream& operator<<(std::ostream&, CallFrequency);
-CallFrequency CallFrequencyOf(Operator const* op) WARN_UNUSED_RESULT;
+CallFrequency CallFrequencyOf(Operator const* op) V8_WARN_UNUSED_RESULT;
// Defines the flags for a JavaScript call forwarding parameters. This
// is used as parameter by JSConstructForwardVarargs operators.
@@ -93,7 +93,7 @@ std::ostream& operator<<(std::ostream&,
ConstructForwardVarargsParameters const&);
ConstructForwardVarargsParameters const& ConstructForwardVarargsParametersOf(
- Operator const*) WARN_UNUSED_RESULT;
+ Operator const*) V8_WARN_UNUSED_RESULT;
// Defines the arity and the feedback for a JavaScript constructor call. This is
// used as a parameter by JSConstruct and JSConstructWithSpread operators.
@@ -154,7 +154,7 @@ class CallForwardVarargsParameters final {
std::ostream& operator<<(std::ostream&, CallForwardVarargsParameters const&);
CallForwardVarargsParameters const& CallForwardVarargsParametersOf(
- Operator const*) WARN_UNUSED_RESULT;
+ Operator const*) V8_WARN_UNUSED_RESULT;
// Defines the arity and the call flags for a JavaScript function call. This is
// used as a parameter by JSCall and JSCallWithSpread operators.
@@ -260,7 +260,7 @@ size_t hash_value(ContextAccess const&);
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, ContextAccess const&);
-ContextAccess const& ContextAccessOf(Operator const*);
+V8_EXPORT_PRIVATE ContextAccess const& ContextAccessOf(Operator const*);
// Defines the name and ScopeInfo for a new catch context. This is used as a
// parameter by the JSCreateCatchContext operator.
@@ -502,6 +502,62 @@ std::ostream& operator<<(std::ostream&, CreateArrayParameters const&);
const CreateArrayParameters& CreateArrayParametersOf(const Operator* op);
+// Defines shared information for the array iterator that should be created.
+// This is used as parameter by JSCreateArrayIterator operators.
+class CreateArrayIteratorParameters final {
+ public:
+ explicit CreateArrayIteratorParameters(IterationKind kind) : kind_(kind) {}
+
+ IterationKind kind() const { return kind_; }
+
+ private:
+ IterationKind const kind_;
+};
+
+bool operator==(CreateArrayIteratorParameters const&,
+ CreateArrayIteratorParameters const&);
+bool operator!=(CreateArrayIteratorParameters const&,
+ CreateArrayIteratorParameters const&);
+
+size_t hash_value(CreateArrayIteratorParameters const&);
+
+std::ostream& operator<<(std::ostream&, CreateArrayIteratorParameters const&);
+
+const CreateArrayIteratorParameters& CreateArrayIteratorParametersOf(
+ const Operator* op);
+
+// Defines shared information for the array iterator that should be created.
+// This is used as parameter by JSCreateCollectionIterator operators.
+class CreateCollectionIteratorParameters final {
+ public:
+ explicit CreateCollectionIteratorParameters(CollectionKind collection_kind,
+ IterationKind iteration_kind)
+ : collection_kind_(collection_kind), iteration_kind_(iteration_kind) {
+ CHECK(!(collection_kind == CollectionKind::kSet &&
+ iteration_kind == IterationKind::kKeys));
+ }
+
+ CollectionKind collection_kind() const { return collection_kind_; }
+ IterationKind iteration_kind() const { return iteration_kind_; }
+
+ private:
+ CollectionKind const collection_kind_;
+ IterationKind const iteration_kind_;
+};
+
+bool operator==(CreateCollectionIteratorParameters const&,
+ CreateCollectionIteratorParameters const&);
+bool operator!=(CreateCollectionIteratorParameters const&,
+ CreateCollectionIteratorParameters const&);
+
+size_t hash_value(CreateCollectionIteratorParameters const&);
+
+std::ostream& operator<<(std::ostream&,
+ CreateCollectionIteratorParameters const&);
+
+const CreateCollectionIteratorParameters& CreateCollectionIteratorParametersOf(
+ const Operator* op);
+
// Defines shared information for the bound function that should be created.
// This is used as parameter by JSCreateBoundFunction operators.
class CreateBoundFunctionParameters final {
@@ -606,12 +662,17 @@ size_t hash_value(ForInMode);
std::ostream& operator<<(std::ostream&, ForInMode);
-ForInMode ForInModeOf(Operator const* op) WARN_UNUSED_RESULT;
+ForInMode ForInModeOf(Operator const* op) V8_WARN_UNUSED_RESULT;
BinaryOperationHint BinaryOperationHintOf(const Operator* op);
CompareOperationHint CompareOperationHintOf(const Operator* op);
+int GeneratorStoreRegisterCountOf(const Operator* op) V8_WARN_UNUSED_RESULT;
+int RestoreRegisterIndexOf(const Operator* op) V8_WARN_UNUSED_RESULT;
+
+Handle<ScopeInfo> ScopeInfoOf(const Operator* op) V8_WARN_UNUSED_RESULT;
+
// Interface for building JavaScript-level operators, e.g. directly from the
// AST. Most operators have no parameters, thus can be globally shared for all
// graphs.
@@ -656,6 +717,8 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* Create();
const Operator* CreateArguments(CreateArgumentsType type);
const Operator* CreateArray(size_t arity, Handle<AllocationSite> site);
+ const Operator* CreateArrayIterator(IterationKind);
+ const Operator* CreateCollectionIterator(CollectionKind, IterationKind);
const Operator* CreateBoundFunction(size_t arity, Handle<Map> map);
const Operator* CreateClosure(Handle<SharedFunctionInfo> shared_info,
Handle<FeedbackCell> feedback_cell,
@@ -665,6 +728,7 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* CreateStringIterator();
const Operator* CreateKeyValueArray();
const Operator* CreatePromise();
+ const Operator* CreateTypedArray();
const Operator* CreateLiteralArray(Handle<ConstantElementsPair> constant,
VectorSlotPair const& feedback,
int literal_flags, int number_of_elements);
@@ -714,6 +778,7 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* StoreNamedOwn(Handle<Name> name,
VectorSlotPair const& feedback);
const Operator* StoreDataPropertyInLiteral(const VectorSlotPair& feedback);
+ const Operator* StoreInArrayLiteral(const VectorSlotPair& feedback);
const Operator* DeleteProperty();
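
The two parameter classes declared above are consumed by the matching JSOperatorBuilder methods. A short, hedged usage sketch of how a graph builder would obtain the new operators (note the constructor CHECK rules out the Set/kKeys combination, since sets have no key iterator); javascript() is assumed to be the usual JSOperatorBuilder accessor available in the graph builders:

    // Sketch: obtaining the iterator-creation operators declared in this header.
    const Operator* array_values =
        javascript()->CreateArrayIterator(IterationKind::kValues);
    const Operator* map_entries = javascript()->CreateCollectionIterator(
        CollectionKind::kMap, IterationKind::kEntries);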
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.cc b/deps/v8/src/compiler/js-type-hint-lowering.cc
index fac87bc685..d97e1dcbc7 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.cc
+++ b/deps/v8/src/compiler/js-type-hint-lowering.cc
@@ -477,7 +477,8 @@ JSTypeHintLowering::ReduceStoreKeyedOperation(const Operator* op, Node* obj,
Node* key, Node* val,
Node* effect, Node* control,
FeedbackSlot slot) const {
- DCHECK_EQ(IrOpcode::kJSStoreProperty, op->opcode());
+ DCHECK(op->opcode() == IrOpcode::kJSStoreProperty ||
+ op->opcode() == IrOpcode::kJSStoreInArrayLiteral);
DCHECK(!slot.IsInvalid());
FeedbackNexus nexus(feedback_vector(), slot);
if (Node* node = TryBuildSoftDeopt(
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index b3cd43ff71..98e18bc590 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -1370,7 +1370,7 @@ Node* JSTypedLowering::BuildGetModuleCell(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- int32_t cell_index = OpParameter<int32_t>(node);
+ int32_t cell_index = OpParameter<int32_t>(node->op());
Node* module = NodeProperties::GetValueInput(node, 0);
Type* module_type = NodeProperties::GetType(module);
@@ -1420,8 +1420,9 @@ Reduction JSTypedLowering::ReduceJSStoreModule(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* value = NodeProperties::GetValueInput(node, 1);
- DCHECK_EQ(ModuleDescriptor::GetCellIndexKind(OpParameter<int32_t>(node)),
- ModuleDescriptor::kExport);
+ DCHECK_EQ(
+ ModuleDescriptor::GetCellIndexKind(OpParameter<int32_t>(node->op())),
+ ModuleDescriptor::kExport);
Node* cell = BuildGetModuleCell(node);
if (cell->op()->EffectOutputCount() > 0) effect = cell;
@@ -1527,7 +1528,12 @@ Reduction JSTypedLowering::ReduceJSConstructForwardVarargs(Node* node) {
Node* new_target = NodeProperties::GetValueInput(node, arity + 1);
// Check if {target} is a JSFunction.
- if (target_type->Is(Type::Function())) {
+ if (target_type->IsHeapConstant() &&
+ target_type->AsHeapConstant()->Value()->IsJSFunction()) {
+ // Only optimize [[Construct]] here if {function} is a Constructor.
+ Handle<JSFunction> function =
+ Handle<JSFunction>::cast(target_type->AsHeapConstant()->Value());
+ if (!function->IsConstructor()) return NoChange();
// Patch {node} to an indirect call via ConstructFunctionForwardVarargs.
Callable callable = CodeFactory::ConstructFunctionForwardVarargs(isolate());
node->RemoveInput(arity + 1);
@@ -1555,8 +1561,6 @@ Reduction JSTypedLowering::ReduceJSConstruct(Node* node) {
Node* target = NodeProperties::GetValueInput(node, 0);
Type* target_type = NodeProperties::GetType(target);
Node* new_target = NodeProperties::GetValueInput(node, arity + 1);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
// Check if {target} is a known JSFunction.
if (target_type->IsHeapConstant() &&
@@ -1564,58 +1568,30 @@ Reduction JSTypedLowering::ReduceJSConstruct(Node* node) {
Handle<JSFunction> function =
Handle<JSFunction>::cast(target_type->AsHeapConstant()->Value());
Handle<SharedFunctionInfo> shared(function->shared(), isolate());
- const int builtin_index = shared->construct_stub()->builtin_index();
- const bool is_builtin = (builtin_index != -1);
- CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
+ // Only optimize [[Construct]] here if {function} is a Constructor.
+ if (!function->IsConstructor()) return NoChange();
- if (is_builtin && Builtins::HasCppImplementation(builtin_index) &&
- !NeedsArgumentAdaptorFrame(shared, arity)) {
- // Patch {node} to a direct CEntryStub call.
+ CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
- // Load the context from the {target}.
- Node* context = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSFunctionContext()),
- target, effect, control);
- NodeProperties::ReplaceContextInput(node, context);
+ // Patch {node} to an indirect call via the {function}s construct stub.
+ bool use_builtin_construct_stub = shared->construct_as_builtin();
- // Update the effect dependency for the {node}.
- NodeProperties::ReplaceEffectInput(node, effect);
+ Handle<Code> code = use_builtin_construct_stub
+ ? BUILTIN_CODE(isolate(), JSBuiltinsConstructStub)
+ : isolate()->builtins()->JSConstructStubGeneric();
- ReduceBuiltin(isolate(), jsgraph(), node, builtin_index, arity, flags);
- } else {
- // Patch {node} to an indirect call via the {function}s construct stub.
- Callable callable(handle(shared->construct_stub(), isolate()),
- ConstructStubDescriptor(isolate()));
- node->RemoveInput(arity + 1);
- node->InsertInput(graph()->zone(), 0,
- jsgraph()->HeapConstant(callable.code()));
- node->InsertInput(graph()->zone(), 2, new_target);
- node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
- node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
- node->InsertInput(graph()->zone(), 5, jsgraph()->UndefinedConstant());
- NodeProperties::ChangeOp(
- node, common()->Call(Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(),
- 1 + arity, flags)));
- }
- return Changed(node);
- }
-
- // Check if {target} is a JSFunction.
- if (target_type->Is(Type::Function())) {
- // Patch {node} to an indirect call via the ConstructFunction builtin.
- Callable callable = CodeFactory::ConstructFunction(isolate());
node->RemoveInput(arity + 1);
- node->InsertInput(graph()->zone(), 0,
- jsgraph()->HeapConstant(callable.code()));
+ node->InsertInput(graph()->zone(), 0, jsgraph()->HeapConstant(code));
node->InsertInput(graph()->zone(), 2, new_target);
node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
+ node->InsertInput(graph()->zone(), 5, jsgraph()->UndefinedConstant());
NodeProperties::ChangeOp(
node, common()->Call(Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 1 + arity,
- CallDescriptor::kNeedsFrameState)));
+ isolate(), graph()->zone(),
+ ConstructStubDescriptor(isolate()), 1 + arity, flags)));
+
return Changed(node);
}
@@ -1682,9 +1658,6 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
return NoChange();
}
- const int builtin_index = shared->code()->builtin_index();
- const bool is_builtin = (builtin_index != -1);
-
// Class constructors are callable, but [[Call]] will raise an exception.
// See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList ).
if (IsClassConstructor(shared->kind())) return NoChange();
@@ -1728,9 +1701,11 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
node, common()->Call(Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(),
1 + arity, flags)));
- } else if (is_builtin && Builtins::HasCppImplementation(builtin_index)) {
+ } else if (shared->HasBuiltinId() &&
+ Builtins::HasCppImplementation(shared->builtin_id())) {
// Patch {node} to a direct CEntryStub call.
- ReduceBuiltin(isolate(), jsgraph(), node, builtin_index, arity, flags);
+ ReduceBuiltin(isolate(), jsgraph(), node, shared->builtin_id(), arity,
+ flags);
} else {
// Patch {node} to a direct call.
node->InsertInput(graph()->zone(), arity + 2, new_target);
@@ -2038,7 +2013,7 @@ Reduction JSTypedLowering::ReduceJSGeneratorStore(Node* node) {
Node* context = NodeProperties::GetContextInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- int register_count = OpParameter<int>(node);
+ int register_count = GeneratorStoreRegisterCountOf(node->op());
FieldAccess array_field = AccessBuilder::ForJSGeneratorObjectRegisterFile();
FieldAccess context_field = AccessBuilder::ForJSGeneratorObjectContext();
@@ -2109,7 +2084,7 @@ Reduction JSTypedLowering::ReduceJSGeneratorRestoreRegister(Node* node) {
Node* generator = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- int index = OpParameter<int>(node);
+ int index = RestoreRegisterIndexOf(node->op());
FieldAccess array_field = AccessBuilder::ForJSGeneratorObjectRegisterFile();
FieldAccess element_field = AccessBuilder::ForFixedArraySlot(index);
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index 7ccad439d9..d92e48848e 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -6,12 +6,12 @@
#include "src/assembler-inl.h"
#include "src/code-stubs.h"
-#include "src/compilation-info.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/frame.h"
#include "src/compiler/node.h"
#include "src/compiler/osr.h"
#include "src/compiler/pipeline.h"
+#include "src/optimized-compilation-info.h"
namespace v8 {
namespace internal {
@@ -135,7 +135,8 @@ int CallDescriptor::CalculateFixedFrameSize() const {
UNREACHABLE();
}
-CallDescriptor* Linkage::ComputeIncoming(Zone* zone, CompilationInfo* info) {
+CallDescriptor* Linkage::ComputeIncoming(Zone* zone,
+ OptimizedCompilationInfo* info) {
DCHECK(!info->IsStub());
if (!info->closure().is_null()) {
// If we are compiling a JS function, use a JS call descriptor,
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index 5b08bc7f7c..d07e86d5b5 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -20,7 +20,7 @@ namespace v8 {
namespace internal {
class CallInterfaceDescriptor;
-class CompilationInfo;
+class OptimizedCompilationInfo;
namespace compiler {
@@ -364,7 +364,8 @@ class V8_EXPORT_PRIVATE Linkage : public NON_EXPORTED_BASE(ZoneObject) {
explicit Linkage(CallDescriptor* incoming) : incoming_(incoming) {}
- static CallDescriptor* ComputeIncoming(Zone* zone, CompilationInfo* info);
+ static CallDescriptor* ComputeIncoming(Zone* zone,
+ OptimizedCompilationInfo* info);
// The call descriptor for this compilation unit describes the locations
// of incoming parameters and the outgoing return value(s).
diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc
index a3b0eda15f..0dff4e0f5d 100644
--- a/deps/v8/src/compiler/load-elimination.cc
+++ b/deps/v8/src/compiler/load-elimination.cc
@@ -8,7 +8,7 @@
#include "src/compiler/js-graph.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
-#include "src/factory.h"
+#include "src/heap/factory.h"
#include "src/objects-inl.h"
namespace v8 {
@@ -329,6 +329,7 @@ void LoadElimination::AbstractElements::Print() const {
Node* LoadElimination::AbstractField::Lookup(Node* object) const {
for (auto pair : info_for_node_) {
+ if (pair.first->IsDead()) continue;
if (MustAlias(object, pair.first)) return pair.second.value;
}
return nullptr;
@@ -364,6 +365,7 @@ LoadElimination::AbstractField const* LoadElimination::AbstractField::Kill(
const AliasStateInfo& alias_info, MaybeHandle<Name> name,
Zone* zone) const {
for (auto pair : this->info_for_node_) {
+ if (pair.first->IsDead()) continue;
if (alias_info.MayAlias(pair.first)) {
AbstractField* that = new (zone) AbstractField(zone);
for (auto pair : this->info_for_node_) {
diff --git a/deps/v8/src/compiler/load-elimination.h b/deps/v8/src/compiler/load-elimination.h
index 60ae16f152..f853c8f953 100644
--- a/deps/v8/src/compiler/load-elimination.h
+++ b/deps/v8/src/compiler/load-elimination.h
@@ -159,6 +159,7 @@ class V8_EXPORT_PRIVATE LoadElimination final
for (auto this_it : this->info_for_node_) {
Node* this_object = this_it.first;
Field this_second = this_it.second;
+ if (this_object->IsDead()) continue;
auto that_it = that->info_for_node_.find(this_object);
if (that_it != that->info_for_node_.end() &&
that_it->second == this_second) {
diff --git a/deps/v8/src/compiler/loop-variable-optimizer.cc b/deps/v8/src/compiler/loop-variable-optimizer.cc
index 97d712f125..c1938ca304 100644
--- a/deps/v8/src/compiler/loop-variable-optimizer.cc
+++ b/deps/v8/src/compiler/loop-variable-optimizer.cc
@@ -239,7 +239,12 @@ InductionVariable* LoopVariableOptimizer::TryGetInductionVariable(Node* phi) {
}
// TODO(jarin) Support both sides.
- if (arith->InputAt(0) != phi) return nullptr;
+ Node* input = arith->InputAt(0);
+ if (input->opcode() == IrOpcode::kSpeculativeToNumber ||
+ input->opcode() == IrOpcode::kJSToNumber) {
+ input = input->InputAt(0);
+ }
+ if (input != phi) return nullptr;
Node* effect_phi = nullptr;
for (Node* use : loop->uses()) {
diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc
index 0c59453b41..4418a2813f 100644
--- a/deps/v8/src/compiler/machine-graph-verifier.cc
+++ b/deps/v8/src/compiler/machine-graph-verifier.cc
@@ -113,19 +113,20 @@ class MachineRepresentationInferrer {
case IrOpcode::kWord32AtomicLoad:
case IrOpcode::kLoad:
case IrOpcode::kProtectedLoad:
+ case IrOpcode::kPoisonedLoad:
representation_vector_[node->id()] = PromoteRepresentation(
LoadRepresentationOf(node->op()).representation());
break;
case IrOpcode::kLoadStackPointer:
case IrOpcode::kLoadFramePointer:
case IrOpcode::kLoadParentFramePointer:
- case IrOpcode::kSpeculationPoison:
+ case IrOpcode::kLoadRootsPointer:
representation_vector_[node->id()] =
MachineType::PointerRepresentation();
break;
case IrOpcode::kUnalignedLoad:
representation_vector_[node->id()] = PromoteRepresentation(
- UnalignedLoadRepresentationOf(node->op()).representation());
+ LoadRepresentationOf(node->op()).representation());
break;
case IrOpcode::kPhi:
representation_vector_[node->id()] =
@@ -174,6 +175,7 @@ class MachineRepresentationInferrer {
case IrOpcode::kChangeInt32ToTagged:
case IrOpcode::kChangeUint32ToTagged:
case IrOpcode::kBitcastWordToTagged:
+ case IrOpcode::kPoisonOnSpeculationTagged:
representation_vector_[node->id()] = MachineRepresentation::kTagged;
break;
case IrOpcode::kExternalConstant:
@@ -181,6 +183,7 @@ class MachineRepresentationInferrer {
MachineType::PointerRepresentation();
break;
case IrOpcode::kBitcastTaggedToWord:
+ case IrOpcode::kPoisonOnSpeculationWord:
representation_vector_[node->id()] =
MachineType::PointerRepresentation();
break;
@@ -331,10 +334,12 @@ class MachineRepresentationChecker {
break;
case IrOpcode::kBitcastWordToTagged:
case IrOpcode::kBitcastWordToTaggedSigned:
+ case IrOpcode::kPoisonOnSpeculationWord:
CheckValueInputRepresentationIs(
node, 0, MachineType::PointerRepresentation());
break;
case IrOpcode::kBitcastTaggedToWord:
+ case IrOpcode::kPoisonOnSpeculationTagged:
CheckValueInputIsTagged(node, 0);
break;
case IrOpcode::kTruncateFloat64ToWord32:
@@ -461,6 +466,7 @@ class MachineRepresentationChecker {
break;
case IrOpcode::kLoad:
case IrOpcode::kWord32AtomicLoad:
+ case IrOpcode::kPoisonedLoad:
CheckValueInputIsTaggedOrPointer(node, 0);
CheckValueInputRepresentationIs(
node, 1, MachineType::PointerRepresentation());
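The verifier hunks above give the new kPoisonedLoad and kPoisonOnSpeculation* opcodes the same representation rules as the operations they shadow: a poisoned load reports the representation of the underlying load, and the word/tagged poison ops preserve their input's representation. A rough standalone illustration of that mapping, with deliberately simplified opcode and representation enums:

    enum class Opcode {
      kLoad,
      kPoisonedLoad,
      kBitcastTaggedToWord,
      kPoisonOnSpeculationWord,
      kPoisonOnSpeculationTagged,
    };

    enum class Rep { kTagged, kWord };

    // `load_rep` stands in for LoadRepresentationOf(node->op()).representation().
    Rep InferRepresentation(Opcode op, Rep load_rep) {
      switch (op) {
        case Opcode::kLoad:
        case Opcode::kPoisonedLoad:
          return load_rep;             // same rules as a plain load
        case Opcode::kBitcastTaggedToWord:
        case Opcode::kPoisonOnSpeculationWord:
          return Rep::kWord;           // pointer-sized word
        case Opcode::kPoisonOnSpeculationTagged:
          return Rep::kTagged;
      }
      return Rep::kWord;  // not reached for the opcodes above
    }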
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index c091146f1d..f26176fd80 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -38,7 +38,9 @@ LoadRepresentation LoadRepresentationOf(Operator const* op) {
DCHECK(IrOpcode::kLoad == op->opcode() ||
IrOpcode::kProtectedLoad == op->opcode() ||
IrOpcode::kWord32AtomicLoad == op->opcode() ||
- IrOpcode::kPoisonedLoad == op->opcode());
+ IrOpcode::kWord64AtomicLoad == op->opcode() ||
+ IrOpcode::kPoisonedLoad == op->opcode() ||
+ IrOpcode::kUnalignedLoad == op->opcode());
return OpParameter<LoadRepresentation>(op);
}
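With UnalignedLoadRepresentationOf removed below, LoadRepresentationOf becomes the single accessor for every load-like opcode, guarded by a DCHECK over the allowed opcodes. A small standalone analogue of that checked-accessor pattern (assert in place of DCHECK, a plain struct in place of Operator1):

    #include <cassert>

    enum class Opcode { kLoad, kUnalignedLoad, kPoisonedLoad, kStore };
    enum class LoadRep { kWord32, kWord64, kFloat64 };

    struct Operator {
      Opcode opcode;
      LoadRep rep;  // stands in for the Operator1<LoadRepresentation> parameter
    };

    // One accessor serves aligned, unaligned and poisoned loads; anything else
    // is a caller bug, caught by the assertion.
    LoadRep LoadRepresentationOf(const Operator& op) {
      assert(op.opcode == Opcode::kLoad || op.opcode == Opcode::kUnalignedLoad ||
             op.opcode == Opcode::kPoisonedLoad);
      return op.rep;
    }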
@@ -49,11 +51,6 @@ StoreRepresentation const& StoreRepresentationOf(Operator const* op) {
return OpParameter<StoreRepresentation>(op);
}
-UnalignedLoadRepresentation UnalignedLoadRepresentationOf(Operator const* op) {
- DCHECK_EQ(IrOpcode::kUnalignedLoad, op->opcode());
- return OpParameter<UnalignedLoadRepresentation>(op);
-}
-
UnalignedStoreRepresentation const& UnalignedStoreRepresentationOf(
Operator const* op) {
DCHECK_EQ(IrOpcode::kUnalignedStore, op->opcode());
@@ -82,7 +79,8 @@ StackSlotRepresentation const& StackSlotRepresentationOf(Operator const* op) {
}
MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
- DCHECK_EQ(IrOpcode::kWord32AtomicStore, op->opcode());
+ DCHECK(IrOpcode::kWord32AtomicStore == op->opcode() ||
+ IrOpcode::kWord64AtomicStore == op->opcode());
return OpParameter<MachineRepresentation>(op);
}
@@ -225,10 +223,10 @@ MachineType AtomicOpRepresentationOf(Operator const* op) {
V(Float64ExtractHighWord32, Operator::kNoProperties, 1, 0, 1) \
V(Float64InsertLowWord32, Operator::kNoProperties, 2, 0, 1) \
V(Float64InsertHighWord32, Operator::kNoProperties, 2, 0, 1) \
- V(SpeculationPoison, Operator::kNoProperties, 0, 0, 1) \
V(LoadStackPointer, Operator::kNoProperties, 0, 0, 1) \
V(LoadFramePointer, Operator::kNoProperties, 0, 0, 1) \
V(LoadParentFramePointer, Operator::kNoProperties, 0, 0, 1) \
+ V(LoadRootsPointer, Operator::kNoProperties, 0, 0, 1) \
V(Int32PairAdd, Operator::kNoProperties, 4, 0, 2) \
V(Int32PairSub, Operator::kNoProperties, 4, 0, 2) \
V(Int32PairMul, Operator::kNoProperties, 4, 0, 2) \
@@ -401,11 +399,21 @@ MachineType AtomicOpRepresentationOf(Operator const* op) {
V(Int32) \
V(Uint32)
+#define ATOMIC64_TYPE_LIST(V) \
+ V(Uint8) \
+ V(Uint16) \
+ V(Uint32) \
+ V(Uint64)
+
#define ATOMIC_REPRESENTATION_LIST(V) \
V(kWord8) \
V(kWord16) \
V(kWord32)
+#define ATOMIC64_REPRESENTATION_LIST(V) \
+ ATOMIC_REPRESENTATION_LIST(V) \
+ V(kWord64)
+
#define SIMD_LANE_OP_LIST(V) \
V(F32x4, 4) \
V(I32x4, 4) \
@@ -470,9 +478,9 @@ struct MachineOperatorGlobalCache {
"PoisonedLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
}; \
struct UnalignedLoad##Type##Operator final \
- : public Operator1<UnalignedLoadRepresentation> { \
+ : public Operator1<LoadRepresentation> { \
UnalignedLoad##Type##Operator() \
- : Operator1<UnalignedLoadRepresentation>( \
+ : Operator1<LoadRepresentation>( \
IrOpcode::kUnalignedLoad, \
Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
"UnalignedLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
@@ -575,6 +583,19 @@ struct MachineOperatorGlobalCache {
ATOMIC_TYPE_LIST(ATOMIC_LOAD)
#undef ATOMIC_LOAD
+#define ATOMIC_LOAD(Type) \
+ struct Word64AtomicLoad##Type##Operator final \
+ : public Operator1<LoadRepresentation> { \
+ Word64AtomicLoad##Type##Operator() \
+ : Operator1<LoadRepresentation>( \
+ IrOpcode::kWord64AtomicLoad, \
+ Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
+ "Word64AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
+ }; \
+ Word64AtomicLoad##Type##Operator kWord64AtomicLoad##Type;
+ ATOMIC64_TYPE_LIST(ATOMIC_LOAD)
+#undef ATOMIC_LOAD
+
#define ATOMIC_STORE(Type) \
struct Word32AtomicStore##Type##Operator \
: public Operator1<MachineRepresentation> { \
@@ -589,6 +610,20 @@ struct MachineOperatorGlobalCache {
ATOMIC_REPRESENTATION_LIST(ATOMIC_STORE)
#undef ATOMIC_STORE
+#define ATOMIC_STORE(Type) \
+ struct Word64AtomicStore##Type##Operator \
+ : public Operator1<MachineRepresentation> { \
+ Word64AtomicStore##Type##Operator() \
+ : Operator1<MachineRepresentation>( \
+ IrOpcode::kWord64AtomicStore, \
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
+ "Word64AtomicStore", 3, 1, 1, 0, 1, 0, \
+ MachineRepresentation::Type) {} \
+ }; \
+ Word64AtomicStore##Type##Operator kWord64AtomicStore##Type;
+ ATOMIC64_REPRESENTATION_LIST(ATOMIC_STORE)
+#undef ATOMIC_STORE
+
#define ATOMIC_OP(op, type) \
struct op##type##Operator : public Operator1<MachineType> { \
op##type##Operator() \
@@ -597,15 +632,24 @@ struct MachineOperatorGlobalCache {
3, 1, 1, 1, 1, 0, MachineType::type()) {} \
}; \
op##type##Operator k##op##type;
-#define ATOMIC_OP_LIST(type) \
- ATOMIC_OP(Word32AtomicExchange, type) \
- ATOMIC_OP(Word32AtomicAdd, type) \
- ATOMIC_OP(Word32AtomicSub, type) \
- ATOMIC_OP(Word32AtomicAnd, type) \
- ATOMIC_OP(Word32AtomicOr, type) \
- ATOMIC_OP(Word32AtomicXor, type)
+#define ATOMIC_OP_LIST(type) \
+ ATOMIC_OP(Word32AtomicAdd, type) \
+ ATOMIC_OP(Word32AtomicSub, type) \
+ ATOMIC_OP(Word32AtomicAnd, type) \
+ ATOMIC_OP(Word32AtomicOr, type) \
+ ATOMIC_OP(Word32AtomicXor, type) \
+ ATOMIC_OP(Word32AtomicExchange, type)
ATOMIC_TYPE_LIST(ATOMIC_OP_LIST)
#undef ATOMIC_OP_LIST
+#define ATOMIC64_OP_LIST(type) \
+ ATOMIC_OP(Word64AtomicAdd, type) \
+ ATOMIC_OP(Word64AtomicSub, type) \
+ ATOMIC_OP(Word64AtomicAnd, type) \
+ ATOMIC_OP(Word64AtomicOr, type) \
+ ATOMIC_OP(Word64AtomicXor, type) \
+ ATOMIC_OP(Word64AtomicExchange, type)
+ ATOMIC64_TYPE_LIST(ATOMIC64_OP_LIST)
+#undef ATOMIC64_OP_LIST
#undef ATOMIC_OP
#define ATOMIC_COMPARE_EXCHANGE(Type) \
@@ -622,6 +666,20 @@ struct MachineOperatorGlobalCache {
ATOMIC_TYPE_LIST(ATOMIC_COMPARE_EXCHANGE)
#undef ATOMIC_COMPARE_EXCHANGE
+#define ATOMIC_COMPARE_EXCHANGE(Type) \
+ struct Word64AtomicCompareExchange##Type##Operator \
+ : public Operator1<MachineType> { \
+ Word64AtomicCompareExchange##Type##Operator() \
+ : Operator1<MachineType>(IrOpcode::kWord64AtomicCompareExchange, \
+ Operator::kNoDeopt | Operator::kNoThrow, \
+ "Word64AtomicCompareExchange", 4, 1, 1, 1, 1, \
+ 0, MachineType::Type()) {} \
+ }; \
+ Word64AtomicCompareExchange##Type##Operator \
+ kWord64AtomicCompareExchange##Type;
+ ATOMIC64_TYPE_LIST(ATOMIC_COMPARE_EXCHANGE)
+#undef ATOMIC_COMPARE_EXCHANGE
+
// The {BitcastWordToTagged} operator must not be marked as pure (especially
// not idempotent), because otherwise the splitting logic in the Scheduler
// might decide to split these operators, thus potentially creating live
@@ -643,6 +701,22 @@ struct MachineOperatorGlobalCache {
};
BitcastTaggedToWordOperator kBitcastTaggedToWord;
+ struct PoisonOnSpeculationTagged : public Operator {
+ PoisonOnSpeculationTagged()
+ : Operator(IrOpcode::kPoisonOnSpeculationTagged,
+ Operator::kEliminatable | Operator::kNoWrite,
+ "PoisonOnSpeculationTagged", 1, 1, 1, 1, 1, 0) {}
+ };
+ PoisonOnSpeculationTagged kPoisonOnSpeculationTagged;
+
+ struct PoisonOnSpeculationWord : public Operator {
+ PoisonOnSpeculationWord()
+ : Operator(IrOpcode::kPoisonOnSpeculationWord,
+ Operator::kEliminatable | Operator::kNoWrite,
+ "PoisonOnSpeculationWord", 1, 1, 1, 1, 1, 0) {}
+ };
+ PoisonOnSpeculationWord kPoisonOnSpeculationWord;
+
struct SpeculationFenceOperator : public Operator {
SpeculationFenceOperator()
: Operator(IrOpcode::kSpeculationFence, Operator::kNoThrow,
@@ -693,8 +767,7 @@ MachineOperatorBuilder::MachineOperatorBuilder(
word == MachineRepresentation::kWord64);
}
-const Operator* MachineOperatorBuilder::UnalignedLoad(
- UnalignedLoadRepresentation rep) {
+const Operator* MachineOperatorBuilder::UnalignedLoad(LoadRepresentation rep) {
#define LOAD(Type) \
if (rep == MachineType::Type()) { \
return &cache_.kUnalignedLoad##Type; \
@@ -944,6 +1017,107 @@ const Operator* MachineOperatorBuilder::Word32AtomicXor(MachineType rep) {
UNREACHABLE();
}
+const Operator* MachineOperatorBuilder::PoisonOnSpeculationTagged() {
+ return &cache_.kPoisonOnSpeculationTagged;
+}
+
+const Operator* MachineOperatorBuilder::PoisonOnSpeculationWord() {
+ return &cache_.kPoisonOnSpeculationWord;
+}
+
+const Operator* MachineOperatorBuilder::Word64AtomicLoad(
+ LoadRepresentation rep) {
+#define LOAD(Type) \
+ if (rep == MachineType::Type()) { \
+ return &cache_.kWord64AtomicLoad##Type; \
+ }
+ ATOMIC64_TYPE_LIST(LOAD)
+#undef LOAD
+ UNREACHABLE();
+}
+
+const Operator* MachineOperatorBuilder::Word64AtomicStore(
+ MachineRepresentation rep) {
+#define STORE(kRep) \
+ if (rep == MachineRepresentation::kRep) { \
+ return &cache_.kWord64AtomicStore##kRep; \
+ }
+ ATOMIC64_REPRESENTATION_LIST(STORE)
+#undef STORE
+ UNREACHABLE();
+}
+
+const Operator* MachineOperatorBuilder::Word64AtomicAdd(MachineType rep) {
+#define ADD(kRep) \
+ if (rep == MachineType::kRep()) { \
+ return &cache_.kWord64AtomicAdd##kRep; \
+ }
+ ATOMIC64_TYPE_LIST(ADD)
+#undef ADD
+ UNREACHABLE();
+}
+
+const Operator* MachineOperatorBuilder::Word64AtomicSub(MachineType rep) {
+#define SUB(kRep) \
+ if (rep == MachineType::kRep()) { \
+ return &cache_.kWord64AtomicSub##kRep; \
+ }
+ ATOMIC64_TYPE_LIST(SUB)
+#undef SUB
+ UNREACHABLE();
+}
+
+const Operator* MachineOperatorBuilder::Word64AtomicAnd(MachineType rep) {
+#define AND(kRep) \
+ if (rep == MachineType::kRep()) { \
+ return &cache_.kWord64AtomicAnd##kRep; \
+ }
+ ATOMIC64_TYPE_LIST(AND)
+#undef AND
+ UNREACHABLE();
+}
+
+const Operator* MachineOperatorBuilder::Word64AtomicOr(MachineType rep) {
+#define OR(kRep) \
+ if (rep == MachineType::kRep()) { \
+ return &cache_.kWord64AtomicOr##kRep; \
+ }
+ ATOMIC64_TYPE_LIST(OR)
+#undef OR
+ UNREACHABLE();
+}
+
+const Operator* MachineOperatorBuilder::Word64AtomicXor(MachineType rep) {
+#define XOR(kRep) \
+ if (rep == MachineType::kRep()) { \
+ return &cache_.kWord64AtomicXor##kRep; \
+ }
+ ATOMIC64_TYPE_LIST(XOR)
+#undef XOR
+ UNREACHABLE();
+}
+
+const Operator* MachineOperatorBuilder::Word64AtomicExchange(MachineType rep) {
+#define EXCHANGE(kRep) \
+ if (rep == MachineType::kRep()) { \
+ return &cache_.kWord64AtomicExchange##kRep; \
+ }
+ ATOMIC64_TYPE_LIST(EXCHANGE)
+#undef EXCHANGE
+ UNREACHABLE();
+}
+
+const Operator* MachineOperatorBuilder::Word64AtomicCompareExchange(
+ MachineType rep) {
+#define COMPARE_EXCHANGE(kRep) \
+ if (rep == MachineType::kRep()) { \
+ return &cache_.kWord64AtomicCompareExchange##kRep; \
+ }
+ ATOMIC64_TYPE_LIST(COMPARE_EXCHANGE)
+#undef COMPARE_EXCHANGE
+ UNREACHABLE();
+}
+
const OptionalOperator MachineOperatorBuilder::SpeculationFence() {
return OptionalOperator(flags_ & kSpeculationFence,
&cache_.kSpeculationFence);
@@ -1006,7 +1180,9 @@ const Operator* MachineOperatorBuilder::S8x16Shuffle(
#undef MACHINE_TYPE_LIST
#undef MACHINE_REPRESENTATION_LIST
#undef ATOMIC_TYPE_LIST
+#undef ATOMIC64_TYPE_LIST
#undef ATOMIC_REPRESENTATION_LIST
+#undef ATOMIC64_REPRESENTATION_LIST
#undef SIMD_LANE_OP_LIST
#undef SIMD_FORMAT_LIST
#undef STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST
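The machine-operator.cc changes above add a cache of Word64 atomic operators mirroring the existing Word32 ones: a macro stamps out one cached operator instance per type, and each builder method returns the cached instance matching the requested MachineType. A compressed sketch of that cache-and-dispatch shape, using a made-up Op struct and only two types for brevity:

    #include <cassert>
    #include <string>

    struct MachineType {
      std::string name;
      bool operator==(const MachineType& other) const { return name == other.name; }
      static MachineType Uint32() { return {"Uint32"}; }
      static MachineType Uint64() { return {"Uint64"}; }
    };

    struct Op {
      const char* mnemonic;
      MachineType type;
    };

    // One statically cached operator per (opcode, type) pair, as the macros do.
    struct OperatorCache {
      Op kWord64AtomicAddUint32{"Word64AtomicAdd", MachineType::Uint32()};
      Op kWord64AtomicAddUint64{"Word64AtomicAdd", MachineType::Uint64()};
    };

    const Op* Word64AtomicAdd(const OperatorCache& cache, MachineType rep) {
      if (rep == MachineType::Uint32()) return &cache.kWord64AtomicAddUint32;
      if (rep == MachineType::Uint64()) return &cache.kWord64AtomicAddUint64;
      assert(false && "unsupported representation");  // UNREACHABLE() in the real code
      return nullptr;
    }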
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 2cc1829116..e3a4a6c234 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -45,7 +45,8 @@ class OptionalOperator final {
// A Load needs a MachineType.
typedef MachineType LoadRepresentation;
-LoadRepresentation LoadRepresentationOf(Operator const*);
+V8_EXPORT_PRIVATE LoadRepresentation LoadRepresentationOf(Operator const*)
+ V8_WARN_UNUSED_RESULT;
// A Store needs a MachineType and a WriteBarrierKind in order to emit the
// correct write barrier.
@@ -71,17 +72,14 @@ size_t hash_value(StoreRepresentation);
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, StoreRepresentation);
-StoreRepresentation const& StoreRepresentationOf(Operator const*);
-
-typedef MachineType UnalignedLoadRepresentation;
-
-UnalignedLoadRepresentation UnalignedLoadRepresentationOf(Operator const*);
+V8_EXPORT_PRIVATE StoreRepresentation const& StoreRepresentationOf(
+ Operator const*) V8_WARN_UNUSED_RESULT;
// An UnalignedStore needs a MachineType.
typedef MachineRepresentation UnalignedStoreRepresentation;
UnalignedStoreRepresentation const& UnalignedStoreRepresentationOf(
- Operator const*);
+ Operator const*) V8_WARN_UNUSED_RESULT;
class StackSlotRepresentation final {
public:
@@ -105,11 +103,13 @@ size_t hash_value(StackSlotRepresentation);
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
StackSlotRepresentation);
-StackSlotRepresentation const& StackSlotRepresentationOf(Operator const* op);
+V8_EXPORT_PRIVATE StackSlotRepresentation const& StackSlotRepresentationOf(
+ Operator const* op) V8_WARN_UNUSED_RESULT;
-MachineRepresentation AtomicStoreRepresentationOf(Operator const* op);
+MachineRepresentation AtomicStoreRepresentationOf(Operator const* op)
+ V8_WARN_UNUSED_RESULT;
-MachineType AtomicOpRepresentationOf(Operator const* op);
+MachineType AtomicOpRepresentationOf(Operator const* op) V8_WARN_UNUSED_RESULT;
// Interface for building machine-level operators. These operators are
// machine-level but machine-independent and thus define a language suitable
@@ -592,7 +592,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* ProtectedStore(MachineRepresentation rep);
// unaligned load [base + index]
- const Operator* UnalignedLoad(UnalignedLoadRepresentation rep);
+ const Operator* UnalignedLoad(LoadRepresentation rep);
// unaligned store [base + index], value
const Operator* UnalignedStore(UnalignedStoreRepresentation rep);
@@ -600,23 +600,34 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* StackSlot(int size, int alignment = 0);
const Operator* StackSlot(MachineRepresentation rep, int alignment = 0);
- // Returns a value which can be used as a mask to poison values when executing
- // speculatively.
- const Operator* SpeculationPoison();
+ // Destroy value by masking when misspeculating.
+ const Operator* PoisonOnSpeculationTagged();
+ const Operator* PoisonOnSpeculationWord();
// Access to the machine stack.
const Operator* LoadStackPointer();
const Operator* LoadFramePointer();
const Operator* LoadParentFramePointer();
+ // Access to the root register.
+ const Operator* LoadRootsPointer();
+
// atomic-load [base + index]
const Operator* Word32AtomicLoad(LoadRepresentation rep);
+ // atomic-load [base + index]
+ const Operator* Word64AtomicLoad(LoadRepresentation rep);
// atomic-store [base + index], value
const Operator* Word32AtomicStore(MachineRepresentation rep);
+ // atomic-store [base + index], value
+ const Operator* Word64AtomicStore(MachineRepresentation rep);
// atomic-exchange [base + index], value
const Operator* Word32AtomicExchange(MachineType rep);
+ // atomic-exchange [base + index], value
+ const Operator* Word64AtomicExchange(MachineType rep);
// atomic-compare-exchange [base + index], old_value, new_value
const Operator* Word32AtomicCompareExchange(MachineType rep);
+ // atomic-compare-exchange [base + index], old_value, new_value
+ const Operator* Word64AtomicCompareExchange(MachineType rep);
// atomic-add [base + index], value
const Operator* Word32AtomicAdd(MachineType rep);
// atomic-sub [base + index], value
@@ -627,6 +638,16 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* Word32AtomicOr(MachineType rep);
// atomic-xor [base + index], value
const Operator* Word32AtomicXor(MachineType rep);
+ // atomic-add [base + index], value
+ const Operator* Word64AtomicAdd(MachineType rep);
+ // atomic-sub [base + index], value
+ const Operator* Word64AtomicSub(MachineType rep);
+ // atomic-and [base + index], value
+ const Operator* Word64AtomicAnd(MachineType rep);
+ // atomic-or [base + index], value
+ const Operator* Word64AtomicOr(MachineType rep);
+ // atomic-xor [base + index], value
+ const Operator* Word64AtomicXor(MachineType rep);
const OptionalOperator SpeculationFence();
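The header changes above replace the SpeculationPoison mask operator with per-value PoisonOnSpeculationTagged/Word operators: rather than exposing the mask itself, the builder wraps individual values that must be destroyed when execution is misspeculated. A toy standalone sketch of that wrapping decision; the Node/Graph types and the mitigation-level enum are simplified stand-ins, not the actual graph-assembler API:

    #include <memory>
    #include <string>
    #include <vector>

    enum class PoisoningMitigationLevel { kOff, kOn };

    struct Node {
      std::string op;
      std::vector<Node*> inputs;
    };

    struct Graph {
      std::vector<std::unique_ptr<Node>> nodes;
      Node* NewNode(std::string op, std::vector<Node*> inputs) {
        nodes.push_back(std::make_unique<Node>(Node{std::move(op), std::move(inputs)}));
        return nodes.back().get();
      }
    };

    // Wrap `value` in a poison-on-speculation node only when the mitigation is
    // on, choosing the tagged or word flavour by the value's representation.
    Node* PoisonIfNeeded(Graph& graph, Node* value, PoisoningMitigationLevel level,
                         bool is_tagged) {
      if (level != PoisoningMitigationLevel::kOn) return value;
      return graph.NewNode(
          is_tagged ? "PoisonOnSpeculationTagged" : "PoisonOnSpeculationWord",
          {value});
    }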
diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc
index 95418c4a81..3cc64ac73d 100644
--- a/deps/v8/src/compiler/memory-optimizer.cc
+++ b/deps/v8/src/compiler/memory-optimizer.cc
@@ -16,14 +16,16 @@ namespace internal {
namespace compiler {
MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
- LoadPoisoning load_poisoning)
+ PoisoningMitigationLevel poisoning_enabled,
+ AllocationFolding allocation_folding)
: jsgraph_(jsgraph),
empty_state_(AllocationState::Empty(zone)),
pending_(zone),
tokens_(zone),
zone_(zone),
graph_assembler_(jsgraph, nullptr, nullptr, zone),
- load_poisoning_(load_poisoning) {}
+ poisoning_enabled_(poisoning_enabled),
+ allocation_folding_(allocation_folding) {}
void MemoryOptimizer::Optimize() {
EnqueueUses(graph()->start(), empty_state());
@@ -172,7 +174,8 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
Int32Matcher m(size);
if (m.HasValue() && m.Value() < kMaxRegularHeapObjectSize) {
int32_t const object_size = m.Value();
- if (state->size() <= kMaxRegularHeapObjectSize - object_size &&
+ if (allocation_folding_ == AllocationFolding::kDoAllocationFolding &&
+ state->size() <= kMaxRegularHeapObjectSize - object_size &&
state->group()->pretenure() == pretenure) {
// We can fold this Allocate {node} into the allocation {group}
// represented by the given {state}. Compute the upper bound for
@@ -181,7 +184,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
// Update the reservation check to the actual maximum upper bound.
AllocationGroup* const group = state->group();
- if (OpParameter<int32_t>(group->size()) < state_size) {
+ if (OpParameter<int32_t>(group->size()->op()) < state_size) {
NodeProperties::ChangeOp(group->size(),
common()->Int32Constant(state_size));
}
@@ -350,7 +353,7 @@ void MemoryOptimizer::VisitLoadElement(Node* node,
ElementAccess const& access = ElementAccessOf(node->op());
Node* index = node->InputAt(1);
node->ReplaceInput(1, ComputeIndex(access, index));
- if (load_poisoning_ == LoadPoisoning::kDoPoison &&
+ if (poisoning_enabled_ == PoisoningMitigationLevel::kOn &&
access.machine_type.representation() !=
MachineRepresentation::kTaggedPointer) {
NodeProperties::ChangeOp(node,
@@ -366,7 +369,7 @@ void MemoryOptimizer::VisitLoadField(Node* node, AllocationState const* state) {
FieldAccess const& access = FieldAccessOf(node->op());
Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
node->InsertInput(graph()->zone(), 1, offset);
- if (load_poisoning_ == LoadPoisoning::kDoPoison &&
+ if (poisoning_enabled_ == PoisoningMitigationLevel::kOn &&
access.machine_type.representation() !=
MachineRepresentation::kTaggedPointer) {
NodeProperties::ChangeOp(node,
diff --git a/deps/v8/src/compiler/memory-optimizer.h b/deps/v8/src/compiler/memory-optimizer.h
index 38643ea8a3..28fdb4cc98 100644
--- a/deps/v8/src/compiler/memory-optimizer.h
+++ b/deps/v8/src/compiler/memory-optimizer.h
@@ -31,7 +31,11 @@ typedef uint32_t NodeId;
// implicitly.
class MemoryOptimizer final {
public:
- MemoryOptimizer(JSGraph* jsgraph, Zone* zone, LoadPoisoning load_poisoning);
+ enum class AllocationFolding { kDoAllocationFolding, kDontAllocationFolding };
+
+ MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
+ PoisoningMitigationLevel poisoning_enabled,
+ AllocationFolding allocation_folding);
~MemoryOptimizer() {}
void Optimize();
@@ -142,7 +146,8 @@ class MemoryOptimizer final {
ZoneQueue<Token> tokens_;
Zone* const zone_;
GraphAssembler graph_assembler_;
- LoadPoisoning load_poisoning_;
+ PoisoningMitigationLevel poisoning_enabled_;
+ AllocationFolding allocation_folding_;
DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryOptimizer);
};
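The MemoryOptimizer constructor now takes both a PoisoningMitigationLevel and an AllocationFolding flag, so callers choose load poisoning and allocation folding independently; the .cc hunks above gate both behaviours on these members. A hedged standalone sketch of those two gates (the class and the size constant below are illustrative, not the real V8 definitions):

    #include <cstdint>

    enum class PoisoningMitigationLevel { kOff, kOn };
    enum class AllocationFolding { kDoAllocationFolding, kDontAllocationFolding };

    constexpr int32_t kMaxRegularHeapObjectSize = 1 << 19;  // illustrative value only

    class MemoryOptimizerSketch {
     public:
      MemoryOptimizerSketch(PoisoningMitigationLevel poisoning,
                            AllocationFolding folding)
          : poisoning_(poisoning), folding_(folding) {}

      // Mirrors the gate added in VisitAllocateRaw: folding only happens when it
      // is enabled *and* the combined allocation still fits a regular object.
      bool CanFoldInto(int32_t group_size, int32_t object_size) const {
        return folding_ == AllocationFolding::kDoAllocationFolding &&
               group_size <= kMaxRegularHeapObjectSize - object_size;
      }

      // Mirrors the checks in VisitLoadElement / VisitLoadField.
      bool ShouldPoisonLoads() const {
        return poisoning_ == PoisoningMitigationLevel::kOn;
      }

     private:
      PoisoningMitigationLevel poisoning_;
      AllocationFolding folding_;
    };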
diff --git a/deps/v8/src/compiler/mips/code-generator-mips.cc b/deps/v8/src/compiler/mips/code-generator-mips.cc
index 91e68feb94..aad88fb763 100644
--- a/deps/v8/src/compiler/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/mips/code-generator-mips.cc
@@ -4,7 +4,6 @@
#include "src/assembler-inl.h"
#include "src/callable.h"
-#include "src/compilation-info.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/code-generator.h"
#include "src/compiler/gap-resolver.h"
@@ -12,6 +11,7 @@
#include "src/compiler/osr.h"
#include "src/heap/heap-inl.h"
#include "src/mips/macro-assembler-mips.h"
+#include "src/optimized-compilation-info.h"
namespace v8 {
namespace internal {
@@ -19,13 +19,6 @@ namespace compiler {
#define __ tasm()->
-// TODO(plind): Possibly avoid using these lithium names.
-#define kScratchReg kLithiumScratchReg
-#define kCompareReg kLithiumScratchReg2
-#define kScratchReg2 kLithiumScratchReg2
-#define kScratchDoubleReg kLithiumScratchDouble
-
-
// TODO(plind): consider renaming these macros.
#define TRACE_MSG(msg) \
PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \
@@ -143,41 +136,6 @@ static inline bool HasRegisterInput(Instruction* instr, size_t index) {
namespace {
-class OutOfLineRound : public OutOfLineCode {
- public:
- OutOfLineRound(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- // Handle rounding to zero case where sign has to be preserved.
- // High bits of double input already in kScratchReg.
- __ srl(at, kScratchReg, 31);
- __ sll(at, at, 31);
- __ Mthc1(at, result_);
- }
-
- private:
- DoubleRegister const result_;
-};
-
-
-class OutOfLineRound32 : public OutOfLineCode {
- public:
- OutOfLineRound32(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- // Handle rounding to zero case where sign has to be preserved.
- // High bits of float input already in kScratchReg.
- __ srl(at, kScratchReg, 31);
- __ sll(at, at, 31);
- __ mtc1(at, result_);
- }
-
- private:
- DoubleRegister const result_;
-};
-
class OutOfLineRecordWrite final : public OutOfLineCode {
public:
@@ -331,13 +289,13 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
return OLT;
case kUnsignedGreaterThanOrEqual:
predicate = false;
- return ULT;
+ return OLT;
case kUnsignedLessThanOrEqual:
predicate = true;
return OLE;
case kUnsignedGreaterThan:
predicate = false;
- return ULE;
+ return OLE;
case kUnorderedEqual:
case kUnorderedNotEqual:
predicate = true;
@@ -349,59 +307,24 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
UNREACHABLE();
}
-} // namespace
+#define UNSUPPORTED_COND(opcode, condition) \
+ OFStream out(stdout); \
+ out << "Unsupported " << #opcode << " condition: \"" << condition << "\""; \
+ UNIMPLEMENTED();
-#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(mode) \
- if (IsMipsArchVariant(kMips32r6)) { \
- __ cfc1(kScratchReg, FCSR); \
- __ li(at, Operand(mode_##mode)); \
- __ ctc1(at, FCSR); \
- __ rint_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
- __ ctc1(kScratchReg, FCSR); \
- } else { \
- auto ool = new (zone()) OutOfLineRound(this, i.OutputDoubleRegister()); \
- Label done; \
- __ Mfhc1(kScratchReg, i.InputDoubleRegister(0)); \
- __ Ext(at, kScratchReg, HeapNumber::kExponentShift, \
- HeapNumber::kExponentBits); \
- __ Branch(USE_DELAY_SLOT, &done, hs, at, \
- Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits)); \
- __ mov_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
- __ mode##_l_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
- __ Move(at, kScratchReg2, i.OutputDoubleRegister()); \
- __ or_(at, at, kScratchReg2); \
- __ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg)); \
- __ cvt_d_l(i.OutputDoubleRegister(), i.OutputDoubleRegister()); \
- __ bind(ool->exit()); \
- __ bind(&done); \
+void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
+ InstructionCode opcode, Instruction* instr,
+ MipsOperandConverter& i) {
+ const MemoryAccessMode access_mode =
+ static_cast<MemoryAccessMode>(MiscField::decode(opcode));
+ if (access_mode == kMemoryAccessPoisoned) {
+ Register value = i.OutputRegister();
+ codegen->tasm()->And(value, value, kSpeculationPoisonRegister);
}
+}
+} // namespace
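EmitWordLoadPoisoningIfNeeded above implements the load-poisoning mitigation on MIPS: when the instruction was selected with kMemoryAccessPoisoned, the freshly loaded value is ANDed with kSpeculationPoisonRegister, which holds all-ones on the architecturally correct path and all-zeros after a misspeculated branch. A plain-C++ sketch of the masking effect itself, with registers reduced to uint32_t values:

    #include <cstdint>
    #include <cstdio>

    // On the correct path the poison register is ~0; after a misspeculated
    // branch it has been cleared to 0, so the AND destroys the loaded value
    // before it can feed a dependent, cache-observable access.
    uint32_t PoisonLoadedValue(uint32_t loaded, uint32_t speculation_poison) {
      return loaded & speculation_poison;
    }

    int main() {
      std::printf("%u\n", PoisonLoadedValue(0x1234u, ~0u));  // 4660: value survives
      std::printf("%u\n", PoisonLoadedValue(0x1234u, 0u));   // 0: value destroyed
      return 0;
    }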
-#define ASSEMBLE_ROUND_FLOAT_TO_FLOAT(mode) \
- if (IsMipsArchVariant(kMips32r6)) { \
- __ cfc1(kScratchReg, FCSR); \
- __ li(at, Operand(mode_##mode)); \
- __ ctc1(at, FCSR); \
- __ rint_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
- __ ctc1(kScratchReg, FCSR); \
- } else { \
- int32_t kFloat32ExponentBias = 127; \
- int32_t kFloat32MantissaBits = 23; \
- int32_t kFloat32ExponentBits = 8; \
- auto ool = new (zone()) OutOfLineRound32(this, i.OutputDoubleRegister()); \
- Label done; \
- __ mfc1(kScratchReg, i.InputDoubleRegister(0)); \
- __ Ext(at, kScratchReg, kFloat32MantissaBits, kFloat32ExponentBits); \
- __ Branch(USE_DELAY_SLOT, &done, hs, at, \
- Operand(kFloat32ExponentBias + kFloat32MantissaBits)); \
- __ mov_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
- __ mode##_w_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
- __ mfc1(at, i.OutputDoubleRegister()); \
- __ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg)); \
- __ cvt_s_w(i.OutputDoubleRegister(), i.OutputDoubleRegister()); \
- __ bind(ool->exit()); \
- __ bind(&done); \
- }
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
do { \
@@ -642,7 +565,7 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ Jump(code, RelocInfo::CODE_TARGET, ne, at, Operand(zero_reg));
}
-void CodeGenerator::GenerateSpeculationPoison() {
+void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
// Calculate a mask which has all bits set in the normal case, but has all
// bits cleared if we are speculatively executing the wrong PC.
// difference = (current - expected) | (expected - current)
@@ -872,6 +795,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ mov(i.OutputRegister(), fp);
}
break;
+ case kArchRootsPointer:
+ __ mov(i.OutputRegister(), kRootRegister);
+ break;
case kArchTruncateDoubleToI:
__ TruncateDoubleToIDelayed(zone(), i.OutputRegister(),
i.InputDoubleRegister(0));
@@ -928,6 +854,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kArchPoisonOnSpeculationWord:
+ __ And(i.OutputRegister(), i.InputRegister(0),
+ kSpeculationPoisonRegister);
+ break;
case kIeee754Float64Acos:
ASSEMBLE_IEEE754_UNOP(acos);
break;
@@ -977,8 +907,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_IEEE754_UNOP(log2);
break;
case kIeee754Float64Pow: {
- __ CallStubDelayed(new (zone())
- MathPowStub(nullptr, MathPowStub::DOUBLE));
+ __ CallStubDelayed(new (zone()) MathPowStub());
break;
}
case kIeee754Float64Sin:
@@ -997,19 +926,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMipsAddOvf:
- // Pseudo-instruction used for overflow/branch. No opcode emitted here.
+ __ AddOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1),
+ kScratchReg);
break;
case kMipsSub:
__ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMipsSubOvf:
- // Pseudo-instruction used for overflow/branch. No opcode emitted here.
+ __ SubOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1),
+ kScratchReg);
break;
case kMipsMul:
__ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMipsMulOvf:
- // Pseudo-instruction used for overflow/branch. No opcode emitted here.
+ __ MulOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1),
+ kScratchReg);
break;
case kMipsMulHigh:
__ Mulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
@@ -1098,11 +1030,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
if (instr->InputAt(2)->IsRegister()) {
__ ShlPair(i.OutputRegister(0), second_output, i.InputRegister(0),
- i.InputRegister(1), i.InputRegister(2));
+ i.InputRegister(1), i.InputRegister(2), kScratchReg,
+ kScratchReg2);
} else {
uint32_t imm = i.InputOperand(2).immediate();
__ ShlPair(i.OutputRegister(0), second_output, i.InputRegister(0),
- i.InputRegister(1), imm);
+ i.InputRegister(1), imm, kScratchReg);
}
} break;
case kMipsShrPair: {
@@ -1110,11 +1043,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
if (instr->InputAt(2)->IsRegister()) {
__ ShrPair(i.OutputRegister(0), second_output, i.InputRegister(0),
- i.InputRegister(1), i.InputRegister(2));
+ i.InputRegister(1), i.InputRegister(2), kScratchReg,
+ kScratchReg2);
} else {
uint32_t imm = i.InputOperand(2).immediate();
__ ShrPair(i.OutputRegister(0), second_output, i.InputRegister(0),
- i.InputRegister(1), imm);
+ i.InputRegister(1), imm, kScratchReg);
}
} break;
case kMipsSarPair: {
@@ -1122,11 +1056,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
if (instr->InputAt(2)->IsRegister()) {
__ SarPair(i.OutputRegister(0), second_output, i.InputRegister(0),
- i.InputRegister(1), i.InputRegister(2));
+ i.InputRegister(1), i.InputRegister(2), kScratchReg,
+ kScratchReg2);
} else {
uint32_t imm = i.InputOperand(2).immediate();
__ SarPair(i.OutputRegister(0), second_output, i.InputRegister(0),
- i.InputRegister(1), imm);
+ i.InputRegister(1), imm, kScratchReg);
}
} break;
case kMipsExt:
@@ -1145,7 +1080,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMipsTst:
- // Pseudo-instruction used for tst/branch. No opcode emitted here.
+ __ And(kScratchReg, i.InputRegister(0), i.InputOperand(1));
break;
case kMipsCmp:
// Pseudo-instruction used for cmp/branch. No opcode emitted here.
@@ -1164,9 +1099,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Lsa(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
i.InputInt8(2));
break;
- case kMipsCmpS:
- // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
- break;
+ case kMipsCmpS: {
+ FPURegister left = i.InputOrZeroSingleRegister(0);
+ FPURegister right = i.InputOrZeroSingleRegister(1);
+ bool predicate;
+ FPUCondition cc =
+ FlagsConditionToConditionCmpFPU(predicate, instr->flags_condition());
+
+ if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
+ !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+
+ __ CompareF32(cc, left, right);
+ } break;
case kMipsAddS:
// TODO(plind): add special case: combine mult & add.
__ add_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
@@ -1214,9 +1160,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ min_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
- case kMipsCmpD:
- // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
- break;
+ case kMipsCmpD: {
+ FPURegister left = i.InputOrZeroDoubleRegister(0);
+ FPURegister right = i.InputOrZeroDoubleRegister(1);
+ bool predicate;
+ FPUCondition cc =
+ FlagsConditionToConditionCmpFPU(predicate, instr->flags_condition());
+ if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
+ !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+ __ CompareF64(cc, left, right);
+ } break;
case kMipsAddPair:
__ AddPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
i.InputRegister(1), i.InputRegister(2), i.InputRegister(3));
@@ -1306,35 +1261,35 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputDoubleRegister(1));
break;
case kMipsFloat64RoundDown: {
- ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor);
+ __ Floor_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
case kMipsFloat32RoundDown: {
- ASSEMBLE_ROUND_FLOAT_TO_FLOAT(floor);
+ __ Floor_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
break;
}
case kMipsFloat64RoundTruncate: {
- ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc);
+ __ Trunc_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
case kMipsFloat32RoundTruncate: {
- ASSEMBLE_ROUND_FLOAT_TO_FLOAT(trunc);
+ __ Trunc_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
break;
}
case kMipsFloat64RoundUp: {
- ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil);
+ __ Ceil_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
case kMipsFloat32RoundUp: {
- ASSEMBLE_ROUND_FLOAT_TO_FLOAT(ceil);
+ __ Ceil_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
break;
}
case kMipsFloat64RoundTiesEven: {
- ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(round);
+ __ Round_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
case kMipsFloat32RoundTiesEven: {
- ASSEMBLE_ROUND_FLOAT_TO_FLOAT(round);
+ __ Round_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
break;
}
case kMipsFloat32Max: {
@@ -1499,24 +1454,30 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kMipsLbu:
__ lbu(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsLb:
__ lb(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsSb:
__ sb(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
case kMipsLhu:
__ lhu(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsUlhu:
__ Ulhu(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsLh:
__ lh(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsUlh:
__ Ulh(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsSh:
__ sh(i.InputOrZeroRegister(2), i.MemoryOperand());
@@ -1526,9 +1487,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kMipsLw:
__ lw(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsUlw:
__ Ulw(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsSw:
__ sw(i.InputOrZeroRegister(2), i.MemoryOperand());
@@ -2809,37 +2772,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} // NOLINT(readability/fn_size)
-#define UNSUPPORTED_COND(opcode, condition) \
- OFStream out(stdout); \
- out << "Unsupported " << #opcode << " condition: \"" << condition << "\""; \
- UNIMPLEMENTED();
-
-static bool convertCondition(FlagsCondition condition, Condition& cc) {
- switch (condition) {
- case kEqual:
- cc = eq;
- return true;
- case kNotEqual:
- cc = ne;
- return true;
- case kUnsignedLessThan:
- cc = lt;
- return true;
- case kUnsignedGreaterThanOrEqual:
- cc = uge;
- return true;
- case kUnsignedLessThanOrEqual:
- cc = le;
- return true;
- case kUnsignedGreaterThan:
- cc = ugt;
- return true;
- default:
- break;
- }
- return false;
-}
-
void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
Instruction* instr, FlagsCondition condition,
Label* tlabel, Label* flabel, bool fallthru) {
@@ -2857,45 +2789,29 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
MipsOperandConverter i(gen, instr);
if (instr->arch_opcode() == kMipsTst) {
cc = FlagsConditionToConditionTst(condition);
- __ And(at, i.InputRegister(0), i.InputOperand(1));
- __ Branch(tlabel, cc, at, Operand(zero_reg));
- } else if (instr->arch_opcode() == kMipsAddOvf) {
- switch (condition) {
- case kOverflow:
- __ AddBranchOvf(i.OutputRegister(), i.InputRegister(0),
- i.InputOperand(1), tlabel, flabel);
- break;
- case kNotOverflow:
- __ AddBranchOvf(i.OutputRegister(), i.InputRegister(0),
- i.InputOperand(1), flabel, tlabel);
- break;
- default:
- UNSUPPORTED_COND(kMipsAddOvf, condition);
- break;
- }
- } else if (instr->arch_opcode() == kMipsSubOvf) {
+ __ Branch(tlabel, cc, kScratchReg, Operand(zero_reg));
+ } else if (instr->arch_opcode() == kMipsAddOvf ||
+ instr->arch_opcode() == kMipsSubOvf) {
+ // Overflow occurs if overflow register is negative
switch (condition) {
case kOverflow:
- __ SubBranchOvf(i.OutputRegister(), i.InputRegister(0),
- i.InputOperand(1), tlabel, flabel);
+ __ Branch(tlabel, lt, kScratchReg, Operand(zero_reg));
break;
case kNotOverflow:
- __ SubBranchOvf(i.OutputRegister(), i.InputRegister(0),
- i.InputOperand(1), flabel, tlabel);
+ __ Branch(tlabel, ge, kScratchReg, Operand(zero_reg));
break;
default:
- UNSUPPORTED_COND(kMipsAddOvf, condition);
+ UNSUPPORTED_COND(instr->arch_opcode(), condition);
break;
}
} else if (instr->arch_opcode() == kMipsMulOvf) {
+ // Overflow occurs if overflow register is not zero
switch (condition) {
case kOverflow:
- __ MulBranchOvf(i.OutputRegister(), i.InputRegister(0),
- i.InputOperand(1), tlabel, flabel);
+ __ Branch(tlabel, ne, kScratchReg, Operand(zero_reg));
break;
case kNotOverflow:
- __ MulBranchOvf(i.OutputRegister(), i.InputRegister(0),
- i.InputOperand(1), flabel, tlabel);
+ __ Branch(tlabel, eq, kScratchReg, Operand(zero_reg));
break;
default:
UNSUPPORTED_COND(kMipsMulOvf, condition);
@@ -2904,28 +2820,15 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
} else if (instr->arch_opcode() == kMipsCmp) {
cc = FlagsConditionToConditionCmp(condition);
__ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
- } else if (instr->arch_opcode() == kMipsCmpS) {
- if (!convertCondition(condition, cc)) {
- UNSUPPORTED_COND(kMips64CmpS, condition);
- }
- FPURegister left = i.InputOrZeroSingleRegister(0);
- FPURegister right = i.InputOrZeroSingleRegister(1);
- if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
- !__ IsDoubleZeroRegSet()) {
- __ Move(kDoubleRegZero, 0.0);
- }
- __ BranchF32(tlabel, nullptr, cc, left, right);
- } else if (instr->arch_opcode() == kMipsCmpD) {
- if (!convertCondition(condition, cc)) {
- UNSUPPORTED_COND(kMips64CmpD, condition);
- }
- FPURegister left = i.InputOrZeroDoubleRegister(0);
- FPURegister right = i.InputOrZeroDoubleRegister(1);
- if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
- !__ IsDoubleZeroRegSet()) {
- __ Move(kDoubleRegZero, 0.0);
+ } else if (instr->arch_opcode() == kMipsCmpS ||
+ instr->arch_opcode() == kMipsCmpD) {
+ bool predicate;
+ FlagsConditionToConditionCmpFPU(predicate, condition);
+ if (predicate) {
+ __ BranchTrueF(tlabel);
+ } else {
+ __ BranchFalseF(tlabel);
}
- __ BranchF64(tlabel, nullptr, cc, left, right);
} else {
PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
instr->arch_opcode());
@@ -2946,7 +2849,70 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
Instruction* instr) {
- UNREACHABLE();
+ // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
+ if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
+ return;
+ }
+
+ MipsOperandConverter i(this, instr);
+ condition = NegateFlagsCondition(condition);
+
+ switch (instr->arch_opcode()) {
+ case kMipsCmp: {
+ __ LoadZeroOnCondition(kSpeculationPoisonRegister, i.InputRegister(0),
+ i.InputOperand(1),
+ FlagsConditionToConditionCmp(condition));
+ }
+ return;
+ case kMipsTst: {
+ switch (condition) {
+ case kEqual:
+ __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
+ break;
+ case kNotEqual:
+ __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
+ kScratchReg);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ return;
+ case kMipsAddOvf:
+ case kMipsSubOvf: {
+ // Overflow occurs if overflow register is negative
+ __ Slt(kScratchReg2, kScratchReg, zero_reg);
+ switch (condition) {
+ case kOverflow:
+ __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
+ kScratchReg2);
+ break;
+ case kNotOverflow:
+ __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
+ break;
+ default:
+ UNSUPPORTED_COND(instr->arch_opcode(), condition);
+ }
+ }
+ return;
+ case kMipsMulOvf: {
+ // Overflow occurs if overflow register is not zero
+ switch (condition) {
+ case kOverflow:
+ __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
+ kScratchReg);
+ break;
+ case kNotOverflow:
+ __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
+ break;
+ default:
+ UNSUPPORTED_COND(instr->arch_opcode(), condition);
+ }
+ }
+ return;
+ default:
+ break;
+ }
}
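AssembleBranchPoisoning above updates kSpeculationPoisonRegister from the same flags the branch just tested, after negating the condition: if the CPU turns out to be executing the arm whose assumption is false, the register becomes zero and every subsequently poisoned value is wiped. A simplified plain-C++ sketch of that negate-then-clear idea; the condition codes and register are stand-ins for the MIPS specifics:

    #include <cstdint>

    enum class FlagsCondition { kEqual, kNotEqual };

    FlagsCondition Negate(FlagsCondition c) {
      return c == FlagsCondition::kEqual ? FlagsCondition::kNotEqual
                                         : FlagsCondition::kEqual;
    }

    // Zero the poison when the *negated* condition holds, i.e. when the
    // assumption under which this block is executing is actually false.
    uint32_t UpdatePoison(uint32_t poison, FlagsCondition branch_cond,
                          uint32_t lhs, uint32_t rhs) {
      FlagsCondition negated = Negate(branch_cond);
      bool misspeculating =
          (negated == FlagsCondition::kEqual) ? (lhs == rhs) : (lhs != rhs);
      return misspeculating ? 0u : poison;
    }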
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
@@ -3043,50 +3009,19 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
if (instr->arch_opcode() == kMipsTst) {
cc = FlagsConditionToConditionTst(condition);
- if (instr->InputAt(1)->IsImmediate() &&
- base::bits::IsPowerOfTwo(i.InputOperand(1).immediate())) {
- uint16_t pos =
- base::bits::CountTrailingZeros32(i.InputOperand(1).immediate());
- __ Ext(result, i.InputRegister(0), pos, 1);
- if (cc == eq) {
- __ xori(result, result, 1);
- }
+ if (cc == eq) {
+ __ Sltu(result, kScratchReg, 1);
} else {
- __ And(kScratchReg, i.InputRegister(0), i.InputOperand(1));
- if (cc == eq) {
- __ Sltu(result, kScratchReg, 1);
- } else {
- __ Sltu(result, zero_reg, kScratchReg);
- }
+ __ Sltu(result, zero_reg, kScratchReg);
}
return;
} else if (instr->arch_opcode() == kMipsAddOvf ||
- instr->arch_opcode() == kMipsSubOvf ||
- instr->arch_opcode() == kMipsMulOvf) {
- Label flabel, tlabel;
- switch (instr->arch_opcode()) {
- case kMipsAddOvf:
- __ AddBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
- i.InputOperand(1), &flabel);
-
- break;
- case kMipsSubOvf:
- __ SubBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
- i.InputOperand(1), &flabel);
- break;
- case kMipsMulOvf:
- __ MulBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
- i.InputOperand(1), &flabel);
- break;
- default:
- UNREACHABLE();
- break;
- }
- __ li(result, 1);
- __ Branch(&tlabel);
- __ bind(&flabel);
- __ li(result, 0);
- __ bind(&tlabel);
+ instr->arch_opcode() == kMipsSubOvf) {
+ // Overflow occurs if overflow register is negative
+ __ slt(result, kScratchReg, zero_reg);
+ } else if (instr->arch_opcode() == kMipsMulOvf) {
+ // Overflow occurs if overflow register is not zero
+ __ Sgtu(result, kScratchReg, zero_reg);
} else if (instr->arch_opcode() == kMipsCmp) {
cc = FlagsConditionToConditionCmp(condition);
switch (cc) {
@@ -3181,27 +3116,15 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
__ Move(kDoubleRegZero, 0.0);
}
bool predicate;
- FPUCondition cc = FlagsConditionToConditionCmpFPU(predicate, condition);
+ FlagsConditionToConditionCmpFPU(predicate, condition);
if (!IsMipsArchVariant(kMips32r6)) {
__ li(result, Operand(1));
- if (instr->arch_opcode() == kMipsCmpD) {
- __ c(cc, D, left, right);
- } else {
- DCHECK_EQ(kMipsCmpS, instr->arch_opcode());
- __ c(cc, S, left, right);
- }
if (predicate) {
__ Movf(result, zero_reg);
} else {
__ Movt(result, zero_reg);
}
} else {
- if (instr->arch_opcode() == kMipsCmpD) {
- __ cmp(cc, L, kDoubleCompareReg, left, right);
- } else {
- DCHECK_EQ(kMipsCmpS, instr->arch_opcode());
- __ cmp(cc, W, kDoubleCompareReg, left, right);
- }
__ mfc1(result, kDoubleCompareReg);
if (predicate) {
__ And(result, result, 1); // cmp returns all 1's/0's, use only LSB.
@@ -3293,6 +3216,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
+ ResetSpeculationPoison();
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
@@ -3449,8 +3373,8 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
__ sw(zero_reg, dst);
} else {
- __ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
- __ sw(at, dst);
+ __ li(kScratchReg, Operand(bit_cast<int32_t>(src.ToFloat32())));
+ __ sw(kScratchReg, dst);
}
} else {
DCHECK(destination->IsFPRegister());
@@ -3557,7 +3481,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
} else if (source->IsStackSlot()) {
DCHECK(destination->IsStackSlot());
Register temp_0 = kScratchReg;
- Register temp_1 = kCompareReg;
+ Register temp_1 = kScratchReg2;
MemOperand src = g.ToMemOperand(source);
MemOperand dst = g.ToMemOperand(destination);
__ lw(temp_0, src);
diff --git a/deps/v8/src/compiler/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
index f0b8a0d588..f8ca119b83 100644
--- a/deps/v8/src/compiler/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
@@ -48,7 +48,7 @@ class MipsOperandGenerator final : public OperandGenerator {
int64_t GetIntegerConstantValue(Node* node) {
DCHECK_EQ(IrOpcode::kInt32Constant, node->opcode());
- return OpParameter<int32_t>(node);
+ return OpParameter<int32_t>(node->op());
}
bool IsFloatConstant(Node* node) {
@@ -58,10 +58,10 @@ class MipsOperandGenerator final : public OperandGenerator {
double GetFloatConstantValue(Node* node) {
if (node->opcode() == IrOpcode::kFloat32Constant) {
- return OpParameter<float>(node);
+ return OpParameter<float>(node->op());
}
DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
- return OpParameter<double>(node);
+ return OpParameter<double>(node->op());
}
bool CanBeImmediate(Node* node, InstructionCode opcode) {
@@ -133,7 +133,7 @@ static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
static void VisitRRI(InstructionSelector* selector, ArchOpcode opcode,
Node* node) {
MipsOperandGenerator g(selector);
- int32_t imm = OpParameter<int32_t>(node);
+ int32_t imm = OpParameter<int32_t>(node->op());
selector->Emit(opcode, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)), g.UseImmediate(imm));
}
@@ -141,7 +141,7 @@ static void VisitRRI(InstructionSelector* selector, ArchOpcode opcode,
static void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode,
Node* node) {
MipsOperandGenerator g(selector);
- int32_t imm = OpParameter<int32_t>(node);
+ int32_t imm = OpParameter<int32_t>(node->op());
selector->Emit(opcode, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)), g.UseImmediate(imm),
g.UseRegister(node->InputAt(1)));
@@ -174,9 +174,9 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
MipsOperandGenerator g(selector);
Int32BinopMatcher m(node);
- InstructionOperand inputs[4];
+ InstructionOperand inputs[2];
size_t input_count = 0;
- InstructionOperand outputs[2];
+ InstructionOperand outputs[1];
size_t output_count = 0;
if (TryMatchImmediate(selector, &opcode, m.right().node(), &input_count,
@@ -194,13 +194,6 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
}
- if (cont->IsBranch()) {
- inputs[input_count++] = g.Label(cont->true_block());
- inputs[input_count++] = g.Label(cont->false_block());
- } else if (cont->IsTrap()) {
- inputs[input_count++] = g.TempImmediate(cont->trap_id());
- }
-
if (cont->IsDeoptimize()) {
// If we can deoptimize as a result of the binop, we need to make sure that
// the deopt inputs are not overwritten by the binop result. One way
@@ -209,23 +202,14 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
} else {
outputs[output_count++] = g.DefineAsRegister(node);
}
- if (cont->IsSet()) {
- outputs[output_count++] = g.DefineAsRegister(cont->result());
- }
DCHECK_NE(0u, input_count);
- DCHECK_NE(0u, output_count);
+ DCHECK_EQ(1u, output_count);
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
- opcode = cont->Encode(opcode);
- if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->feedback(),
- cont->frame_state());
- } else {
- selector->Emit(opcode, output_count, outputs, input_count, inputs);
- }
+ selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
+ inputs, cont);
}
static void VisitBinop(InstructionSelector* selector, Node* node,
@@ -297,7 +281,7 @@ void InstructionSelector::VisitLoad(Node* node) {
return;
}
if (node->opcode() == IrOpcode::kPoisonedLoad) {
- CHECK_EQ(load_poisoning_, LoadPoisoning::kDoPoison);
+ CHECK_EQ(poisoning_enabled_, PoisoningMitigationLevel::kOn);
opcode |= MiscField::encode(kMemoryAccessPoisoned);
}
@@ -1235,8 +1219,7 @@ bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
void InstructionSelector::VisitUnalignedLoad(Node* node) {
- UnalignedLoadRepresentation load_rep =
- UnalignedLoadRepresentationOf(node->op());
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
MipsOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -1342,22 +1325,7 @@ namespace {
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
InstructionOperand left, InstructionOperand right,
FlagsContinuation* cont) {
- MipsOperandGenerator g(selector);
- opcode = cont->Encode(opcode);
- if (cont->IsBranch()) {
- selector->Emit(opcode, g.NoOutput(), left, right,
- g.Label(cont->true_block()), g.Label(cont->false_block()));
- } else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
- cont->reason(), cont->feedback(),
- cont->frame_state());
- } else if (cont->IsSet()) {
- selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
- } else {
- DCHECK(cont->IsTrap());
- selector->Emit(opcode, g.NoOutput(), left, right,
- g.TempImmediate(cont->trap_id()));
- }
+ selector->EmitWithContinuation(opcode, left, right, cont);
}
@@ -1556,23 +1524,8 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
// Continuation could not be combined with a compare, emit compare against 0.
MipsOperandGenerator g(this);
- InstructionCode const opcode = cont->Encode(kMipsCmp);
InstructionOperand const value_operand = g.UseRegister(value);
- if (cont->IsBranch()) {
- Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
- g.Label(cont->true_block()), g.Label(cont->false_block()));
- } else if (cont->IsDeoptimize()) {
- EmitDeoptimize(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
- cont->kind(), cont->reason(), cont->feedback(),
- cont->frame_state());
- } else if (cont->IsSet()) {
- Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
- g.TempImmediate(0));
- } else {
- DCHECK(cont->IsTrap());
- Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
- g.TempImmediate(cont->trap_id()));
- }
+ EmitWithContinuation(kMipsCmp, value_operand, g.TempImmediate(0), cont);
}
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
@@ -1582,20 +1535,20 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
// Emit either ArchTableSwitch or ArchLookupSwitch.
if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
static const size_t kMaxTableSwitchValueRange = 2 << 16;
- size_t table_space_cost = 9 + sw.value_range;
+ size_t table_space_cost = 9 + sw.value_range();
size_t table_time_cost = 3;
- size_t lookup_space_cost = 2 + 2 * sw.case_count;
- size_t lookup_time_cost = sw.case_count;
- if (sw.case_count > 0 &&
+ size_t lookup_space_cost = 2 + 2 * sw.case_count();
+ size_t lookup_time_cost = sw.case_count();
+ if (sw.case_count() > 0 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min() &&
- sw.value_range <= kMaxTableSwitchValueRange) {
+ sw.min_value() > std::numeric_limits<int32_t>::min() &&
+ sw.value_range() <= kMaxTableSwitchValueRange) {
InstructionOperand index_operand = value_operand;
- if (sw.min_value) {
+ if (sw.min_value()) {
index_operand = g.TempRegister();
Emit(kMipsSub, index_operand, value_operand,
- g.TempImmediate(sw.min_value));
+ g.TempImmediate(sw.min_value()));
}
// Generate a table lookup.
return EmitTableSwitch(sw, index_operand);
@@ -2200,7 +2153,7 @@ bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
} // namespace
void InstructionSelector::VisitS8x16Shuffle(Node* node) {
- const uint8_t* shuffle = OpParameter<uint8_t*>(node);
+ const uint8_t* shuffle = OpParameter<uint8_t*>(node->op());
uint8_t mask = CanonicalizeShuffle(node);
uint8_t shuffle32x4[4];
ArchOpcode opcode;
@@ -2280,9 +2233,6 @@ InstructionSelector::AlignmentRequirements() {
}
}
-// static
-bool InstructionSelector::SupportsSpeculationPoisoning() { return false; }
-
#undef SIMD_BINOP_LIST
#undef SIMD_SHIFT_OP_LIST
#undef SIMD_UNOP_LIST
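The instruction-selector changes above fold the branch/deoptimize/set/trap handling into a single EmitWithContinuation call, so VisitBinop, VisitCompare and VisitWordCompareZero no longer build the continuation-specific operand lists by hand. A toy standalone sketch of that "one emit path, the continuation supplies the rest" shape; the continuation and instruction types are simplified stand-ins for the real FlagsContinuation machinery:

    #include <string>
    #include <vector>

    enum class ContinuationKind { kBranch, kDeoptimize, kSet, kTrap };

    struct Continuation {
      ContinuationKind kind;
    };

    struct Instruction {
      std::string opcode;
      std::vector<int> inputs;
      Continuation cont;
    };

    // The single emit routine appends whatever extra operands the continuation
    // needs, instead of every Visit* duplicating that switch.
    Instruction EmitWithContinuation(std::string opcode, std::vector<int> inputs,
                                     Continuation cont) {
      if (cont.kind == ContinuationKind::kBranch) {
        inputs.push_back(/*true_block*/ 1);
        inputs.push_back(/*false_block*/ 2);
      } else if (cont.kind == ContinuationKind::kTrap) {
        inputs.push_back(/*trap_id*/ 42);
      }
      return Instruction{std::move(opcode), std::move(inputs), cont};
    }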
diff --git a/deps/v8/src/compiler/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
index ab84fe22b2..3d4c0163db 100644
--- a/deps/v8/src/compiler/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
@@ -4,7 +4,6 @@
#include "src/assembler-inl.h"
#include "src/callable.h"
-#include "src/compilation-info.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/code-generator.h"
#include "src/compiler/gap-resolver.h"
@@ -12,6 +11,7 @@
#include "src/compiler/osr.h"
#include "src/heap/heap-inl.h"
#include "src/mips64/macro-assembler-mips64.h"
+#include "src/optimized-compilation-info.h"
namespace v8 {
namespace internal {
@@ -19,11 +19,6 @@ namespace compiler {
#define __ tasm()->
-// TODO(plind): Possibly avoid using these lithium names.
-#define kScratchReg kLithiumScratchReg
-#define kScratchReg2 kLithiumScratchReg2
-#define kScratchDoubleReg kLithiumScratchDouble
-
// TODO(plind): consider renaming these macros.
#define TRACE_MSG(msg) \
@@ -143,41 +138,6 @@ static inline bool HasRegisterInput(Instruction* instr, size_t index) {
namespace {
-class OutOfLineRound : public OutOfLineCode {
- public:
- OutOfLineRound(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- // Handle rounding to zero case where sign has to be preserved.
- // High bits of double input already in kScratchReg.
- __ dsrl(at, kScratchReg, 31);
- __ dsll(at, at, 31);
- __ mthc1(at, result_);
- }
-
- private:
- DoubleRegister const result_;
-};
-
-
-class OutOfLineRound32 : public OutOfLineCode {
- public:
- OutOfLineRound32(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- // Handle rounding to zero case where sign has to be preserved.
- // High bits of float input already in kScratchReg.
- __ srl(at, kScratchReg, 31);
- __ sll(at, at, 31);
- __ mtc1(at, result_);
- }
-
- private:
- DoubleRegister const result_;
-};
-
class OutOfLineRecordWrite final : public OutOfLineCode {
public:
@@ -344,13 +304,13 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
return OLT;
case kUnsignedGreaterThanOrEqual:
predicate = false;
- return ULT;
+ return OLT;
case kUnsignedLessThanOrEqual:
predicate = true;
return OLE;
case kUnsignedGreaterThan:
predicate = false;
- return ULE;
+ return OLE;
case kUnorderedEqual:
case kUnorderedNotEqual:
predicate = true;
@@ -362,57 +322,18 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
UNREACHABLE();
}
-} // namespace
-
-#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(mode) \
- if (kArchVariant == kMips64r6) { \
- __ cfc1(kScratchReg, FCSR); \
- __ li(at, Operand(mode_##mode)); \
- __ ctc1(at, FCSR); \
- __ rint_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
- __ ctc1(kScratchReg, FCSR); \
- } else { \
- auto ool = new (zone()) OutOfLineRound(this, i.OutputDoubleRegister()); \
- Label done; \
- __ mfhc1(kScratchReg, i.InputDoubleRegister(0)); \
- __ Ext(at, kScratchReg, HeapNumber::kExponentShift, \
- HeapNumber::kExponentBits); \
- __ Branch(USE_DELAY_SLOT, &done, hs, at, \
- Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits)); \
- __ mov_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
- __ mode##_l_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
- __ dmfc1(at, i.OutputDoubleRegister()); \
- __ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg)); \
- __ cvt_d_l(i.OutputDoubleRegister(), i.OutputDoubleRegister()); \
- __ bind(ool->exit()); \
- __ bind(&done); \
+void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
+ InstructionCode opcode, Instruction* instr,
+ MipsOperandConverter& i) {
+ const MemoryAccessMode access_mode =
+ static_cast<MemoryAccessMode>(MiscField::decode(opcode));
+ if (access_mode == kMemoryAccessPoisoned) {
+ Register value = i.OutputRegister();
+ codegen->tasm()->And(value, value, kSpeculationPoisonRegister);
}
+}
-#define ASSEMBLE_ROUND_FLOAT_TO_FLOAT(mode) \
- if (kArchVariant == kMips64r6) { \
- __ cfc1(kScratchReg, FCSR); \
- __ li(at, Operand(mode_##mode)); \
- __ ctc1(at, FCSR); \
- __ rint_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
- __ ctc1(kScratchReg, FCSR); \
- } else { \
- int32_t kFloat32ExponentBias = 127; \
- int32_t kFloat32MantissaBits = 23; \
- int32_t kFloat32ExponentBits = 8; \
- auto ool = new (zone()) OutOfLineRound32(this, i.OutputDoubleRegister()); \
- Label done; \
- __ mfc1(kScratchReg, i.InputDoubleRegister(0)); \
- __ Ext(at, kScratchReg, kFloat32MantissaBits, kFloat32ExponentBits); \
- __ Branch(USE_DELAY_SLOT, &done, hs, at, \
- Operand(kFloat32ExponentBias + kFloat32MantissaBits)); \
- __ mov_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
- __ mode##_w_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
- __ mfc1(at, i.OutputDoubleRegister()); \
- __ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg)); \
- __ cvt_s_w(i.OutputDoubleRegister(), i.OutputDoubleRegister()); \
- __ bind(ool->exit()); \
- __ bind(&done); \
- }
+} // namespace
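The new EmitWordLoadPoisoningIfNeeded helper is the load-side half of the Spectre mitigation: any load whose MiscField carries kMemoryAccessPoisoned gets its result ANDed with kSpeculationPoisonRegister. A minimal stand-alone sketch of the masking idea, assuming the mask is all-ones on the architecturally valid path and all-zeros under misspeculation:

#include <cstdint>

// speculation_poison is ~0 while execution is on a correctly predicted path
// and 0 once a mispredicted branch has been detected (see
// AssembleBranchPoisoning further down), so poisoned loads read back as 0.
uint64_t PoisonLoadedValue(uint64_t loaded_value, uint64_t speculation_poison) {
  return loaded_value & speculation_poison;
}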
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
do { \
@@ -658,7 +579,7 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ Jump(code, RelocInfo::CODE_TARGET, ne, at, Operand(zero_reg));
}
-void CodeGenerator::GenerateSpeculationPoison() {
+void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
// Calculate a mask which has all bits set in the normal case, but has all
// bits cleared if we are speculatively executing the wrong PC.
// difference = (current - expected) | (expected - current)
@@ -896,6 +817,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ mov(i.OutputRegister(), fp);
}
break;
+ case kArchRootsPointer:
+ __ mov(i.OutputRegister(), kRootRegister);
+ break;
case kArchTruncateDoubleToI:
__ TruncateDoubleToIDelayed(zone(), i.OutputRegister(),
i.InputDoubleRegister(0));
@@ -952,6 +876,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
+ case kArchPoisonOnSpeculationWord:
+ __ And(i.OutputRegister(), i.InputRegister(0),
+ kSpeculationPoisonRegister);
+ break;
case kIeee754Float64Acos:
ASSEMBLE_IEEE754_UNOP(acos);
break;
@@ -1001,8 +929,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_IEEE754_UNOP(log10);
break;
case kIeee754Float64Pow: {
- __ CallStubDelayed(new (zone())
- MathPowStub(nullptr, MathPowStub::DOUBLE));
+ __ CallStubDelayed(new (zone()) MathPowStub());
break;
}
case kIeee754Float64Sin:
@@ -1024,7 +951,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Daddu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64DaddOvf:
- // Pseudo-instruction used for overflow/branch. No opcode emitted here.
+ __ DaddOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1),
+ kScratchReg);
break;
case kMips64Sub:
__ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
@@ -1033,13 +961,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Dsubu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64DsubOvf:
- // Pseudo-instruction used for overflow/branch. No opcode emitted here.
+ __ DsubOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1),
+ kScratchReg);
break;
case kMips64Mul:
__ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64MulOvf:
- // Pseudo-instruction used for overflow/branch. No opcode emitted here.
+ __ MulOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1),
+ kScratchReg);
break;
case kMips64MulHigh:
__ Mulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
@@ -1294,6 +1224,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Dror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64Tst:
+ __ And(kScratchReg, i.InputRegister(0), i.InputOperand(1));
// Pseudo-instruction used for cmp/branch. No opcode emitted here.
break;
case kMips64Cmp:
@@ -1309,9 +1240,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
- case kMips64CmpS:
- // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
- break;
+ case kMips64CmpS: {
+ FPURegister left = i.InputOrZeroSingleRegister(0);
+ FPURegister right = i.InputOrZeroSingleRegister(1);
+ bool predicate;
+ FPUCondition cc =
+ FlagsConditionToConditionCmpFPU(predicate, instr->flags_condition());
+
+ if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
+ !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+
+ __ CompareF32(cc, left, right);
+ } break;
case kMips64AddS:
// TODO(plind): add special case: combine mult & add.
__ add_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
@@ -1363,9 +1305,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ min_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
- case kMips64CmpD:
- // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
- break;
+ case kMips64CmpD: {
+ FPURegister left = i.InputOrZeroDoubleRegister(0);
+ FPURegister right = i.InputOrZeroDoubleRegister(1);
+ bool predicate;
+ FPUCondition cc =
+ FlagsConditionToConditionCmpFPU(predicate, instr->flags_condition());
+ if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
+ !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+ __ CompareF64(cc, left, right);
+ } break;
case kMips64AddD:
// TODO(plind): add special case: combine mult & add.
__ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
@@ -1417,35 +1368,35 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputDoubleRegister(1));
break;
case kMips64Float64RoundDown: {
- ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor);
+ __ Floor_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
case kMips64Float32RoundDown: {
- ASSEMBLE_ROUND_FLOAT_TO_FLOAT(floor);
+ __ Floor_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
break;
}
case kMips64Float64RoundTruncate: {
- ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc);
+ __ Trunc_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
case kMips64Float32RoundTruncate: {
- ASSEMBLE_ROUND_FLOAT_TO_FLOAT(trunc);
+ __ Trunc_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
break;
}
case kMips64Float64RoundUp: {
- ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil);
+ __ Ceil_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
case kMips64Float32RoundUp: {
- ASSEMBLE_ROUND_FLOAT_TO_FLOAT(ceil);
+ __ Ceil_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
break;
}
case kMips64Float64RoundTiesEven: {
- ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(round);
+ __ Round_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
case kMips64Float32RoundTiesEven: {
- ASSEMBLE_ROUND_FLOAT_TO_FLOAT(round);
+ __ Round_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
break;
}
case kMips64Float32Max: {
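All six rounding opcodes now go through single TurboAssembler helpers (Floor_d_d, Trunc_d_d, Ceil_d_d, Round_d_d and their _s_s float counterparts) instead of the removed ASSEMBLE_ROUND_* macros, which open-coded the FCSR mode switch and the sign-preserving out-of-line path. For reference, the C library analogues of the four modes (a hedged mapping for orientation, not the V8 implementation):

#include <cmath>

double RoundDown(double x)     { return std::floor(x); }      // kMips64Float64RoundDown
double RoundTruncate(double x) { return std::trunc(x); }      // kMips64Float64RoundTruncate
double RoundUp(double x)       { return std::ceil(x); }       // kMips64Float64RoundUp
double RoundTiesEven(double x) { return std::nearbyint(x); }  // ties-to-even in the default FP environment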
@@ -1703,24 +1654,30 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kMips64Lbu:
__ Lbu(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Lb:
__ Lb(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Sb:
__ Sb(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
case kMips64Lhu:
__ Lhu(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Ulhu:
__ Ulhu(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Lh:
__ Lh(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Ulh:
__ Ulh(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Sh:
__ Sh(i.InputOrZeroRegister(2), i.MemoryOperand());
@@ -1730,21 +1687,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kMips64Lw:
__ Lw(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Ulw:
__ Ulw(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Lwu:
__ Lwu(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Ulwu:
__ Ulwu(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Ld:
__ Ld(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Uld:
__ Uld(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Sw:
__ Sw(i.InputOrZeroRegister(2), i.MemoryOperand());
@@ -3027,31 +2990,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
out << "Unsupported " << #opcode << " condition: \"" << condition << "\""; \
UNIMPLEMENTED();
-static bool convertCondition(FlagsCondition condition, Condition& cc) {
- switch (condition) {
- case kEqual:
- cc = eq;
- return true;
- case kNotEqual:
- cc = ne;
- return true;
- case kUnsignedLessThan:
- cc = lt;
- return true;
- case kUnsignedGreaterThanOrEqual:
- cc = uge;
- return true;
- case kUnsignedLessThanOrEqual:
- cc = le;
- return true;
- case kUnsignedGreaterThan:
- cc = ugt;
- return true;
- default:
- break;
- }
- return false;
-}
void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
Instruction* instr, FlagsCondition condition,
@@ -3070,81 +3008,52 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
if (instr->arch_opcode() == kMips64Tst) {
cc = FlagsConditionToConditionTst(condition);
- __ And(at, i.InputRegister(0), i.InputOperand(1));
- __ Branch(tlabel, cc, at, Operand(zero_reg));
+ __ Branch(tlabel, cc, kScratchReg, Operand(zero_reg));
} else if (instr->arch_opcode() == kMips64Dadd ||
instr->arch_opcode() == kMips64Dsub) {
cc = FlagsConditionToConditionOvf(condition);
__ dsra32(kScratchReg, i.OutputRegister(), 0);
__ sra(at, i.OutputRegister(), 31);
__ Branch(tlabel, cc, at, Operand(kScratchReg));
- } else if (instr->arch_opcode() == kMips64DaddOvf) {
+ } else if (instr->arch_opcode() == kMips64DaddOvf ||
+ instr->arch_opcode() == kMips64DsubOvf) {
switch (condition) {
+ // Overflow occurs if overflow register is negative
case kOverflow:
- __ DaddBranchOvf(i.OutputRegister(), i.InputRegister(0),
- i.InputOperand(1), tlabel, flabel);
+ __ Branch(tlabel, lt, kScratchReg, Operand(zero_reg));
break;
case kNotOverflow:
- __ DaddBranchOvf(i.OutputRegister(), i.InputRegister(0),
- i.InputOperand(1), flabel, tlabel);
+ __ Branch(tlabel, ge, kScratchReg, Operand(zero_reg));
break;
default:
- UNSUPPORTED_COND(kMips64DaddOvf, condition);
+ UNSUPPORTED_COND(instr->arch_opcode(), condition);
break;
}
- } else if (instr->arch_opcode() == kMips64DsubOvf) {
+ } else if (instr->arch_opcode() == kMips64MulOvf) {
+ // Overflow occurs if overflow register is not zero
switch (condition) {
case kOverflow:
- __ DsubBranchOvf(i.OutputRegister(), i.InputRegister(0),
- i.InputOperand(1), tlabel, flabel);
+ __ Branch(tlabel, ne, kScratchReg, Operand(zero_reg));
break;
case kNotOverflow:
- __ DsubBranchOvf(i.OutputRegister(), i.InputRegister(0),
- i.InputOperand(1), flabel, tlabel);
- break;
- default:
- UNSUPPORTED_COND(kMips64DsubOvf, condition);
+ __ Branch(tlabel, eq, kScratchReg, Operand(zero_reg));
break;
- }
- } else if (instr->arch_opcode() == kMips64MulOvf) {
- switch (condition) {
- case kOverflow: {
- __ MulBranchOvf(i.OutputRegister(), i.InputRegister(0),
- i.InputOperand(1), tlabel, flabel, kScratchReg);
- } break;
- case kNotOverflow: {
- __ MulBranchOvf(i.OutputRegister(), i.InputRegister(0),
- i.InputOperand(1), flabel, tlabel, kScratchReg);
- } break;
default:
- UNSUPPORTED_COND(kMips64MulOvf, condition);
+ UNSUPPORTED_COND(kMipsMulOvf, condition);
break;
}
} else if (instr->arch_opcode() == kMips64Cmp) {
cc = FlagsConditionToConditionCmp(condition);
__ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
- } else if (instr->arch_opcode() == kMips64CmpS) {
- if (!convertCondition(condition, cc)) {
- UNSUPPORTED_COND(kMips64CmpS, condition);
- }
- FPURegister left = i.InputOrZeroSingleRegister(0);
- FPURegister right = i.InputOrZeroSingleRegister(1);
- if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
- !__ IsDoubleZeroRegSet()) {
- __ Move(kDoubleRegZero, 0.0);
- }
- __ BranchF32(tlabel, nullptr, cc, left, right);
- } else if (instr->arch_opcode() == kMips64CmpD) {
- if (!convertCondition(condition, cc)) {
- UNSUPPORTED_COND(kMips64CmpD, condition);
- }
- FPURegister left = i.InputOrZeroDoubleRegister(0);
- FPURegister right = i.InputOrZeroDoubleRegister(1);
- if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
- !__ IsDoubleZeroRegSet()) {
- __ Move(kDoubleRegZero, 0.0);
+ } else if (instr->arch_opcode() == kMips64CmpS ||
+ instr->arch_opcode() == kMips64CmpD) {
+ bool predicate;
+ FlagsConditionToConditionCmpFPU(predicate, condition);
+ if (predicate) {
+ __ BranchTrueF(tlabel);
+ } else {
+ __ BranchFalseF(tlabel);
}
- __ BranchF64(tlabel, nullptr, cc, left, right);
} else {
PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
instr->arch_opcode());
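The branch path above reflects the new split of responsibility: kMips64DaddOvf, kMips64DsubOvf and kMips64MulOvf now emit the overflow computation in AssembleArchInstruction (writing an overflow indicator into kScratchReg), and the branch merely tests that scratch value, negative meaning overflow for add/sub and non-zero for mul. A rough stand-alone analogue of the add/sub case, using the GCC/Clang overflow builtin as a stand-in for DaddOverflow:

#include <cstdint>

int64_t scratch;  // plays the role of kScratchReg

int64_t AddWithOverflowIndicator(int64_t a, int64_t b) {
  int64_t sum;
  bool overflow = __builtin_add_overflow(a, b, &sum);  // GCC/Clang builtin
  scratch = overflow ? -1 : 0;  // negative indicator <=> overflow occurred
  return sum;
}

bool ShouldBranchOnOverflow() { return scratch < 0; }     // kOverflow
bool ShouldBranchOnNoOverflow() { return scratch >= 0; }  // kNotOverflow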
@@ -3166,7 +3075,88 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
Instruction* instr) {
- UNREACHABLE();
+ // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
+ if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
+ return;
+ }
+
+ MipsOperandConverter i(this, instr);
+ condition = NegateFlagsCondition(condition);
+
+ switch (instr->arch_opcode()) {
+ case kMips64Cmp: {
+ __ LoadZeroOnCondition(kSpeculationPoisonRegister, i.InputRegister(0),
+ i.InputOperand(1),
+ FlagsConditionToConditionCmp(condition));
+ }
+ return;
+ case kMips64Tst: {
+ switch (condition) {
+ case kEqual:
+ __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
+ break;
+ case kNotEqual:
+ __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
+ kScratchReg);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ return;
+ case kMips64Dadd:
+ case kMips64Dsub: {
+ // Check for overflow creates 1 or 0 for result.
+ __ dsrl32(kScratchReg, i.OutputRegister(), 31);
+ __ srl(at, i.OutputRegister(), 31);
+ __ xor_(at, kScratchReg, at);
+ switch (condition) {
+ case kOverflow:
+ __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, at);
+ break;
+ case kNotOverflow:
+ __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, at);
+ break;
+ default:
+ UNSUPPORTED_COND(instr->arch_opcode(), condition);
+ }
+ }
+ return;
+ case kMips64DaddOvf:
+ case kMips64DsubOvf: {
+ // Overflow occurs if overflow register is negative
+ __ Slt(kScratchReg2, kScratchReg, zero_reg);
+ switch (condition) {
+ case kOverflow:
+ __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
+ kScratchReg2);
+ break;
+ case kNotOverflow:
+ __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
+ break;
+ default:
+ UNSUPPORTED_COND(instr->arch_opcode(), condition);
+ }
+ }
+ return;
+ case kMips64MulOvf: {
+ // Overflow occurs if overflow register is not zero
+ switch (condition) {
+ case kOverflow:
+ __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
+ kScratchReg);
+ break;
+ case kNotOverflow:
+ __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
+ break;
+ default:
+ UNSUPPORTED_COND(instr->arch_opcode(), condition);
+ }
+ }
+ return;
+ default:
+ break;
+ }
}
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
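AssembleBranchPoisoning is the branch-side half of the mitigation: it negates the flags condition and, per arch opcode, conditionally zeroes kSpeculationPoisonRegister whenever execution has speculated past a branch it should not have taken. A purely illustrative model of the effect (assumptions: the mask starts as all-ones and only ever shrinks to zero):

#include <cstdint>

uint64_t speculation_poison = ~0ull;

// condition_holds is the branch condition evaluated on the current path; the
// generated code tests its negation, so a path reached even though the
// condition failed gets its poison mask cleared.
void PoisonIfMispredicted(bool condition_holds) {
  if (!condition_holds) speculation_poison = 0;
}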
@@ -3261,21 +3251,10 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
if (instr->arch_opcode() == kMips64Tst) {
cc = FlagsConditionToConditionTst(condition);
- if (instr->InputAt(1)->IsImmediate() &&
- base::bits::IsPowerOfTwo(i.InputOperand(1).immediate())) {
- uint16_t pos =
- base::bits::CountTrailingZeros64(i.InputOperand(1).immediate());
- __ Dext(result, i.InputRegister(0), pos, 1);
- if (cc == eq) {
- __ xori(result, result, 1);
- }
+ if (cc == eq) {
+ __ Sltu(result, kScratchReg, 1);
} else {
- __ And(kScratchReg, i.InputRegister(0), i.InputOperand(1));
- if (cc == eq) {
- __ Sltu(result, kScratchReg, 1);
- } else {
- __ Sltu(result, zero_reg, kScratchReg);
- }
+ __ Sltu(result, zero_reg, kScratchReg);
}
return;
} else if (instr->arch_opcode() == kMips64Dadd ||
@@ -3289,32 +3268,12 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
__ xori(result, result, 1);
return;
} else if (instr->arch_opcode() == kMips64DaddOvf ||
- instr->arch_opcode() == kMips64DsubOvf ||
- instr->arch_opcode() == kMips64MulOvf) {
- Label flabel, tlabel;
- switch (instr->arch_opcode()) {
- case kMips64DaddOvf:
- __ DaddBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
- i.InputOperand(1), &flabel);
-
- break;
- case kMips64DsubOvf:
- __ DsubBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
- i.InputOperand(1), &flabel);
- break;
- case kMips64MulOvf:
- __ MulBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
- i.InputOperand(1), &flabel, kScratchReg);
- break;
- default:
- UNREACHABLE();
- break;
- }
- __ li(result, 1);
- __ Branch(&tlabel);
- __ bind(&flabel);
- __ li(result, 0);
- __ bind(&tlabel);
+ instr->arch_opcode() == kMips64DsubOvf) {
+ // Overflow occurs if overflow register is negative
+ __ slt(result, kScratchReg, zero_reg);
+ } else if (instr->arch_opcode() == kMips64MulOvf) {
+ // Overflow occurs if overflow register is not zero
+ __ Sgtu(result, kScratchReg, zero_reg);
} else if (instr->arch_opcode() == kMips64Cmp) {
cc = FlagsConditionToConditionCmp(condition);
switch (cc) {
@@ -3409,15 +3368,9 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
__ Move(kDoubleRegZero, 0.0);
}
bool predicate;
- FPUCondition cc = FlagsConditionToConditionCmpFPU(predicate, condition);
+ FlagsConditionToConditionCmpFPU(predicate, condition);
if (kArchVariant != kMips64r6) {
__ li(result, Operand(1));
- if (instr->arch_opcode() == kMips64CmpD) {
- __ c(cc, D, left, right);
- } else {
- DCHECK_EQ(kMips64CmpS, instr->arch_opcode());
- __ c(cc, S, left, right);
- }
if (predicate) {
__ Movf(result, zero_reg);
} else {
@@ -3425,11 +3378,9 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
}
} else {
if (instr->arch_opcode() == kMips64CmpD) {
- __ cmp(cc, L, kDoubleCompareReg, left, right);
__ dmfc1(result, kDoubleCompareReg);
} else {
DCHECK_EQ(kMips64CmpS, instr->arch_opcode());
- __ cmp(cc, W, kDoubleCompareReg, left, right);
__ mfc1(result, kDoubleCompareReg);
}
if (predicate) {
@@ -3519,6 +3470,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
+ ResetSpeculationPoison();
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
@@ -3636,11 +3588,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
switch (src.type()) {
case Constant::kInt32:
- if (RelocInfo::IsWasmSizeReference(src.rmode())) {
- __ li(dst, Operand(src.ToInt32(), src.rmode()));
- } else {
- __ li(dst, Operand(src.ToInt32()));
- }
+ __ li(dst, Operand(src.ToInt32()));
break;
case Constant::kFloat32:
__ li(dst, Operand::EmbeddedNumber(src.ToFloat32()));
@@ -3649,7 +3597,6 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (RelocInfo::IsWasmPtrReference(src.rmode())) {
__ li(dst, Operand(src.ToInt64(), src.rmode()));
} else {
- DCHECK(!RelocInfo::IsWasmSizeReference(src.rmode()));
__ li(dst, Operand(src.ToInt64()));
}
break;
@@ -3680,8 +3627,8 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
__ Sw(zero_reg, dst);
} else {
- __ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
- __ Sw(at, dst);
+ __ li(kScratchReg, Operand(bit_cast<int32_t>(src.ToFloat32())));
+ __ Sw(kScratchReg, dst);
}
} else {
DCHECK(destination->IsFPRegister());
diff --git a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
index f7c8cab67b..fab7be2625 100644
--- a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
@@ -49,10 +49,10 @@ class Mips64OperandGenerator final : public OperandGenerator {
int64_t GetIntegerConstantValue(Node* node) {
if (node->opcode() == IrOpcode::kInt32Constant) {
- return OpParameter<int32_t>(node);
+ return OpParameter<int32_t>(node->op());
}
DCHECK_EQ(IrOpcode::kInt64Constant, node->opcode());
- return OpParameter<int64_t>(node);
+ return OpParameter<int64_t>(node->op());
}
bool IsFloatConstant(Node* node) {
@@ -62,10 +62,10 @@ class Mips64OperandGenerator final : public OperandGenerator {
double GetFloatConstantValue(Node* node) {
if (node->opcode() == IrOpcode::kFloat32Constant) {
- return OpParameter<float>(node);
+ return OpParameter<float>(node->op());
}
DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
- return OpParameter<double>(node);
+ return OpParameter<double>(node->op());
}
bool CanBeImmediate(Node* node, InstructionCode mode) {
@@ -130,7 +130,7 @@ static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
static void VisitRRI(InstructionSelector* selector, ArchOpcode opcode,
Node* node) {
Mips64OperandGenerator g(selector);
- int32_t imm = OpParameter<int32_t>(node);
+ int32_t imm = OpParameter<int32_t>(node->op());
selector->Emit(opcode, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)), g.UseImmediate(imm));
}
@@ -138,7 +138,7 @@ static void VisitRRI(InstructionSelector* selector, ArchOpcode opcode,
static void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode,
Node* node) {
Mips64OperandGenerator g(selector);
- int32_t imm = OpParameter<int32_t>(node);
+ int32_t imm = OpParameter<int32_t>(node->op());
selector->Emit(opcode, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)), g.UseImmediate(imm),
g.UseRegister(node->InputAt(1)));
@@ -268,9 +268,9 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
Mips64OperandGenerator g(selector);
Int32BinopMatcher m(node);
- InstructionOperand inputs[4];
+ InstructionOperand inputs[2];
size_t input_count = 0;
- InstructionOperand outputs[2];
+ InstructionOperand outputs[1];
size_t output_count = 0;
if (TryMatchImmediate(selector, &opcode, m.right().node(), &input_count,
@@ -288,13 +288,6 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
}
- if (cont->IsBranch()) {
- inputs[input_count++] = g.Label(cont->true_block());
- inputs[input_count++] = g.Label(cont->false_block());
- } else if (cont->IsTrap()) {
- inputs[input_count++] = g.TempImmediate(cont->trap_id());
- }
-
if (cont->IsDeoptimize()) {
// If we can deoptimize as a result of the binop, we need to make sure that
// the deopt inputs are not overwritten by the binop result. One way
@@ -303,23 +296,14 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
} else {
outputs[output_count++] = g.DefineAsRegister(node);
}
- if (cont->IsSet()) {
- outputs[output_count++] = g.DefineAsRegister(cont->result());
- }
DCHECK_NE(0u, input_count);
- DCHECK_NE(0u, output_count);
+ DCHECK_EQ(1u, output_count);
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
- opcode = cont->Encode(opcode);
- if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->feedback(),
- cont->frame_state());
- } else {
- selector->Emit(opcode, output_count, outputs, input_count, inputs);
- }
+ selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
+ inputs, cont);
}
static void VisitBinop(InstructionSelector* selector, Node* node,
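This VisitBinop no longer special-cases branches, traps, deopts and materialized booleans itself; it builds at most two value inputs and one output and defers the continuation-specific operands to the shared InstructionSelector::EmitWithContinuation. A stand-alone sketch of what that shared helper factors out of each backend (names and types here are illustrative, not the V8 signatures):

#include <vector>

enum class ContinuationKind { kNone, kBranch, kSet, kTrap };

struct Continuation {
  ContinuationKind kind = ContinuationKind::kNone;
  int true_block = -1, false_block = -1, trap_id = -1, result_reg = -1;
};

struct EmittedOperands {
  std::vector<int> outputs, inputs;
};

// The backend supplies only the value operands; the continuation decides
// which extra operands (branch labels, trap id, boolean result register) are
// appended before the instruction is emitted.
EmittedOperands EmitWithContinuationSketch(std::vector<int> outputs,
                                           std::vector<int> inputs,
                                           const Continuation& cont) {
  switch (cont.kind) {
    case ContinuationKind::kBranch:
      inputs.push_back(cont.true_block);
      inputs.push_back(cont.false_block);
      break;
    case ContinuationKind::kTrap:
      inputs.push_back(cont.trap_id);
      break;
    case ContinuationKind::kSet:
      outputs.push_back(cont.result_reg);
      break;
    case ContinuationKind::kNone:
      break;  // deoptimizing continuations also attach a frame state upstream
  }
  return {outputs, inputs};
}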
@@ -411,7 +395,7 @@ void InstructionSelector::VisitLoad(Node* node) {
return;
}
if (node->opcode() == IrOpcode::kPoisonedLoad) {
- CHECK_EQ(load_poisoning_, LoadPoisoning::kDoPoison);
+ CHECK_EQ(poisoning_enabled_, PoisoningMitigationLevel::kOn);
opcode |= MiscField::encode(kMemoryAccessPoisoned);
}
@@ -1725,8 +1709,7 @@ bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
void InstructionSelector::VisitUnalignedLoad(Node* node) {
- UnalignedLoadRepresentation load_rep =
- UnalignedLoadRepresentationOf(node->op());
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
Mips64OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -1835,22 +1818,7 @@ namespace {
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
InstructionOperand left, InstructionOperand right,
FlagsContinuation* cont) {
- Mips64OperandGenerator g(selector);
- opcode = cont->Encode(opcode);
- if (cont->IsBranch()) {
- selector->Emit(opcode, g.NoOutput(), left, right,
- g.Label(cont->true_block()), g.Label(cont->false_block()));
- } else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
- cont->reason(), cont->feedback(),
- cont->frame_state());
- } else if (cont->IsSet()) {
- selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
- } else {
- DCHECK(cont->IsTrap());
- selector->Emit(opcode, g.NoOutput(), left, right,
- g.TempImmediate(cont->trap_id()));
- }
+ selector->EmitWithContinuation(opcode, left, right, cont);
}
@@ -1963,8 +1931,7 @@ bool IsNodeUnsigned(Node* n) {
LoadRepresentation load_rep = LoadRepresentationOf(n->op());
return load_rep.IsUnsigned();
} else if (m.IsUnalignedLoad()) {
- UnalignedLoadRepresentation load_rep =
- UnalignedLoadRepresentationOf(n->op());
+ LoadRepresentation load_rep = LoadRepresentationOf(n->op());
return load_rep.IsUnsigned();
} else {
return m.IsUint32Div() || m.IsUint32LessThan() ||
@@ -2053,22 +2020,8 @@ void VisitWord64Compare(InstructionSelector* selector, Node* node,
void EmitWordCompareZero(InstructionSelector* selector, Node* value,
FlagsContinuation* cont) {
Mips64OperandGenerator g(selector);
- InstructionCode opcode = cont->Encode(kMips64Cmp);
- InstructionOperand const value_operand = g.UseRegister(value);
- if (cont->IsBranch()) {
- selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
- g.Label(cont->true_block()), g.Label(cont->false_block()));
- } else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand,
- g.TempImmediate(0), cont->kind(), cont->reason(),
- cont->feedback(), cont->frame_state());
- } else if (cont->IsTrap()) {
- selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
- g.TempImmediate(cont->trap_id()));
- } else {
- selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
- g.TempImmediate(0));
- }
+ selector->EmitWithContinuation(kMips64Cmp, g.UseRegister(value),
+ g.TempImmediate(0), cont);
}
} // namespace
@@ -2198,20 +2151,20 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
// Emit either ArchTableSwitch or ArchLookupSwitch.
if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
static const size_t kMaxTableSwitchValueRange = 2 << 16;
- size_t table_space_cost = 10 + 2 * sw.value_range;
+ size_t table_space_cost = 10 + 2 * sw.value_range();
size_t table_time_cost = 3;
- size_t lookup_space_cost = 2 + 2 * sw.case_count;
- size_t lookup_time_cost = sw.case_count;
- if (sw.case_count > 0 &&
+ size_t lookup_space_cost = 2 + 2 * sw.case_count();
+ size_t lookup_time_cost = sw.case_count();
+ if (sw.case_count() > 0 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min() &&
- sw.value_range <= kMaxTableSwitchValueRange) {
+ sw.min_value() > std::numeric_limits<int32_t>::min() &&
+ sw.value_range() <= kMaxTableSwitchValueRange) {
InstructionOperand index_operand = value_operand;
- if (sw.min_value) {
+ if (sw.min_value()) {
index_operand = g.TempRegister();
Emit(kMips64Sub, index_operand, value_operand,
- g.TempImmediate(sw.min_value));
+ g.TempImmediate(sw.min_value()));
}
// Generate a table lookup.
return EmitTableSwitch(sw, index_operand);
@@ -2869,7 +2822,7 @@ bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
} // namespace
void InstructionSelector::VisitS8x16Shuffle(Node* node) {
- const uint8_t* shuffle = OpParameter<uint8_t*>(node);
+ const uint8_t* shuffle = OpParameter<uint8_t*>(node->op());
uint8_t mask = CanonicalizeShuffle(node);
uint8_t shuffle32x4[4];
ArchOpcode opcode;
@@ -2962,9 +2915,6 @@ InstructionSelector::AlignmentRequirements() {
}
}
-// static
-bool InstructionSelector::SupportsSpeculationPoisoning() { return false; }
-
#undef SIMD_BINOP_LIST
#undef SIMD_SHIFT_OP_LIST
#undef SIMD_UNOP_LIST
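The recurring node -> node->op() change in this and the following files comes from removing the OpParameter<T>(const Node*) convenience overload (deleted from node.h below); the parameter lives on the Operator, so call sites now fetch it explicitly. A toy restatement of the shape of the API (hypothetical types, not V8's Operator1 machinery):

#include <cstdint>

struct Operator { int32_t parameter; };
struct Node {
  const Operator* op_;
  const Operator* op() const { return op_; }
};

// Only the Operator* form remains; the old Node* overload simply forwarded
// to node->op(), so call sites spell that out themselves now.
int32_t OpParameterInt32(const Operator* op) { return op->parameter; }
// before: OpParameterInt32(node)        after: OpParameterInt32(node->op())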
diff --git a/deps/v8/src/compiler/node-matchers.h b/deps/v8/src/compiler/node-matchers.h
index d1eecfe9fd..c31f9691f2 100644
--- a/deps/v8/src/compiler/node-matchers.h
+++ b/deps/v8/src/compiler/node-matchers.h
@@ -54,7 +54,7 @@ struct ValueMatcher : public NodeMatcher {
explicit ValueMatcher(Node* node)
: NodeMatcher(node), value_(), has_value_(opcode() == kOpcode) {
if (has_value_) {
- value_ = OpParameter<T>(node);
+ value_ = OpParameter<T>(node->op());
}
}
@@ -77,7 +77,7 @@ inline ValueMatcher<uint32_t, IrOpcode::kInt32Constant>::ValueMatcher(
value_(),
has_value_(opcode() == IrOpcode::kInt32Constant) {
if (has_value_) {
- value_ = static_cast<uint32_t>(OpParameter<int32_t>(node));
+ value_ = static_cast<uint32_t>(OpParameter<int32_t>(node->op()));
}
}
@@ -86,10 +86,10 @@ template <>
inline ValueMatcher<int64_t, IrOpcode::kInt64Constant>::ValueMatcher(Node* node)
: NodeMatcher(node), value_(), has_value_(false) {
if (opcode() == IrOpcode::kInt32Constant) {
- value_ = OpParameter<int32_t>(node);
+ value_ = OpParameter<int32_t>(node->op());
has_value_ = true;
} else if (opcode() == IrOpcode::kInt64Constant) {
- value_ = OpParameter<int64_t>(node);
+ value_ = OpParameter<int64_t>(node->op());
has_value_ = true;
}
}
@@ -100,10 +100,10 @@ inline ValueMatcher<uint64_t, IrOpcode::kInt64Constant>::ValueMatcher(
Node* node)
: NodeMatcher(node), value_(), has_value_(false) {
if (opcode() == IrOpcode::kInt32Constant) {
- value_ = static_cast<uint32_t>(OpParameter<int32_t>(node));
+ value_ = static_cast<uint32_t>(OpParameter<int32_t>(node->op()));
has_value_ = true;
} else if (opcode() == IrOpcode::kInt64Constant) {
- value_ = static_cast<uint64_t>(OpParameter<int64_t>(node));
+ value_ = static_cast<uint64_t>(OpParameter<int64_t>(node->op()));
has_value_ = true;
}
}
@@ -629,11 +629,11 @@ struct BaseWithIndexAndDisplacementMatcher {
if (displacement != nullptr) {
switch (displacement->opcode()) {
case IrOpcode::kInt32Constant: {
- value = OpParameter<int32_t>(displacement);
+ value = OpParameter<int32_t>(displacement->op());
break;
}
case IrOpcode::kInt64Constant: {
- value = OpParameter<int64_t>(displacement);
+ value = OpParameter<int64_t>(displacement->op());
break;
}
default:
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index 5fe6e5d420..d8b23fb362 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -589,7 +589,6 @@ bool NodeProperties::CanBeNullOrUndefined(Node* receiver, Node* effect) {
switch (receiver->opcode()) {
case IrOpcode::kCheckInternalizedString:
case IrOpcode::kCheckNumber:
- case IrOpcode::kCheckSeqString:
case IrOpcode::kCheckSmi:
case IrOpcode::kCheckString:
case IrOpcode::kCheckSymbol:
diff --git a/deps/v8/src/compiler/node.h b/deps/v8/src/compiler/node.h
index 26fc03fb13..ded45a24f4 100644
--- a/deps/v8/src/compiler/node.h
+++ b/deps/v8/src/compiler/node.h
@@ -313,12 +313,6 @@ typedef ZoneVector<Node*> NodeVector;
typedef ZoneVector<NodeVector> NodeVectorVector;
-// Helper to extract parameters from Operator1<*> nodes.
-template <typename T>
-static inline const T& OpParameter(const Node* node) {
- return OpParameter<T>(node->op());
-}
-
class Node::InputEdges final {
public:
typedef Edge value_type;
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index 9a8f1e1df8..29a0462767 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -136,13 +136,16 @@
V(JSCreate) \
V(JSCreateArguments) \
V(JSCreateArray) \
+ V(JSCreateArrayIterator) \
V(JSCreateBoundFunction) \
V(JSCreateClosure) \
+ V(JSCreateCollectionIterator) \
V(JSCreateGeneratorObject) \
V(JSCreateIterResultObject) \
V(JSCreateStringIterator) \
V(JSCreateKeyValueArray) \
V(JSCreatePromise) \
+ V(JSCreateTypedArray) \
V(JSCreateLiteralArray) \
V(JSCreateEmptyLiteralArray) \
V(JSCreateLiteralObject) \
@@ -159,6 +162,7 @@
V(JSStoreNamedOwn) \
V(JSStoreGlobal) \
V(JSStoreDataPropertyInLiteral) \
+ V(JSStoreInArrayLiteral) \
V(JSDeleteProperty) \
V(JSHasProperty) \
V(JSGetSuperConstructor)
@@ -339,13 +343,10 @@
V(PlainPrimitiveToFloat64) \
V(BooleanNot) \
V(StringToNumber) \
- V(StringCharAt) \
V(StringCharCodeAt) \
- V(SeqStringCharCodeAt) \
V(StringCodePointAt) \
- V(SeqStringCodePointAt) \
- V(StringFromCharCode) \
- V(StringFromCodePoint) \
+ V(StringFromSingleCharCode) \
+ V(StringFromSingleCodePoint) \
V(StringIndexOf) \
V(StringLength) \
V(StringToLowerCaseIntl) \
@@ -358,7 +359,6 @@
V(CheckInternalizedString) \
V(CheckReceiver) \
V(CheckString) \
- V(CheckSeqString) \
V(CheckSymbol) \
V(CheckSmi) \
V(CheckHeapObject) \
@@ -385,6 +385,12 @@
V(TransitionAndStoreNonNumberElement) \
V(ToBoolean) \
V(NumberIsFloat64Hole) \
+ V(NumberIsFinite) \
+ V(ObjectIsFiniteNumber) \
+ V(NumberIsInteger) \
+ V(ObjectIsSafeInteger) \
+ V(NumberIsSafeInteger) \
+ V(ObjectIsInteger) \
V(ObjectIsArrayBufferView) \
V(ObjectIsBigInt) \
V(ObjectIsCallable) \
@@ -606,10 +612,12 @@
V(Float64ExtractHighWord32) \
V(Float64InsertLowWord32) \
V(Float64InsertHighWord32) \
- V(SpeculationPoison) \
+ V(PoisonOnSpeculationTagged) \
+ V(PoisonOnSpeculationWord) \
V(LoadStackPointer) \
V(LoadFramePointer) \
V(LoadParentFramePointer) \
+ V(LoadRootsPointer) \
V(UnalignedLoad) \
V(UnalignedStore) \
V(Int32PairAdd) \
@@ -629,6 +637,15 @@
V(Word32AtomicAnd) \
V(Word32AtomicOr) \
V(Word32AtomicXor) \
+ V(Word64AtomicLoad) \
+ V(Word64AtomicStore) \
+ V(Word64AtomicAdd) \
+ V(Word64AtomicSub) \
+ V(Word64AtomicAnd) \
+ V(Word64AtomicOr) \
+ V(Word64AtomicXor) \
+ V(Word64AtomicExchange) \
+ V(Word64AtomicCompareExchange) \
V(SpeculationFence) \
V(SignExtendWord8ToInt32) \
V(SignExtendWord16ToInt32) \
diff --git a/deps/v8/src/compiler/operation-typer.cc b/deps/v8/src/compiler/operation-typer.cc
index fc774f8706..539683b41d 100644
--- a/deps/v8/src/compiler/operation-typer.cc
+++ b/deps/v8/src/compiler/operation-typer.cc
@@ -7,7 +7,7 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/type-cache.h"
#include "src/compiler/types.h"
-#include "src/factory.h"
+#include "src/heap/factory.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc
index 673d643da0..df2a3e1daf 100644
--- a/deps/v8/src/compiler/operator-properties.cc
+++ b/deps/v8/src/compiler/operator-properties.cc
@@ -71,6 +71,7 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
case IrOpcode::kJSCreate:
case IrOpcode::kJSCreateArguments:
case IrOpcode::kJSCreateArray:
+ case IrOpcode::kJSCreateTypedArray:
case IrOpcode::kJSCreateLiteralArray:
case IrOpcode::kJSCreateLiteralObject:
case IrOpcode::kJSCreateLiteralRegExp:
diff --git a/deps/v8/src/compiler/osr.cc b/deps/v8/src/compiler/osr.cc
index a5b840cb57..b2211b3b07 100644
--- a/deps/v8/src/compiler/osr.cc
+++ b/deps/v8/src/compiler/osr.cc
@@ -4,22 +4,22 @@
#include "src/compiler/osr.h"
-#include "src/compilation-info.h"
#include "src/compiler/frame.h"
#include "src/objects-inl.h"
#include "src/objects.h"
#include "src/objects/shared-function-info.h"
+#include "src/optimized-compilation-info.h"
namespace v8 {
namespace internal {
namespace compiler {
-OsrHelper::OsrHelper(CompilationInfo* info)
+OsrHelper::OsrHelper(OptimizedCompilationInfo* info)
: parameter_count_(
- info->shared_info()->bytecode_array()->parameter_count()),
+ info->shared_info()->GetBytecodeArray()->parameter_count()),
stack_slot_count_(
InterpreterFrameConstants::RegisterStackSlotCount(
- info->shared_info()->bytecode_array()->register_count()) +
+ info->shared_info()->GetBytecodeArray()->register_count()) +
InterpreterFrameConstants::kExtraSlotCount) {}
void OsrHelper::SetupFrame(Frame* frame) {
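Besides the CompilationInfo rename, OsrHelper now reads the bytecode array through SharedFunctionInfo::GetBytecodeArray(). The frame it sizes is the unoptimized frame being replaced: parameters plus interpreter register slots plus a fixed number of bookkeeping slots. A back-of-the-envelope restatement (assuming RegisterStackSlotCount maps roughly one stack slot per interpreter register, which is an assumption, not something this hunk states):

#include <cstddef>

size_t ApproximateUnoptimizedFrameSlots(size_t parameter_count,
                                        size_t register_count,
                                        size_t extra_slot_count) {
  // parameter_count_ + RegisterStackSlotCount(register_count) + kExtraSlotCount
  return parameter_count + register_count + extra_slot_count;
}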
diff --git a/deps/v8/src/compiler/osr.h b/deps/v8/src/compiler/osr.h
index 451f60bb72..673b29e7ec 100644
--- a/deps/v8/src/compiler/osr.h
+++ b/deps/v8/src/compiler/osr.h
@@ -10,7 +10,7 @@
namespace v8 {
namespace internal {
-class CompilationInfo;
+class OptimizedCompilationInfo;
namespace compiler {
@@ -20,7 +20,7 @@ class Frame;
// details of the frame layout.
class OsrHelper {
public:
- explicit OsrHelper(CompilationInfo* info);
+ explicit OsrHelper(OptimizedCompilationInfo* info);
// Prepares the frame w.r.t. OSR.
void SetupFrame(Frame* frame);
diff --git a/deps/v8/src/compiler/pipeline-statistics.cc b/deps/v8/src/compiler/pipeline-statistics.cc
index 6019b344bf..af8fa50140 100644
--- a/deps/v8/src/compiler/pipeline-statistics.cc
+++ b/deps/v8/src/compiler/pipeline-statistics.cc
@@ -4,12 +4,12 @@
#include <memory>
-#include "src/compilation-info.h"
#include "src/compiler/pipeline-statistics.h"
#include "src/compiler/zone-stats.h"
#include "src/isolate.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/string.h"
+#include "src/optimized-compilation-info.h"
namespace v8 {
namespace internal {
@@ -45,8 +45,8 @@ void PipelineStatistics::CommonStats::End(
timer_.Stop();
}
-PipelineStatistics::PipelineStatistics(CompilationInfo* info, Isolate* isolate,
- ZoneStats* zone_stats)
+PipelineStatistics::PipelineStatistics(OptimizedCompilationInfo* info,
+ Isolate* isolate, ZoneStats* zone_stats)
: isolate_(isolate),
outer_zone_(info->zone()),
zone_stats_(zone_stats),
diff --git a/deps/v8/src/compiler/pipeline-statistics.h b/deps/v8/src/compiler/pipeline-statistics.h
index 900cd1cd8f..56467f496a 100644
--- a/deps/v8/src/compiler/pipeline-statistics.h
+++ b/deps/v8/src/compiler/pipeline-statistics.h
@@ -20,7 +20,7 @@ class PhaseScope;
class PipelineStatistics : public Malloced {
public:
- PipelineStatistics(CompilationInfo* info, Isolate* isolate,
+ PipelineStatistics(OptimizedCompilationInfo* info, Isolate* isolate,
ZoneStats* zone_stats);
~PipelineStatistics();
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index fe29917e61..795f2c19c3 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -13,7 +13,6 @@
#include "src/base/optional.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/bootstrapper.h"
-#include "src/compilation-info.h"
#include "src/compiler.h"
#include "src/compiler/basic-block-instrumentor.h"
#include "src/compiler/branch-elimination.h"
@@ -69,6 +68,7 @@
#include "src/compiler/verifier.h"
#include "src/compiler/zone-stats.h"
#include "src/isolate-inl.h"
+#include "src/optimized-compilation-info.h"
#include "src/ostreams.h"
#include "src/parsing/parse-info.h"
#include "src/register-configuration.h"
@@ -91,7 +91,8 @@ const int kMaxBytecodeSizeForTurbofan = 128 * 1024;
class PipelineData {
public:
// For main entry point.
- PipelineData(ZoneStats* zone_stats, Isolate* isolate, CompilationInfo* info,
+ PipelineData(ZoneStats* zone_stats, Isolate* isolate,
+ OptimizedCompilationInfo* info,
PipelineStatistics* pipeline_statistics)
: isolate_(isolate),
info_(info),
@@ -122,11 +123,11 @@ class PipelineData {
}
// For WebAssembly compile entry point.
- PipelineData(ZoneStats* zone_stats, Isolate* isolate, CompilationInfo* info,
- JSGraph* jsgraph, PipelineStatistics* pipeline_statistics,
+ PipelineData(ZoneStats* zone_stats, Isolate* isolate,
+ OptimizedCompilationInfo* info, JSGraph* jsgraph,
+ PipelineStatistics* pipeline_statistics,
SourcePositionTable* source_positions,
- std::vector<trap_handler::ProtectedInstructionData>*
- protected_instructions)
+ WasmCompilationData* wasm_compilation_data)
: isolate_(isolate),
info_(info),
debug_name_(info_->GetDebugName()),
@@ -145,11 +146,11 @@ class PipelineData {
codegen_zone_(codegen_zone_scope_.zone()),
register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
register_allocation_zone_(register_allocation_zone_scope_.zone()),
- protected_instructions_(protected_instructions) {}
+ wasm_compilation_data_(wasm_compilation_data) {}
// For machine graph testing entry point.
- PipelineData(ZoneStats* zone_stats, CompilationInfo* info, Isolate* isolate,
- Graph* graph, Schedule* schedule,
+ PipelineData(ZoneStats* zone_stats, OptimizedCompilationInfo* info,
+ Isolate* isolate, Graph* graph, Schedule* schedule,
SourcePositionTable* source_positions,
JumpOptimizationInfo* jump_opt)
: isolate_(isolate),
@@ -169,8 +170,8 @@ class PipelineData {
jump_optimization_info_(jump_opt) {}
// For register allocation testing entry point.
- PipelineData(ZoneStats* zone_stats, CompilationInfo* info, Isolate* isolate,
- InstructionSequence* sequence)
+ PipelineData(ZoneStats* zone_stats, OptimizedCompilationInfo* info,
+ Isolate* isolate, InstructionSequence* sequence)
: isolate_(isolate),
info_(info),
debug_name_(info_->GetDebugName()),
@@ -194,7 +195,7 @@ class PipelineData {
}
Isolate* isolate() const { return isolate_; }
- CompilationInfo* info() const { return info_; }
+ OptimizedCompilationInfo* info() const { return info_; }
ZoneStats* zone_stats() const { return zone_stats_; }
PipelineStatistics* pipeline_statistics() { return pipeline_statistics_; }
OsrHelper* osr_helper() { return &(*osr_helper_); }
@@ -341,12 +342,20 @@ class PipelineData {
void InitializeCodeGenerator(Linkage* linkage) {
DCHECK_NULL(code_generator_);
+
+ CodeGeneratorPoisoningLevel poisoning =
+ CodeGeneratorPoisoningLevel::kDontPoison;
+ if (info()->has_untrusted_code_mitigations()) {
+ poisoning = CodeGeneratorPoisoningLevel::kPoisonStackPointerInPrologue;
+ }
+ if (info()->is_poison_loads()) {
+ poisoning = CodeGeneratorPoisoningLevel::kPoisonAll;
+ }
+
code_generator_ = new CodeGenerator(
codegen_zone(), frame(), linkage, sequence(), info(), isolate(),
osr_helper_, start_source_position_, jump_optimization_info_,
- protected_instructions_,
- info()->is_poison_loads() ? LoadPoisoning::kDoPoison
- : LoadPoisoning::kDontPoison);
+ wasm_compilation_data_, poisoning);
}
void BeginPhaseKind(const char* phase_kind_name) {
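The poisoning level handed to the code generator is derived from two compilation-info flags, with full load poisoning taking precedence over the prologue-only stack-pointer poisoning. The same decision, restated as a free function (enumerator names are the ones visible in the hunk; the rest is scaffolding):

enum class CodeGeneratorPoisoningLevel {
  kDontPoison,
  kPoisonStackPointerInPrologue,
  kPoisonAll,
};

CodeGeneratorPoisoningLevel ChoosePoisoning(bool has_untrusted_code_mitigations,
                                            bool is_poison_loads) {
  CodeGeneratorPoisoningLevel poisoning = CodeGeneratorPoisoningLevel::kDontPoison;
  if (has_untrusted_code_mitigations) {
    poisoning = CodeGeneratorPoisoningLevel::kPoisonStackPointerInPrologue;
  }
  if (is_poison_loads) {
    poisoning = CodeGeneratorPoisoningLevel::kPoisonAll;
  }
  return poisoning;
}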
@@ -365,7 +374,7 @@ class PipelineData {
private:
Isolate* const isolate_;
- CompilationInfo* const info_;
+ OptimizedCompilationInfo* const info_;
std::unique_ptr<char[]> debug_name_;
bool may_have_unverifiable_graph_ = true;
ZoneStats* const zone_stats_;
@@ -417,8 +426,7 @@ class PipelineData {
// Source position output for --trace-turbo.
std::string source_position_output_;
- std::vector<trap_handler::ProtectedInstructionData>* protected_instructions_ =
- nullptr;
+ WasmCompilationData* wasm_compilation_data_ = nullptr;
JumpOptimizationInfo* jump_optimization_info_ = nullptr;
@@ -460,7 +468,7 @@ class PipelineImpl final {
void AllocateRegisters(const RegisterConfiguration* config,
CallDescriptor* call_descriptor, bool run_verifier);
- CompilationInfo* info() const;
+ OptimizedCompilationInfo* info() const;
Isolate* isolate() const;
PipelineData* const data_;
@@ -470,7 +478,7 @@ namespace {
// Print function's source if it was not printed before.
// Return a sequential id under which this function was printed.
-int PrintFunctionSource(CompilationInfo* info, Isolate* isolate,
+int PrintFunctionSource(OptimizedCompilationInfo* info, Isolate* isolate,
std::vector<Handle<SharedFunctionInfo>>* printed,
int inlining_id, Handle<SharedFunctionInfo> shared) {
// Outermost function has source id -1 and inlined functions take
@@ -499,11 +507,11 @@ int PrintFunctionSource(CompilationInfo* info, Isolate* isolate,
}
os << shared->DebugName()->ToCString().get() << ") id{";
os << info->optimization_id() << "," << source_id << "} start{";
- os << shared->start_position() << "} ---\n";
+ os << shared->StartPosition() << "} ---\n";
{
DisallowHeapAllocation no_allocation;
- int start = shared->start_position();
- int len = shared->end_position() - start;
+ int start = shared->StartPosition();
+ int len = shared->EndPosition() - start;
String::SubStringRange source(String::cast(script->source()), start,
len);
for (const auto& c : source) {
@@ -520,9 +528,9 @@ int PrintFunctionSource(CompilationInfo* info, Isolate* isolate,
// Print information for the given inlining: which function was inlined and
// where the inlining occurred.
-void PrintInlinedFunctionInfo(CompilationInfo* info, Isolate* isolate,
- int source_id, int inlining_id,
- const CompilationInfo::InlinedFunctionHolder& h) {
+void PrintInlinedFunctionInfo(
+ OptimizedCompilationInfo* info, Isolate* isolate, int source_id,
+ int inlining_id, const OptimizedCompilationInfo::InlinedFunctionHolder& h) {
CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
OFStream os(tracing_scope.file());
os << "INLINE (" << h.shared_info->DebugName()->ToCString().get() << ") id{"
@@ -539,7 +547,7 @@ void PrintInlinedFunctionInfo(CompilationInfo* info, Isolate* isolate,
// Print the source of all functions that participated in this optimizing
// compilation. For inlined functions print source position of their inlining.
-void DumpParticipatingSource(CompilationInfo* info, Isolate* isolate) {
+void DumpParticipatingSource(OptimizedCompilationInfo* info, Isolate* isolate) {
AllowDeferredHandleDereference allow_deference_for_print_code;
std::vector<Handle<SharedFunctionInfo>> printed;
@@ -556,7 +564,7 @@ void DumpParticipatingSource(CompilationInfo* info, Isolate* isolate) {
}
// Print the code after compiling it.
-void PrintCode(Handle<Code> code, CompilationInfo* info) {
+void PrintCode(Handle<Code> code, OptimizedCompilationInfo* info) {
Isolate* isolate = code->GetIsolate();
if (FLAG_print_opt_source && info->IsOptimizing()) {
DumpParticipatingSource(info, isolate);
@@ -585,10 +593,10 @@ void PrintCode(Handle<Code> code, CompilationInfo* info) {
os << "--- Raw source ---\n";
StringCharacterStream stream(
String::cast(Script::cast(shared->script())->source()),
- shared->start_position());
+ shared->StartPosition());
// fun->end_position() points to the last character in the stream. We
// need to compensate by adding one to calculate the length.
- int source_len = shared->end_position() - shared->start_position() + 1;
+ int source_len = shared->EndPosition() - shared->StartPosition() + 1;
for (int i = 0; i < source_len; i++) {
if (stream.HasMore()) {
os << AsReversiblyEscapedUC16(stream.GetNext());
@@ -605,7 +613,7 @@ void PrintCode(Handle<Code> code, CompilationInfo* info) {
}
if (print_source) {
Handle<SharedFunctionInfo> shared = info->shared_info();
- os << "source_position = " << shared->start_position() << "\n";
+ os << "source_position = " << shared->StartPosition() << "\n";
}
code->Disassemble(debug_name.get(), os);
os << "--- End code ---\n";
@@ -620,12 +628,12 @@ struct TurboCfgFile : public std::ofstream {
};
struct TurboJsonFile : public std::ofstream {
- TurboJsonFile(CompilationInfo* info, std::ios_base::openmode mode)
+ TurboJsonFile(OptimizedCompilationInfo* info, std::ios_base::openmode mode)
: std::ofstream(GetVisualizerLogFileName(info, nullptr, "json").get(),
mode) {}
};
-void TraceSchedule(CompilationInfo* info, Isolate* isolate,
+void TraceSchedule(OptimizedCompilationInfo* info, Isolate* isolate,
Schedule* schedule) {
if (FLAG_trace_turbo) {
AllowHandleDereference allow_deref;
@@ -709,7 +717,7 @@ class PipelineRunScope {
};
PipelineStatistics* CreatePipelineStatistics(Handle<Script> script,
- CompilationInfo* info,
+ OptimizedCompilationInfo* info,
Isolate* isolate,
ZoneStats* zone_stats) {
PipelineStatistics* pipeline_statistics = nullptr;
@@ -722,13 +730,13 @@ PipelineStatistics* CreatePipelineStatistics(Handle<Script> script,
if (FLAG_trace_turbo) {
TurboJsonFile json_of(info, std::ios_base::trunc);
std::unique_ptr<char[]> function_name = info->GetDebugName();
- int pos = info->IsStub() ? 0 : info->shared_info()->start_position();
+ int pos = info->IsStub() ? 0 : info->shared_info()->StartPosition();
json_of << "{\"function\":\"" << function_name.get()
<< "\", \"sourcePosition\":" << pos << ", \"source\":\"";
if (!script.is_null() && !script->source()->IsUndefined(isolate)) {
DisallowHeapAllocation no_allocation;
- int start = info->shared_info()->start_position();
- int len = info->shared_info()->end_position() - start;
+ int start = info->shared_info()->StartPosition();
+ int len = info->shared_info()->EndPosition() - start;
String::SubStringRange source(String::cast(script->source()), start, len);
for (const auto& c : source) {
json_of << AsEscapedUC16ForJSON(c);
@@ -742,22 +750,23 @@ PipelineStatistics* CreatePipelineStatistics(Handle<Script> script,
} // namespace
-class PipelineCompilationJob final : public CompilationJob {
+class PipelineCompilationJob final : public OptimizedCompilationJob {
public:
- PipelineCompilationJob(ParseInfo* parse_info,
- Handle<SharedFunctionInfo> shared_info,
+ PipelineCompilationJob(Handle<SharedFunctionInfo> shared_info,
Handle<JSFunction> function)
- // Note that the CompilationInfo is not initialized at the time we pass it
- // to the CompilationJob constructor, but it is not dereferenced there.
- : CompilationJob(parse_info->stack_limit(), parse_info,
- &compilation_info_, "TurboFan"),
- parse_info_(parse_info),
+ // Note that the OptimizedCompilationInfo is not initialized at the time
+ // we pass it to the CompilationJob constructor, but it is not
+ // dereferenced there.
+ : OptimizedCompilationJob(
+ function->GetIsolate()->stack_guard()->real_climit(),
+ &compilation_info_, "TurboFan"),
+ zone_(function->GetIsolate()->allocator(), ZONE_NAME),
zone_stats_(function->GetIsolate()->allocator()),
- compilation_info_(parse_info_.get()->zone(), function->GetIsolate(),
- shared_info, function),
- pipeline_statistics_(
- CreatePipelineStatistics(parse_info_->script(), compilation_info(),
- function->GetIsolate(), &zone_stats_)),
+ compilation_info_(&zone_, function->GetIsolate(), shared_info,
+ function),
+ pipeline_statistics_(CreatePipelineStatistics(
+ handle(Script::cast(shared_info->script())), compilation_info(),
+ function->GetIsolate(), &zone_stats_)),
data_(&zone_stats_, function->GetIsolate(), compilation_info(),
pipeline_statistics_.get()),
pipeline_(&data_),
@@ -772,9 +781,9 @@ class PipelineCompilationJob final : public CompilationJob {
void RegisterWeakObjectsInOptimizedCode(Handle<Code> code, Isolate* isolate);
private:
- std::unique_ptr<ParseInfo> parse_info_;
+ Zone zone_;
ZoneStats zone_stats_;
- CompilationInfo compilation_info_;
+ OptimizedCompilationInfo compilation_info_;
std::unique_ptr<PipelineStatistics> pipeline_statistics_;
PipelineData data_;
PipelineImpl pipeline_;
@@ -785,7 +794,7 @@ class PipelineCompilationJob final : public CompilationJob {
PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
Isolate* isolate) {
- if (compilation_info()->shared_info()->bytecode_array()->length() >
+ if (compilation_info()->shared_info()->GetBytecodeArray()->length() >
kMaxBytecodeSizeForTurbofan) {
return AbortOptimization(BailoutReason::kFunctionTooBig);
}
@@ -805,13 +814,16 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
if (FLAG_branch_load_poisoning) {
compilation_info()->MarkAsPoisonLoads();
}
+ if (FLAG_turbo_allocation_folding) {
+ compilation_info()->MarkAsAllocationFoldingEnabled();
+ }
if (compilation_info()->closure()->feedback_cell()->map() ==
isolate->heap()->one_closure_cell_map()) {
compilation_info()->MarkAsFunctionContextSpecializing();
}
data_.set_start_source_position(
- compilation_info()->shared_info()->start_position());
+ compilation_info()->shared_info()->StartPosition());
linkage_ = new (compilation_info()->zone()) Linkage(
Linkage::ComputeIncoming(compilation_info()->zone(), compilation_info()));
@@ -854,29 +866,10 @@ PipelineCompilationJob::Status PipelineCompilationJob::FinalizeJobImpl(
return SUCCEEDED;
}
-namespace {
-
-void AddWeakObjectToCodeDependency(Isolate* isolate, Handle<HeapObject> object,
- Handle<Code> code) {
- Handle<WeakCell> cell = Code::WeakCellFor(code);
- Heap* heap = isolate->heap();
- if (heap->InNewSpace(*object)) {
- heap->AddWeakNewSpaceObjectToCodeDependency(object, cell);
- } else {
- Handle<DependentCode> dep(heap->LookupWeakObjectToCodeDependency(object));
- dep =
- DependentCode::InsertWeakCode(dep, DependentCode::kWeakCodeGroup, cell);
- heap->AddWeakObjectToCodeDependency(object, dep);
- }
-}
-
-} // namespace
-
void PipelineCompilationJob::RegisterWeakObjectsInOptimizedCode(
Handle<Code> code, Isolate* isolate) {
DCHECK(code->is_optimized_code());
std::vector<Handle<Map>> maps;
- std::vector<Handle<HeapObject>> objects;
{
DisallowHeapAllocation no_gc;
int const mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
@@ -888,38 +881,29 @@ void PipelineCompilationJob::RegisterWeakObjectsInOptimizedCode(
isolate);
if (object->IsMap()) {
maps.push_back(Handle<Map>::cast(object));
- } else {
- objects.push_back(object);
}
}
}
}
for (Handle<Map> map : maps) {
- if (map->dependent_code()->IsEmpty(DependentCode::kWeakCodeGroup)) {
- isolate->heap()->AddRetainedMap(map);
- }
- Map::AddDependentCode(map, DependentCode::kWeakCodeGroup, code);
- }
- for (Handle<HeapObject> object : objects) {
- AddWeakObjectToCodeDependency(isolate, object, code);
+ isolate->heap()->AddRetainedMap(map);
}
code->set_can_have_weak_objects(true);
}
-class PipelineWasmCompilationJob final : public CompilationJob {
+class PipelineWasmCompilationJob final : public OptimizedCompilationJob {
public:
explicit PipelineWasmCompilationJob(
- CompilationInfo* info, Isolate* isolate, JSGraph* jsgraph,
+ OptimizedCompilationInfo* info, Isolate* isolate, JSGraph* jsgraph,
CallDescriptor* call_descriptor, SourcePositionTable* source_positions,
- std::vector<trap_handler::ProtectedInstructionData>* protected_insts,
- bool asmjs_origin)
- : CompilationJob(isolate->stack_guard()->real_climit(), nullptr, info,
- "TurboFan", State::kReadyToExecute),
+ WasmCompilationData* wasm_compilation_data, bool asmjs_origin)
+ : OptimizedCompilationJob(isolate->stack_guard()->real_climit(), info,
+ "TurboFan", State::kReadyToExecute),
zone_stats_(isolate->allocator()),
pipeline_statistics_(CreatePipelineStatistics(
Handle<Script>::null(), info, isolate, &zone_stats_)),
data_(&zone_stats_, isolate, info, jsgraph, pipeline_statistics_.get(),
- source_positions, protected_insts),
+ source_positions, wasm_compilation_data),
pipeline_(&data_),
linkage_(call_descriptor),
asmjs_origin_(asmjs_origin) {}
@@ -992,23 +976,18 @@ size_t PipelineWasmCompilationJob::AllocatedMemory() const {
PipelineWasmCompilationJob::Status PipelineWasmCompilationJob::FinalizeJobImpl(
Isolate* isolate) {
- if (!FLAG_wasm_jit_to_native) {
- pipeline_.FinalizeCode();
- ValidateImmovableEmbeddedObjects();
- } else {
- CodeGenerator* code_generator = pipeline_.data_->code_generator();
- CompilationInfo::WasmCodeDesc* wasm_code_desc =
- compilation_info()->wasm_code_desc();
- code_generator->tasm()->GetCode(isolate, &wasm_code_desc->code_desc);
- wasm_code_desc->safepoint_table_offset =
- code_generator->GetSafepointTableOffset();
- wasm_code_desc->handler_table_offset =
- code_generator->GetHandlerTableOffset();
- wasm_code_desc->frame_slot_count =
- code_generator->frame()->GetTotalFrameSlotCount();
- wasm_code_desc->source_positions_table =
- code_generator->GetSourcePositionTable();
- }
+ CodeGenerator* code_generator = pipeline_.data_->code_generator();
+ OptimizedCompilationInfo::WasmCodeDesc* wasm_code_desc =
+ compilation_info()->wasm_code_desc();
+ code_generator->tasm()->GetCode(isolate, &wasm_code_desc->code_desc);
+ wasm_code_desc->safepoint_table_offset =
+ code_generator->GetSafepointTableOffset();
+ wasm_code_desc->handler_table_offset =
+ code_generator->GetHandlerTableOffset();
+ wasm_code_desc->frame_slot_count =
+ code_generator->frame()->GetTotalFrameSlotCount();
+ wasm_code_desc->source_positions_table =
+ code_generator->GetSourcePositionTable();
return SUCCEEDED;
}
@@ -1043,8 +1022,7 @@ void PipelineWasmCompilationJob::ValidateImmovableEmbeddedObjects() const {
target->IsSmi() || Heap::IsImmovable(HeapObject::cast(target));
bool is_wasm = target->IsCode() &&
(Code::cast(target)->kind() == Code::WASM_FUNCTION ||
- Code::cast(target)->kind() == Code::WASM_TO_JS_FUNCTION ||
- Code::cast(target)->kind() == Code::WASM_TO_WASM_FUNCTION);
+ Code::cast(target)->kind() == Code::WASM_TO_JS_FUNCTION);
bool is_allowed_stub = false;
if (target->IsCode()) {
Code* code = Code::cast(target);
@@ -1091,7 +1069,8 @@ struct GraphBuilderPhase {
handle(data->info()->closure()->feedback_vector()),
data->info()->osr_offset(), data->jsgraph(), CallFrequency(1.0f),
data->source_positions(), data->native_context(),
- SourcePosition::kNotInlined, flags);
+ SourcePosition::kNotInlined, flags, true,
+ data->info()->is_analyze_environment_liveness());
graph_builder.CreateGraph();
}
};
@@ -1111,7 +1090,8 @@ Maybe<OuterContext> GetModuleContext(Handle<JSFunction> closure) {
return Nothing<OuterContext>();
}
-Maybe<OuterContext> ChooseSpecializationContext(CompilationInfo* info) {
+Maybe<OuterContext> ChooseSpecializationContext(
+ OptimizedCompilationInfo* info) {
if (info->is_function_context_specializing()) {
DCHECK(info->has_context());
return Just(OuterContext(handle(info->context()), 0));
@@ -1387,15 +1367,21 @@ struct EffectControlLinearizationPhase {
if (FLAG_turbo_verify) ScheduleVerifier::Run(schedule);
TraceSchedule(data->info(), data->isolate(), schedule);
+ // We only insert the array masking code if
+ // - untrusted code mitigations are on,
+ // - general load poisoning is off.
+ // TODO(jarin) Remove the array index masking code entirely once we have
+ // restricted load poisoning.
+ EffectControlLinearizer::MaskArrayIndexEnable mask_array_index =
+ (data->info()->has_untrusted_code_mitigations() &&
+ !data->info()->is_poison_loads())
+ ? EffectControlLinearizer::kMaskArrayIndex
+ : EffectControlLinearizer::kDoNotMaskArrayIndex;
// Post-pass for wiring the control/effects
// - connect allocating representation changes into the control&effect
// chains and lower them,
// - get rid of the region markers,
// - introduce effect phis and rewire effects to get SSA again.
- EffectControlLinearizer::MaskArrayIndexEnable mask_array_index =
- data->info()->has_untrusted_code_mitigations()
- ? EffectControlLinearizer::kMaskArrayIndex
- : EffectControlLinearizer::kDoNotMaskArrayIndex;
EffectControlLinearizer linearizer(data->jsgraph(), schedule, temp_zone,
data->source_positions(),
mask_array_index);
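
For readers following the new predicate above: array-index masking is now selected only when untrusted-code mitigations are requested and general load poisoning is off, presumably because poisoned loads already cover the same accesses (the TODO says the masking path is slated for removal). A minimal standalone sketch of that selection, with illustrative names rather than the V8 API:

#include <iostream>

enum class MaskArrayIndexEnable { kDoNotMaskArrayIndex, kMaskArrayIndex };

// Mirrors the selection above: mask array indices only when untrusted-code
// mitigations are requested and general load poisoning is not active.
MaskArrayIndexEnable SelectMasking(bool untrusted_code_mitigations,
                                   bool poison_loads) {
  return (untrusted_code_mitigations && !poison_loads)
             ? MaskArrayIndexEnable::kMaskArrayIndex
             : MaskArrayIndexEnable::kDoNotMaskArrayIndex;
}

int main() {
  bool masked =
      SelectMasking(true, false) == MaskArrayIndexEnable::kMaskArrayIndex;
  bool skipped =
      SelectMasking(true, true) == MaskArrayIndexEnable::kDoNotMaskArrayIndex;
  std::cout << masked << skipped << "\n";  // prints "11"
}
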
@@ -1472,10 +1458,13 @@ struct MemoryOptimizationPhase {
trimmer.TrimGraph(roots.begin(), roots.end());
// Optimize allocations and load/store operations.
- MemoryOptimizer optimizer(data->jsgraph(), temp_zone,
- data->info()->is_poison_loads()
- ? LoadPoisoning::kDoPoison
- : LoadPoisoning::kDontPoison);
+ MemoryOptimizer optimizer(
+ data->jsgraph(), temp_zone,
+ data->info()->is_poison_loads() ? PoisoningMitigationLevel::kOn
+ : PoisoningMitigationLevel::kOff,
+ data->info()->is_allocation_folding_enabled()
+ ? MemoryOptimizer::AllocationFolding::kDoAllocationFolding
+ : MemoryOptimizer::AllocationFolding::kDontAllocationFolding);
optimizer.Optimize();
}
};
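
The MemoryOptimizer now also receives an allocation-folding mode, driven by the --turbo-allocation-folding flag handled earlier in the pipeline. Roughly speaking, allocation folding merges adjacent allocations into one larger allocation so only a single bump-pointer check and bump are emitted; the toy allocator below illustrates that idea under that assumption and is not the MemoryOptimizer itself:

#include <cstddef>
#include <cstdint>
#include <iostream>

// Toy bump-pointer space: folding two allocations means one limit check and
// one pointer bump instead of two.
struct BumpAllocator {
  uint8_t buffer[1024];
  size_t top = 0;
  void* Allocate(size_t bytes) {
    if (top + bytes > sizeof(buffer)) return nullptr;  // would need a GC
    void* result = buffer + top;
    top += bytes;
    return result;
  }
};

int main() {
  BumpAllocator space;
  // Unfolded: two checks, two bumps.
  void* a = space.Allocate(16);
  void* b = space.Allocate(32);
  // Folded: one check and one bump for both objects, carved out of one block.
  void* block = space.Allocate(16 + 32);
  void* folded_a = block;
  void* folded_b = static_cast<uint8_t*>(block) + 16;
  std::cout << (a && b && folded_a && folded_b) << "\n";  // prints "1"
}
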
@@ -1554,9 +1543,6 @@ struct InstructionSelectionPhase {
data->info()->switch_jump_table_enabled()
? InstructionSelector::kEnableSwitchJumpTable
: InstructionSelector::kDisableSwitchJumpTable,
- data->info()->is_generating_speculation_poison_on_entry()
- ? InstructionSelector::kEnableSpeculationPoison
- : InstructionSelector::kDisableSpeculationPoison,
data->info()->is_source_positions_enabled()
? InstructionSelector::kAllSourcePositions
: InstructionSelector::kCallSourcePositions,
@@ -1567,8 +1553,8 @@ struct InstructionSelectionPhase {
data->isolate()->serializer_enabled()
? InstructionSelector::kEnableSerialization
: InstructionSelector::kDisableSerialization,
- data->info()->is_poison_loads() ? LoadPoisoning::kDoPoison
- : LoadPoisoning::kDontPoison);
+ data->info()->is_poison_loads() ? PoisoningMitigationLevel::kOn
+ : PoisoningMitigationLevel::kOff);
if (!selector.SelectInstructions()) {
data->set_compilation_failed();
}
@@ -1762,7 +1748,7 @@ struct PrintGraphPhase {
static const char* phase_name() { return nullptr; }
void Run(PipelineData* data, Zone* temp_zone, const char* phase) {
- CompilationInfo* info = data->info();
+ OptimizedCompilationInfo* info = data->info();
Graph* graph = data->graph();
if (FLAG_trace_turbo) { // Print JSON.
@@ -1805,7 +1791,6 @@ struct VerifyGraphPhase {
switch (data->info()->code_kind()) {
case Code::WASM_FUNCTION:
case Code::WASM_TO_JS_FUNCTION:
- case Code::WASM_TO_WASM_FUNCTION:
case Code::JS_TO_WASM_FUNCTION:
case Code::WASM_INTERPRETER_ENTRY:
case Code::C_WASM_ENTRY:
@@ -1985,11 +1970,16 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
Handle<Code> Pipeline::GenerateCodeForCodeStub(
Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
Schedule* schedule, Code::Kind kind, const char* debug_name,
- uint32_t stub_key, int32_t builtin_index, JumpOptimizationInfo* jump_opt) {
- CompilationInfo info(CStrVector(debug_name), graph->zone(), kind);
+ uint32_t stub_key, int32_t builtin_index, JumpOptimizationInfo* jump_opt,
+ PoisoningMitigationLevel poisoning_enabled) {
+ OptimizedCompilationInfo info(CStrVector(debug_name), graph->zone(), kind);
info.set_builtin_index(builtin_index);
info.set_stub_key(stub_key);
+ if (poisoning_enabled == PoisoningMitigationLevel::kOn) {
+ info.MarkAsPoisonLoads();
+ }
+
// Construct a pipeline for scheduling and code generation.
ZoneStats zone_stats(isolate->allocator());
SourcePositionTable source_positions(graph);
@@ -2024,7 +2014,7 @@ Handle<Code> Pipeline::GenerateCodeForCodeStub(
}
// static
-Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info,
+Handle<Code> Pipeline::GenerateCodeForTesting(OptimizedCompilationInfo* info,
Isolate* isolate) {
ZoneStats zone_stats(isolate->allocator());
std::unique_ptr<PipelineStatistics> pipeline_statistics(
@@ -2043,7 +2033,7 @@ Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info,
}
// static
-Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info,
+Handle<Code> Pipeline::GenerateCodeForTesting(OptimizedCompilationInfo* info,
Isolate* isolate, Graph* graph,
Schedule* schedule) {
auto call_descriptor = Linkage::ComputeIncoming(info->zone(), info);
@@ -2053,8 +2043,9 @@ Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info,
// static
Handle<Code> Pipeline::GenerateCodeForTesting(
- CompilationInfo* info, Isolate* isolate, CallDescriptor* call_descriptor,
- Graph* graph, Schedule* schedule, SourcePositionTable* source_positions) {
+ OptimizedCompilationInfo* info, Isolate* isolate,
+ CallDescriptor* call_descriptor, Graph* graph, Schedule* schedule,
+ SourcePositionTable* source_positions) {
// Construct a pipeline for scheduling and code generation.
ZoneStats zone_stats(isolate->allocator());
// TODO(wasm): Refactor code generation to check for non-existing source
@@ -2089,33 +2080,28 @@ Handle<Code> Pipeline::GenerateCodeForTesting(
}
// static
-CompilationJob* Pipeline::NewCompilationJob(Handle<JSFunction> function,
- bool has_script) {
+OptimizedCompilationJob* Pipeline::NewCompilationJob(
+ Handle<JSFunction> function, bool has_script) {
Handle<SharedFunctionInfo> shared = handle(function->shared());
- ParseInfo* parse_info;
- if (!has_script) {
- parse_info = ParseInfo::AllocateWithoutScript(shared);
- } else {
- parse_info = new ParseInfo(shared);
- }
- return new PipelineCompilationJob(parse_info, shared, function);
+ return new PipelineCompilationJob(shared, function);
}
// static
-CompilationJob* Pipeline::NewWasmCompilationJob(
- CompilationInfo* info, Isolate* isolate, JSGraph* jsgraph,
+OptimizedCompilationJob* Pipeline::NewWasmCompilationJob(
+ OptimizedCompilationInfo* info, Isolate* isolate, JSGraph* jsgraph,
CallDescriptor* call_descriptor, SourcePositionTable* source_positions,
- std::vector<trap_handler::ProtectedInstructionData>* protected_instructions,
+ WasmCompilationData* wasm_compilation_data,
wasm::ModuleOrigin asmjs_origin) {
return new PipelineWasmCompilationJob(info, isolate, jsgraph, call_descriptor,
- source_positions,
- protected_instructions, asmjs_origin);
+ source_positions, wasm_compilation_data,
+ asmjs_origin);
}
bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
InstructionSequence* sequence,
bool run_verifier) {
- CompilationInfo info(ArrayVector("testing"), sequence->zone(), Code::STUB);
+ OptimizedCompilationInfo info(ArrayVector("testing"), sequence->zone(),
+ Code::STUB);
ZoneStats zone_stats(sequence->isolate()->allocator());
PipelineData data(&zone_stats, &info, sequence->isolate(), sequence);
PipelineImpl pipeline(&data);
@@ -2214,13 +2200,12 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
// Allocate registers.
if (call_descriptor->HasRestrictedAllocatableRegisters()) {
- auto registers = call_descriptor->AllocatableRegisters();
+ RegList registers = call_descriptor->AllocatableRegisters();
DCHECK_LT(0, NumRegs(registers));
std::unique_ptr<const RegisterConfiguration> config;
config.reset(RegisterConfiguration::RestrictGeneralRegisters(registers));
AllocateRegisters(config.get(), call_descriptor, run_verifier);
} else if (data->info()->is_poison_loads()) {
- CHECK(InstructionSelector::SupportsSpeculationPoisoning());
AllocateRegisters(RegisterConfiguration::Poisoning(), call_descriptor,
run_verifier);
} else {
@@ -2403,7 +2388,7 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
data->DeleteRegisterAllocationZone();
}
-CompilationInfo* PipelineImpl::info() const { return data_->info(); }
+OptimizedCompilationInfo* PipelineImpl::info() const { return data_->info(); }
Isolate* PipelineImpl::isolate() const { return data_->isolate(); }
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index 92b128c357..33fb965c83 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -15,15 +15,11 @@
namespace v8 {
namespace internal {
-class CompilationInfo;
-class CompilationJob;
+class OptimizedCompilationInfo;
+class OptimizedCompilationJob;
class RegisterConfiguration;
class JumpOptimizationInfo;
-namespace trap_handler {
-struct ProtectedInstructionData;
-} // namespace trap_handler
-
namespace wasm {
enum ModuleOrigin : uint8_t;
} // namespace wasm
@@ -36,19 +32,19 @@ class Graph;
class InstructionSequence;
class Schedule;
class SourcePositionTable;
+class WasmCompilationData;
class Pipeline : public AllStatic {
public:
// Returns a new compilation job for the given function.
- static CompilationJob* NewCompilationJob(Handle<JSFunction> function,
- bool has_script);
+ static OptimizedCompilationJob* NewCompilationJob(Handle<JSFunction> function,
+ bool has_script);
// Returns a new compilation job for the WebAssembly compilation info.
- static CompilationJob* NewWasmCompilationJob(
- CompilationInfo* info, Isolate* isolate, JSGraph* jsgraph,
+ static OptimizedCompilationJob* NewWasmCompilationJob(
+ OptimizedCompilationInfo* info, Isolate* isolate, JSGraph* jsgraph,
CallDescriptor* call_descriptor, SourcePositionTable* source_positions,
- std::vector<trap_handler::ProtectedInstructionData>*
- protected_instructions,
+ WasmCompilationData* wasm_compilation_data,
wasm::ModuleOrigin wasm_origin);
// Run the pipeline on a machine graph and generate code. The {schedule} must
@@ -56,16 +52,17 @@ class Pipeline : public AllStatic {
static Handle<Code> GenerateCodeForCodeStub(
Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
Schedule* schedule, Code::Kind kind, const char* debug_name,
- uint32_t stub_key, int32_t builtin_index, JumpOptimizationInfo* jump_opt);
+ uint32_t stub_key, int32_t builtin_index, JumpOptimizationInfo* jump_opt,
+ PoisoningMitigationLevel poisoning_enabled);
// Run the entire pipeline and generate a handle to a code object suitable for
// testing.
- static Handle<Code> GenerateCodeForTesting(CompilationInfo* info,
+ static Handle<Code> GenerateCodeForTesting(OptimizedCompilationInfo* info,
Isolate* isolate);
// Run the pipeline on a machine graph and generate code. If {schedule} is
// {nullptr}, then compute a new schedule for code generation.
- static Handle<Code> GenerateCodeForTesting(CompilationInfo* info,
+ static Handle<Code> GenerateCodeForTesting(OptimizedCompilationInfo* info,
Isolate* isolate, Graph* graph,
Schedule* schedule = nullptr);
@@ -77,8 +74,9 @@ class Pipeline : public AllStatic {
// Run the pipeline on a machine graph and generate code. If {schedule} is
// {nullptr}, then compute a new schedule for code generation.
V8_EXPORT_PRIVATE static Handle<Code> GenerateCodeForTesting(
- CompilationInfo* info, Isolate* isolate, CallDescriptor* call_descriptor,
- Graph* graph, Schedule* schedule = nullptr,
+ OptimizedCompilationInfo* info, Isolate* isolate,
+ CallDescriptor* call_descriptor, Graph* graph,
+ Schedule* schedule = nullptr,
SourcePositionTable* source_positions = nullptr);
private:
diff --git a/deps/v8/src/compiler/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
index 6bdf8fa974..2a230f103c 100644
--- a/deps/v8/src/compiler/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
@@ -6,12 +6,12 @@
#include "src/assembler-inl.h"
#include "src/callable.h"
-#include "src/compilation-info.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/double.h"
+#include "src/optimized-compilation-info.h"
#include "src/ppc/macro-assembler-ppc.h"
namespace v8 {
@@ -794,12 +794,7 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
// Check that {kJavaScriptCallCodeStartRegister} is correct.
void CodeGenerator::AssembleCodeStartRegisterCheck() {
Register scratch = kScratchReg;
-
- Label current_pc;
- __ mov_label_addr(scratch, &current_pc);
-
- __ bind(&current_pc);
- __ subi(scratch, scratch, Operand(__ pc_offset()));
+ __ ComputeCodeStartAddress(scratch);
__ cmp(scratch, kJavaScriptCallCodeStartRegister);
__ Assert(eq, AbortReason::kWrongFunctionCodeStart);
}
@@ -814,11 +809,7 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
void CodeGenerator::BailoutIfDeoptimized() {
if (FLAG_debug_code) {
// Check that {kJavaScriptCallCodeStartRegister} is correct.
- Label current_pc;
- __ mov_label_addr(ip, &current_pc);
-
- __ bind(&current_pc);
- __ subi(ip, ip, Operand(__ pc_offset()));
+ __ ComputeCodeStartAddress(ip);
__ cmp(ip, kJavaScriptCallCodeStartRegister);
__ Assert(eq, AbortReason::kWrongFunctionCodeStart);
}
@@ -833,7 +824,7 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ Jump(code, RelocInfo::CODE_TARGET, ne, cr0);
}
-void CodeGenerator::GenerateSpeculationPoison() {
+void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
Register scratch = kScratchReg;
Label current_pc;
@@ -844,18 +835,11 @@ void CodeGenerator::GenerateSpeculationPoison() {
// Calculate a mask which has all bits set in the normal case, but has all
// bits cleared if we are speculatively executing the wrong PC.
- // difference = (current - expected) | (expected - current)
- // poison = ~(difference >> (kBitsPerPointer - 1))
- __ mr(kSpeculationPoisonRegister, scratch);
- __ sub(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kJavaScriptCallCodeStartRegister);
- __ sub(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister,
- scratch);
- __ orx(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kJavaScriptCallCodeStartRegister);
- __ ShiftRightArithImm(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kBitsPerPointer - 1);
- __ notx(kSpeculationPoisonRegister, kSpeculationPoisonRegister);
+ __ cmp(kJavaScriptCallCodeStartRegister, scratch);
+ __ li(scratch, Operand::Zero());
+ __ notx(kSpeculationPoisonRegister, scratch);
+ __ isel(eq, kSpeculationPoisonRegister,
+ kSpeculationPoisonRegister, scratch);
}
void CodeGenerator::AssembleRegisterArgumentPoisoning() {
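
For context on the mask rewrite above: the removed arithmetic sequence and the new cmp/li/notx/isel sequence compute the same value, all ones when the code-start register matches the computed current PC and all zeros otherwise. A standalone sketch of both formulations, in plain C++ assuming a two's-complement target (not the macro-assembler API):

#include <cstdint>
#include <iostream>

// Old form: difference = (current - expected) | (expected - current);
//           poison = ~(difference >> (kBitsPerPointer - 1))
uint64_t PoisonByArithmetic(uint64_t current, uint64_t expected) {
  int64_t diff =
      static_cast<int64_t>((current - expected) | (expected - current));
  return ~static_cast<uint64_t>(diff >> 63);  // sign-extending shift
}

// New form: compare, then select all-ones or zero.
uint64_t PoisonBySelect(uint64_t current, uint64_t expected) {
  return current == expected ? ~uint64_t{0} : uint64_t{0};
}

int main() {
  std::cout << std::hex
            << PoisonByArithmetic(0x1000, 0x1000) << " "   // ffffffffffffffff
            << PoisonByArithmetic(0x1000, 0x2000) << " "   // 0
            << (PoisonByArithmetic(0x1000, 0x2000) ==
                PoisonBySelect(0x1000, 0x2000))            // 1
            << "\n";
}

Either way, and-ing a value with this register (as kArchPoisonOnSpeculationWord does) leaves it intact on the architecturally taken path and zeroes it under misspeculation.
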
@@ -1109,6 +1093,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ mr(i.OutputRegister(), fp);
}
break;
+ case kArchRootsPointer:
+ __ mr(i.OutputRegister(), kRootRegister);
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
case kArchTruncateDoubleToI:
// TODO(mbrandy): move slow call to stub out of line.
__ TruncateDoubleToIDelayed(zone(), i.OutputRegister(),
@@ -1151,6 +1139,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand(offset.offset()));
break;
}
+ case kArchPoisonOnSpeculationWord:
+ __ and_(i.OutputRegister(), i.InputRegister(0),
+ kSpeculationPoisonRegister);
+ break;
case kPPC_And:
if (HasRegisterInput(instr, 1)) {
__ and_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
@@ -1553,8 +1545,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_IEEE754_UNOP(log10);
break;
case kIeee754Float64Pow: {
- __ CallStubDelayed(new (zone())
- MathPowStub(nullptr, MathPowStub::DOUBLE));
+ __ CallStubDelayed(new (zone()) MathPowStub());
__ Move(d1, d3);
break;
}
@@ -2259,7 +2250,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
- InitializePoisonForLoadsIfNeeded();
+ ResetSpeculationPoison();
}
const RegList double_saves = call_descriptor->CalleeSavedFPRegisters();
@@ -2325,7 +2316,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
// Constant pool is unavailable since the frame has been destructed
ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
if (pop->IsImmediate()) {
- DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
+ DCHECK(Constant::kInt32 == g.ToConstant(pop).type() ||
+ Constant::kInt64 == g.ToConstant(pop).type());
pop_count += g.ToConstant(pop).ToInt32();
} else {
__ Drop(g.ToRegister(pop));
@@ -2367,7 +2359,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
switch (src.type()) {
case Constant::kInt32:
#if V8_TARGET_ARCH_PPC64
- if (RelocInfo::IsWasmSizeReference(src.rmode())) {
+ if (false) {
#else
if (RelocInfo::IsWasmReference(src.rmode())) {
#endif
@@ -2381,7 +2373,6 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (RelocInfo::IsWasmPtrReference(src.rmode())) {
__ mov(dst, Operand(src.ToInt64(), src.rmode()));
} else {
- DCHECK(!RelocInfo::IsWasmSizeReference(src.rmode()));
#endif
__ mov(dst, Operand(src.ToInt64()));
#if V8_TARGET_ARCH_PPC64
diff --git a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
index 70a6c9ee69..b96c4d5732 100644
--- a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
@@ -39,9 +39,9 @@ class PPCOperandGenerator final : public OperandGenerator {
bool CanBeImmediate(Node* node, ImmediateMode mode) {
int64_t value;
if (node->opcode() == IrOpcode::kInt32Constant)
- value = OpParameter<int32_t>(node);
+ value = OpParameter<int32_t>(node->op());
else if (node->opcode() == IrOpcode::kInt64Constant)
- value = OpParameter<int64_t>(node);
+ value = OpParameter<int64_t>(node->op());
else
return false;
return CanBeImmediate(value, mode);
@@ -129,11 +129,6 @@ void VisitBinop(InstructionSelector* selector, Node* node,
inputs[input_count++] = g.UseRegister(m.left().node());
inputs[input_count++] = g.UseOperand(m.right().node(), operand_mode);
- if (cont->IsBranch()) {
- inputs[input_count++] = g.Label(cont->true_block());
- inputs[input_count++] = g.Label(cont->false_block());
- }
-
if (cont->IsDeoptimize()) {
// If we can deoptimize as a result of the binop, we need to make sure that
// the deopt inputs are not overwritten by the binop result. One way
@@ -142,26 +137,14 @@ void VisitBinop(InstructionSelector* selector, Node* node,
} else {
outputs[output_count++] = g.DefineAsRegister(node);
}
- if (cont->IsSet()) {
- outputs[output_count++] = g.DefineAsRegister(cont->result());
- }
DCHECK_NE(0u, input_count);
DCHECK_NE(0u, output_count);
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
- opcode = cont->Encode(opcode);
- if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->feedback(),
- cont->frame_state());
- } else if (cont->IsTrap()) {
- inputs[input_count++] = g.UseImmediate(cont->trap_id());
- selector->Emit(opcode, output_count, outputs, input_count, inputs);
- } else {
- selector->Emit(opcode, output_count, outputs, input_count, inputs);
- }
+ selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
+ inputs, cont);
}
@@ -236,7 +219,7 @@ void InstructionSelector::VisitLoad(Node* node) {
}
if (node->opcode() == IrOpcode::kPoisonedLoad &&
- load_poisoning_ == LoadPoisoning::kDoPoison) {
+ poisoning_enabled_ == PoisoningMitigationLevel::kOn) {
opcode |= MiscField::encode(kMemoryAccessPoisoned);
}
@@ -1486,22 +1469,7 @@ namespace {
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
InstructionOperand left, InstructionOperand right,
FlagsContinuation* cont) {
- PPCOperandGenerator g(selector);
- opcode = cont->Encode(opcode);
- if (cont->IsBranch()) {
- selector->Emit(opcode, g.NoOutput(), left, right,
- g.Label(cont->true_block()), g.Label(cont->false_block()));
- } else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
- cont->reason(), cont->feedback(),
- cont->frame_state());
- } else if (cont->IsSet()) {
- selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
- } else {
- DCHECK(cont->IsTrap());
- selector->Emit(opcode, g.NoOutput(), left, right,
- g.UseImmediate(cont->trap_id()));
- }
+ selector->EmitWithContinuation(opcode, left, right, cont);
}
@@ -1720,20 +1688,20 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
// Emit either ArchTableSwitch or ArchLookupSwitch.
if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
static const size_t kMaxTableSwitchValueRange = 2 << 16;
- size_t table_space_cost = 4 + sw.value_range;
+ size_t table_space_cost = 4 + sw.value_range();
size_t table_time_cost = 3;
- size_t lookup_space_cost = 3 + 2 * sw.case_count;
- size_t lookup_time_cost = sw.case_count;
- if (sw.case_count > 0 &&
+ size_t lookup_space_cost = 3 + 2 * sw.case_count();
+ size_t lookup_time_cost = sw.case_count();
+ if (sw.case_count() > 0 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min() &&
- sw.value_range <= kMaxTableSwitchValueRange) {
+ sw.min_value() > std::numeric_limits<int32_t>::min() &&
+ sw.value_range() <= kMaxTableSwitchValueRange) {
InstructionOperand index_operand = value_operand;
- if (sw.min_value) {
+ if (sw.min_value()) {
index_operand = g.TempRegister();
Emit(kPPC_Sub, index_operand, value_operand,
- g.TempImmediate(sw.min_value));
+ g.TempImmediate(sw.min_value()));
}
// Generate a table lookup.
return EmitTableSwitch(sw, index_operand);
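
The accessor change above (sw.case_count() and friends) does not alter the cost model, so it is worth spelling out: a jump table is chosen when its space cost plus three times its time cost is no worse than that of a lookup switch, the range is bounded, and the minimum does not underflow. A standalone rendering of that comparison with two worked inputs:

#include <cstdint>
#include <iostream>
#include <limits>

// Mirrors the cost comparison used by VisitSwitch: weighted space + 3 * time.
bool UseJumpTable(size_t case_count, size_t value_range, int32_t min_value) {
  const size_t kMaxTableSwitchValueRange = 2 << 16;
  size_t table_space_cost = 4 + value_range;
  size_t table_time_cost = 3;
  size_t lookup_space_cost = 3 + 2 * case_count;
  size_t lookup_time_cost = case_count;
  return case_count > 0 &&
         table_space_cost + 3 * table_time_cost <=
             lookup_space_cost + 3 * lookup_time_cost &&
         min_value > std::numeric_limits<int32_t>::min() &&
         value_range <= kMaxTableSwitchValueRange;
}

int main() {
  std::cout << UseJumpTable(9, 10, 0)    // dense cases: 23 <= 48, use table
            << UseJumpTable(4, 1000, 0)  // sparse cases: 1013 > 23, use lookup
            << "\n";                     // prints "10"
}
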
@@ -2238,6 +2206,16 @@ void InstructionSelector::VisitF32x4Neg(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Abs(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF32x4RecipSqrtApprox(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitF32x4RecipApprox(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4AddHoriz(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI32x4AddHoriz(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI16x8AddHoriz(Node* node) { UNIMPLEMENTED(); }
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
@@ -2260,9 +2238,6 @@ InstructionSelector::AlignmentRequirements() {
FullUnalignedAccessSupport();
}
-// static
-bool InstructionSelector::SupportsSpeculationPoisoning() { return true; }
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/property-access-builder.cc b/deps/v8/src/compiler/property-access-builder.cc
index bead0618f6..284da9350a 100644
--- a/deps/v8/src/compiler/property-access-builder.cc
+++ b/deps/v8/src/compiler/property-access-builder.cc
@@ -47,32 +47,17 @@ bool HasOnlyNumberMaps(MapHandles const& maps) {
return true;
}
-bool HasOnlySequentialStringMaps(MapHandles const& maps) {
- for (auto map : maps) {
- if (!map->IsStringMap()) return false;
- if (!StringShape(map->instance_type()).IsSequential()) {
- return false;
- }
- }
- return true;
-}
-
} // namespace
bool PropertyAccessBuilder::TryBuildStringCheck(MapHandles const& maps,
Node** receiver, Node** effect,
Node* control) {
if (HasOnlyStringMaps(maps)) {
- if (HasOnlySequentialStringMaps(maps)) {
- *receiver = *effect = graph()->NewNode(simplified()->CheckSeqString(),
- *receiver, *effect, control);
- } else {
- // Monormorphic string access (ignoring the fact that there are multiple
- // String maps).
- *receiver = *effect =
- graph()->NewNode(simplified()->CheckString(VectorSlotPair()),
- *receiver, *effect, control);
- }
+ // Monormorphic string access (ignoring the fact that there are multiple
+ // String maps).
+ *receiver = *effect =
+ graph()->NewNode(simplified()->CheckString(VectorSlotPair()), *receiver,
+ *effect, control);
return true;
}
return false;
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index 778752e50f..a2b2269456 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -7,7 +7,7 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/scheduler.h"
-#include "src/factory-inl.h"
+#include "src/heap/factory-inl.h"
namespace v8 {
namespace internal {
@@ -16,7 +16,8 @@ namespace compiler {
RawMachineAssembler::RawMachineAssembler(
Isolate* isolate, Graph* graph, CallDescriptor* call_descriptor,
MachineRepresentation word, MachineOperatorBuilder::Flags flags,
- MachineOperatorBuilder::AlignmentRequirements alignment_requirements)
+ MachineOperatorBuilder::AlignmentRequirements alignment_requirements,
+ PoisoningMitigationLevel poisoning_enabled)
: isolate_(isolate),
graph_(graph),
schedule_(new (zone()) Schedule(zone())),
@@ -24,7 +25,8 @@ RawMachineAssembler::RawMachineAssembler(
common_(zone()),
call_descriptor_(call_descriptor),
parameters_(parameter_count(), zone()),
- current_block_(schedule()->start()) {
+ current_block_(schedule()->start()),
+ poisoning_enabled_(poisoning_enabled) {
int param_count = static_cast<int>(parameter_count());
// Add an extra input for the JSFunction parameter to the start node.
graph->SetStart(graph->NewNode(common_.Start(param_count + 1)));
@@ -88,7 +90,9 @@ void RawMachineAssembler::Goto(RawMachineLabel* label) {
void RawMachineAssembler::Branch(Node* condition, RawMachineLabel* true_val,
RawMachineLabel* false_val) {
DCHECK(current_block_ != schedule()->end());
- Node* branch = MakeNode(common()->Branch(), 1, &condition);
+ Node* branch = MakeNode(
+ common()->Branch(BranchHint::kNone, IsSafetyCheck::kNoSafetyCheck), 1,
+ &condition);
schedule()->AddBranch(CurrentBlock(), branch, Use(true_val), Use(false_val));
current_block_ = nullptr;
}
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index 3d689a089c..251a13416e 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -12,8 +12,8 @@
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
#include "src/compiler/operator.h"
-#include "src/factory.h"
#include "src/globals.h"
+#include "src/heap/factory.h"
namespace v8 {
namespace internal {
@@ -44,7 +44,9 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
MachineOperatorBuilder::Flag::kNoFlags,
MachineOperatorBuilder::AlignmentRequirements alignment_requirements =
MachineOperatorBuilder::AlignmentRequirements::
- FullUnalignedAccessSupport());
+ FullUnalignedAccessSupport(),
+ PoisoningMitigationLevel poisoning_enabled =
+ PoisoningMitigationLevel::kOn);
~RawMachineAssembler() {}
Isolate* isolate() const { return isolate_; }
@@ -53,6 +55,9 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
MachineOperatorBuilder* machine() { return &machine_; }
CommonOperatorBuilder* common() { return &common_; }
CallDescriptor* call_descriptor() const { return call_descriptor_; }
+ PoisoningMitigationLevel poisoning_enabled() const {
+ return poisoning_enabled_;
+ }
// Finalizes the schedule and exports it to be used for code generation. Note
// that this RawMachineAssembler becomes invalid after export.
@@ -116,11 +121,18 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
}
// Memory Operations.
- Node* Load(MachineType rep, Node* base) {
- return Load(rep, base, IntPtrConstant(0));
- }
- Node* Load(MachineType rep, Node* base, Node* index) {
- return AddNode(machine()->Load(rep), base, index);
+ Node* Load(MachineType rep, Node* base,
+ LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
+ return Load(rep, base, IntPtrConstant(0), needs_poisoning);
+ }
+ Node* Load(MachineType rep, Node* base, Node* index,
+ LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
+ const Operator* op = machine()->Load(rep);
+ if (needs_poisoning == LoadSensitivity::kNeedsPoisoning &&
+ poisoning_enabled_ == PoisoningMitigationLevel::kOn) {
+ op = machine()->PoisonedLoad(rep);
+ }
+ return AddNode(op, base, index);
}
Node* Store(MachineRepresentation rep, Node* base, Node* value,
WriteBarrierKind write_barrier) {
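
The new Load overloads pick a poisoned load operator only when the call site marks the load as sensitive and the assembler was constructed with poisoning enabled; otherwise a plain load is emitted. A standalone sketch of that dispatch, using illustrative string labels in place of the real operator objects:

#include <iostream>
#include <string>

enum class LoadSensitivity { kSafe, kNeedsPoisoning };
enum class PoisoningMitigationLevel { kOff, kOn };

// Sketch of the operator selection in RawMachineAssembler::Load.
std::string SelectLoadOperator(LoadSensitivity needs_poisoning,
                               PoisoningMitigationLevel level) {
  if (needs_poisoning == LoadSensitivity::kNeedsPoisoning &&
      level == PoisoningMitigationLevel::kOn) {
    return "PoisonedLoad";
  }
  return "Load";
}

int main() {
  std::cout << SelectLoadOperator(LoadSensitivity::kNeedsPoisoning,
                                  PoisoningMitigationLevel::kOn)  // PoisonedLoad
            << " "
            << SelectLoadOperator(LoadSensitivity::kSafe,
                                  PoisoningMitigationLevel::kOn)  // Load
            << "\n";
}
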
@@ -723,6 +735,9 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
return AddNode(machine()->LoadParentFramePointer());
}
+ // Root pointer operations.
+ Node* LoadRootsPointer() { return AddNode(machine()->LoadRootsPointer()); }
+
// Parameters.
Node* Parameter(size_t index);
@@ -744,8 +759,17 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* StringConstant(const char* string) {
return HeapConstant(isolate()->factory()->InternalizeUtf8String(string));
}
- Node* SpeculationPoison() {
- return AddNode(machine()->SpeculationPoison(), graph()->start());
+
+ Node* PoisonOnSpeculationTagged(Node* value) {
+ if (poisoning_enabled_ == PoisoningMitigationLevel::kOn)
+ return AddNode(machine()->PoisonOnSpeculationTagged(), value);
+ return value;
+ }
+
+ Node* PoisonOnSpeculationWord(Node* value) {
+ if (poisoning_enabled_ == PoisoningMitigationLevel::kOn)
+ return AddNode(machine()->PoisonOnSpeculationWord(), value);
+ return value;
}
// Call a given call descriptor and the given arguments.
@@ -908,8 +932,8 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
CommonOperatorBuilder common_;
CallDescriptor* call_descriptor_;
NodeVector parameters_;
- Node* speculation_poison_;
BasicBlock* current_block_;
+ PoisoningMitigationLevel poisoning_enabled_;
DISALLOW_COPY_AND_ASSIGN(RawMachineAssembler);
};
diff --git a/deps/v8/src/compiler/redundancy-elimination.cc b/deps/v8/src/compiler/redundancy-elimination.cc
index eedf946fb6..25f42efdcd 100644
--- a/deps/v8/src/compiler/redundancy-elimination.cc
+++ b/deps/v8/src/compiler/redundancy-elimination.cc
@@ -29,7 +29,6 @@ Reduction RedundancyElimination::Reduce(Node* node) {
case IrOpcode::kCheckNotTaggedHole:
case IrOpcode::kCheckNumber:
case IrOpcode::kCheckReceiver:
- case IrOpcode::kCheckSeqString:
case IrOpcode::kCheckSmi:
case IrOpcode::kCheckString:
case IrOpcode::kCheckSymbol:
diff --git a/deps/v8/src/compiler/register-allocator-verifier.cc b/deps/v8/src/compiler/register-allocator-verifier.cc
index 452ff705bd..8c150d975a 100644
--- a/deps/v8/src/compiler/register-allocator-verifier.cc
+++ b/deps/v8/src/compiler/register-allocator-verifier.cc
@@ -167,14 +167,18 @@ void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
constraint->value_ = unallocated->fixed_slot_index();
} else {
switch (unallocated->extended_policy()) {
- case UnallocatedOperand::ANY:
+ case UnallocatedOperand::REGISTER_OR_SLOT:
case UnallocatedOperand::NONE:
if (sequence()->IsFP(vreg)) {
- constraint->type_ = kNoneFP;
+ constraint->type_ = kRegisterOrSlotFP;
} else {
- constraint->type_ = kNone;
+ constraint->type_ = kRegisterOrSlot;
}
break;
+ case UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT:
+ DCHECK(!sequence()->IsFP(vreg));
+ constraint->type_ = kRegisterOrSlotOrConstant;
+ break;
case UnallocatedOperand::FIXED_REGISTER:
if (unallocated->HasSecondaryStorage()) {
constraint->type_ = kRegisterAndSlot;
@@ -252,12 +256,16 @@ void RegisterAllocatorVerifier::CheckConstraint(
CHECK_EQ(ElementSizeLog2Of(LocationOperand::cast(op)->representation()),
constraint->value_);
return;
- case kNone:
+ case kRegisterOrSlot:
CHECK_WITH_MSG(op->IsRegister() || op->IsStackSlot(), caller_info_);
return;
- case kNoneFP:
+ case kRegisterOrSlotFP:
CHECK_WITH_MSG(op->IsFPRegister() || op->IsFPStackSlot(), caller_info_);
return;
+ case kRegisterOrSlotOrConstant:
+ CHECK_WITH_MSG(op->IsRegister() || op->IsStackSlot() || op->IsConstant(),
+ caller_info_);
+ return;
case kSameAsFirst:
CHECK_WITH_MSG(false, caller_info_);
return;
diff --git a/deps/v8/src/compiler/register-allocator-verifier.h b/deps/v8/src/compiler/register-allocator-verifier.h
index 7e22ab22ad..140b3a3ef5 100644
--- a/deps/v8/src/compiler/register-allocator-verifier.h
+++ b/deps/v8/src/compiler/register-allocator-verifier.h
@@ -185,8 +185,9 @@ class RegisterAllocatorVerifier final : public ZoneObject {
kFixedFPRegister,
kSlot,
kFixedSlot,
- kNone,
- kNoneFP,
+ kRegisterOrSlot,
+ kRegisterOrSlotFP,
+ kRegisterOrSlotOrConstant,
kExplicit,
kSameAsFirst,
kRegisterAndSlot
diff --git a/deps/v8/src/compiler/register-allocator.cc b/deps/v8/src/compiler/register-allocator.cc
index 43eb408f1e..a75317914a 100644
--- a/deps/v8/src/compiler/register-allocator.cc
+++ b/deps/v8/src/compiler/register-allocator.cc
@@ -261,7 +261,7 @@ UsePosition::UsePosition(LifetimePosition pos, InstructionOperand* operand,
: operand_(operand), hint_(hint), next_(nullptr), pos_(pos), flags_(0) {
DCHECK_IMPLIES(hint == nullptr, hint_type == UsePositionHintType::kNone);
bool register_beneficial = true;
- UsePositionType type = UsePositionType::kAny;
+ UsePositionType type = UsePositionType::kRegisterOrSlot;
if (operand_ != nullptr && operand_->IsUnallocated()) {
const UnallocatedOperand* unalloc = UnallocatedOperand::cast(operand_);
if (unalloc->HasRegisterPolicy()) {
@@ -269,8 +269,11 @@ UsePosition::UsePosition(LifetimePosition pos, InstructionOperand* operand,
} else if (unalloc->HasSlotPolicy()) {
type = UsePositionType::kRequiresSlot;
register_beneficial = false;
+ } else if (unalloc->HasRegisterOrSlotOrConstantPolicy()) {
+ type = UsePositionType::kRegisterOrSlotOrConstant;
+ register_beneficial = false;
} else {
- register_beneficial = !unalloc->HasAnyPolicy();
+ register_beneficial = !unalloc->HasRegisterOrSlotPolicy();
}
}
flags_ = TypeField::encode(type) | HintTypeField::encode(hint_type) |
@@ -714,7 +717,8 @@ void LiveRange::ConvertUsesToOperand(const InstructionOperand& op,
case UsePositionType::kRequiresRegister:
DCHECK(op.IsRegister() || op.IsFPRegister());
V8_FALLTHROUGH;
- case UsePositionType::kAny:
+ case UsePositionType::kRegisterOrSlot:
+ case UsePositionType::kRegisterOrSlotOrConstant:
InstructionOperand::ReplaceWith(pos->operand(), &op);
break;
}
@@ -748,7 +752,8 @@ void LiveRange::SetUseHints(int register_index) {
case UsePositionType::kRequiresSlot:
break;
case UsePositionType::kRequiresRegister:
- case UsePositionType::kAny:
+ case UsePositionType::kRegisterOrSlot:
+ case UsePositionType::kRegisterOrSlotOrConstant:
pos->set_assigned_register(register_index);
break;
}
@@ -1674,7 +1679,8 @@ void ConstraintBuilder::MeetRegisterConstraintsForLastInstructionInBlock(
int gap_index = successor->first_instruction_index();
// Create an unconstrained operand for the same virtual register
// and insert a gap move from the fixed output to the operand.
- UnallocatedOperand output_copy(UnallocatedOperand::ANY, output_vreg);
+ UnallocatedOperand output_copy(UnallocatedOperand::REGISTER_OR_SLOT,
+ output_vreg);
data()->AddGapMove(gap_index, Instruction::START, *output, output_copy);
}
}
@@ -1715,7 +1721,8 @@ void ConstraintBuilder::MeetConstraintsAfter(int instr_index) {
bool assigned = false;
if (first_output->HasFixedPolicy()) {
int output_vreg = first_output->virtual_register();
- UnallocatedOperand output_copy(UnallocatedOperand::ANY, output_vreg);
+ UnallocatedOperand output_copy(UnallocatedOperand::REGISTER_OR_SLOT,
+ output_vreg);
bool is_tagged = code()->IsReference(output_vreg);
if (first_output->HasSecondaryStorage()) {
range->MarkHasPreassignedSlot();
@@ -1757,7 +1764,8 @@ void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
UnallocatedOperand* cur_input = UnallocatedOperand::cast(input);
if (cur_input->HasFixedPolicy()) {
int input_vreg = cur_input->virtual_register();
- UnallocatedOperand input_copy(UnallocatedOperand::ANY, input_vreg);
+ UnallocatedOperand input_copy(UnallocatedOperand::REGISTER_OR_SLOT,
+ input_vreg);
bool is_tagged = code()->IsReference(input_vreg);
AllocateFixed(cur_input, instr_index, is_tagged);
data()->AddGapMove(instr_index, Instruction::END, input_copy, *cur_input);
@@ -1774,7 +1782,8 @@ void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
UnallocatedOperand::cast(second->InputAt(0));
int output_vreg = second_output->virtual_register();
int input_vreg = cur_input->virtual_register();
- UnallocatedOperand input_copy(UnallocatedOperand::ANY, input_vreg);
+ UnallocatedOperand input_copy(UnallocatedOperand::REGISTER_OR_SLOT,
+ input_vreg);
*cur_input =
UnallocatedOperand(*cur_input, second_output->virtual_register());
MoveOperands* gap_move = data()->AddGapMove(instr_index, Instruction::END,
@@ -1816,7 +1825,8 @@ void ConstraintBuilder::ResolvePhis(const InstructionBlock* block) {
for (size_t i = 0; i < phi->operands().size(); ++i) {
InstructionBlock* cur_block =
code()->InstructionBlockAt(block->predecessors()[i]);
- UnallocatedOperand input(UnallocatedOperand::ANY, phi->operands()[i]);
+ UnallocatedOperand input(UnallocatedOperand::REGISTER_OR_SLOT,
+ phi->operands()[i]);
MoveOperands* move = data()->AddGapMove(
cur_block->last_instruction_index(), Instruction::END, input, output);
map_value->AddOperand(&move->destination());
@@ -2410,8 +2420,11 @@ void LiveRangeBuilder::BuildLiveRanges() {
if (range->HasSpillOperand() && range->GetSpillOperand()->IsConstant()) {
for (UsePosition* pos = range->first_pos(); pos != nullptr;
pos = pos->next()) {
- if (pos->type() == UsePositionType::kRequiresSlot) continue;
- UsePositionType new_type = UsePositionType::kAny;
+ if (pos->type() == UsePositionType::kRequiresSlot ||
+ pos->type() == UsePositionType::kRegisterOrSlotOrConstant) {
+ continue;
+ }
+ UsePositionType new_type = UsePositionType::kRegisterOrSlot;
// Can't mark phis as needing a register.
if (!pos->pos().IsGapPosition()) {
new_type = UsePositionType::kRequiresRegister;
@@ -2639,7 +2652,7 @@ LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start,
LifetimePosition end) {
int start_instr = start.ToInstructionIndex();
int end_instr = end.ToInstructionIndex();
- DCHECK(start_instr <= end_instr);
+ DCHECK_LE(start_instr, end_instr);
// We have no choice
if (start_instr == end_instr) return end;
diff --git a/deps/v8/src/compiler/register-allocator.h b/deps/v8/src/compiler/register-allocator.h
index 4f6002874c..b5d73a5bba 100644
--- a/deps/v8/src/compiler/register-allocator.h
+++ b/deps/v8/src/compiler/register-allocator.h
@@ -229,9 +229,12 @@ class UseInterval final : public ZoneObject {
DISALLOW_COPY_AND_ASSIGN(UseInterval);
};
-
-enum class UsePositionType : uint8_t { kAny, kRequiresRegister, kRequiresSlot };
-
+enum class UsePositionType : uint8_t {
+ kRegisterOrSlot,
+ kRegisterOrSlotOrConstant,
+ kRequiresRegister,
+ kRequiresSlot
+};
enum class UsePositionHintType : uint8_t {
kNone,
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
index 739fb421ab..34b532a6c6 100644
--- a/deps/v8/src/compiler/representation-change.cc
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -10,7 +10,7 @@
#include "src/code-factory.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
-#include "src/factory-inl.h"
+#include "src/heap/factory-inl.h"
namespace v8 {
namespace internal {
@@ -483,7 +483,7 @@ Node* RepresentationChanger::GetFloat32RepresentationFor(
switch (node->opcode()) {
case IrOpcode::kNumberConstant:
return jsgraph()->Float32Constant(
- DoubleToFloat32(OpParameter<double>(node)));
+ DoubleToFloat32(OpParameter<double>(node->op())));
case IrOpcode::kInt32Constant:
case IrOpcode::kFloat64Constant:
case IrOpcode::kFloat32Constant:
@@ -543,7 +543,7 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
// TODO(jarin) Handle checked constant conversions.
switch (node->opcode()) {
case IrOpcode::kNumberConstant:
- return jsgraph()->Float64Constant(OpParameter<double>(node));
+ return jsgraph()->Float64Constant(OpParameter<double>(node->op()));
case IrOpcode::kInt32Constant:
case IrOpcode::kFloat64Constant:
case IrOpcode::kFloat32Constant:
@@ -608,6 +608,16 @@ Node* RepresentationChanger::MakeTruncatedInt32Constant(double value) {
return jsgraph()->Int32Constant(DoubleToInt32(value));
}
+void RepresentationChanger::InsertUnconditionalDeopt(Node* node,
+ DeoptimizeReason reason) {
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* deopt =
+ jsgraph()->graph()->NewNode(simplified()->CheckIf(reason),
+ jsgraph()->Int32Constant(0), effect, control);
+ NodeProperties::ReplaceEffectInput(node, deopt);
+}
+
Node* RepresentationChanger::GetWord32RepresentationFor(
Node* node, MachineRepresentation output_rep, Type* output_type,
Node* use_node, UseInfo use_info) {
@@ -619,7 +629,7 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
UNREACHABLE();
break;
case IrOpcode::kNumberConstant: {
- double const fv = OpParameter<double>(node);
+ double const fv = OpParameter<double>(node->op());
if (use_info.type_check() == TypeCheckKind::kNone ||
((use_info.type_check() == TypeCheckKind::kSignedSmall ||
use_info.type_check() == TypeCheckKind::kSigned32) &&
@@ -639,7 +649,17 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
return jsgraph()->graph()->NewNode(
jsgraph()->common()->DeadValue(MachineRepresentation::kWord32), node);
} else if (output_rep == MachineRepresentation::kBit) {
- return node; // Sloppy comparison -> word32
+ CHECK(output_type->Is(Type::Boolean()));
+ if (use_info.truncation().IsUsedAsWord32()) {
+ return node;
+ } else {
+ CHECK(Truncation::Any(kIdentifyZeros)
+ .IsLessGeneralThan(use_info.truncation()));
+ CHECK_NE(use_info.type_check(), TypeCheckKind::kNone);
+ InsertUnconditionalDeopt(use_node, DeoptimizeReason::kNotASmi);
+ return jsgraph()->graph()->NewNode(
+ jsgraph()->common()->DeadValue(MachineRepresentation::kWord32), node);
+ }
} else if (output_rep == MachineRepresentation::kFloat64) {
if (output_type->Is(Type::Signed32())) {
op = machine()->ChangeFloat64ToInt32();
diff --git a/deps/v8/src/compiler/representation-change.h b/deps/v8/src/compiler/representation-change.h
index 571f13cd7d..ed6c0a596e 100644
--- a/deps/v8/src/compiler/representation-change.h
+++ b/deps/v8/src/compiler/representation-change.h
@@ -337,8 +337,8 @@ class RepresentationChanger final {
Node* InsertChangeTaggedSignedToInt32(Node* node);
Node* InsertChangeTaggedToFloat64(Node* node);
Node* InsertChangeUint32ToFloat64(Node* node);
-
Node* InsertConversion(Node* node, const Operator* op, Node* use_node);
+ void InsertUnconditionalDeopt(Node* node, DeoptimizeReason reason);
JSGraph* jsgraph() const { return jsgraph_; }
Isolate* isolate() const { return isolate_; }
diff --git a/deps/v8/src/compiler/s390/code-generator-s390.cc b/deps/v8/src/compiler/s390/code-generator-s390.cc
index 8327528c71..9d7c3d8a40 100644
--- a/deps/v8/src/compiler/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/s390/code-generator-s390.cc
@@ -6,11 +6,11 @@
#include "src/assembler-inl.h"
#include "src/callable.h"
-#include "src/compilation-info.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
+#include "src/optimized-compilation-info.h"
#include "src/s390/macro-assembler-s390.h"
namespace v8 {
@@ -1041,8 +1041,7 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
// Check that {kJavaScriptCallCodeStartRegister} is correct.
void CodeGenerator::AssembleCodeStartRegisterCheck() {
Register scratch = r1;
- int pc_offset = __ pc_offset();
- __ larl(scratch, Operand(-pc_offset/2));
+ __ ComputeCodeStartAddress(scratch);
__ CmpP(scratch, kJavaScriptCallCodeStartRegister);
__ Assert(eq, AbortReason::kWrongFunctionCodeStart);
}
@@ -1057,8 +1056,7 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
void CodeGenerator::BailoutIfDeoptimized() {
if (FLAG_debug_code) {
// Check that {kJavaScriptCallCodeStartRegister} is correct.
- int pc_offset = __ pc_offset();
- __ larl(ip, Operand(-pc_offset/2));
+ __ ComputeCodeStartAddress(ip);
__ CmpP(ip, kJavaScriptCallCodeStartRegister);
__ Assert(eq, AbortReason::kWrongFunctionCodeStart);
}
@@ -1073,7 +1071,7 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ Jump(code, RelocInfo::CODE_TARGET, ne);
}
-void CodeGenerator::GenerateSpeculationPoison() {
+void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
Register scratch = r1;
Label current_pc;
@@ -1084,18 +1082,10 @@ void CodeGenerator::GenerateSpeculationPoison() {
// Calculate a mask which has all bits set in the normal case, but has all
// bits cleared if we are speculatively executing the wrong PC.
- // difference = (current - expected) | (expected - current)
- // poison = ~(difference >> (kBitsPerPointer - 1))
- __ LoadRR(kSpeculationPoisonRegister, scratch);
- __ SubP(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kJavaScriptCallCodeStartRegister);
- __ SubP(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister,
- scratch);
- __ OrP(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kJavaScriptCallCodeStartRegister);
- __ ShiftRightArithP(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- Operand(kBitsPerPointer - 1));
- __ NotP(kSpeculationPoisonRegister, kSpeculationPoisonRegister);
+ __ LoadImmP(kSpeculationPoisonRegister, Operand::Zero());
+ __ LoadImmP(r0, Operand(-1));
+ __ CmpP(kJavaScriptCallCodeStartRegister, scratch);
+ __ LoadOnConditionP(eq, kSpeculationPoisonRegister, r0);
}
void CodeGenerator::AssembleRegisterArgumentPoisoning() {
@@ -1333,6 +1323,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ LoadRR(i.OutputRegister(), fp);
}
break;
+ case kArchRootsPointer:
+ __ LoadRR(i.OutputRegister(), kRootRegister);
+ break;
case kArchTruncateDoubleToI:
// TODO(mbrandy): move slow call to stub out of line.
__ TruncateDoubleToIDelayed(zone(), i.OutputRegister(),
@@ -1374,6 +1367,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand(offset.offset()));
break;
}
+ case kArchPoisonOnSpeculationWord:
+ DCHECK_EQ(i.OutputRegister(), i.InputRegister(0));
+ __ AndP(i.InputRegister(0), kSpeculationPoisonRegister);
+ break;
case kS390_Abs32:
// TODO(john.yan): zero-ext
__ lpr(i.OutputRegister(0), i.InputRegister(0));
@@ -1802,8 +1799,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_IEEE754_UNOP(log10);
break;
case kIeee754Float64Pow: {
- __ CallStubDelayed(new (zone())
- MathPowStub(nullptr, MathPowStub::DOUBLE));
+ __ CallStubDelayed(new (zone()) MathPowStub());
__ Move(d1, d3);
break;
}
@@ -2467,7 +2463,7 @@ void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
}
condition = NegateFlagsCondition(condition);
- __ XorP(r0, r0);
+ __ LoadImmP(r0, Operand::Zero());
__ LoadOnConditionP(FlagsConditionToCondition(condition, kArchNop),
kSpeculationPoisonRegister, r0);
}
@@ -2675,7 +2671,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
- InitializePoisonForLoadsIfNeeded();
+ ResetSpeculationPoison();
}
const RegList double_saves = call_descriptor->CalleeSavedFPRegisters();
@@ -2775,7 +2771,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
switch (src.type()) {
case Constant::kInt32:
#if V8_TARGET_ARCH_S390X
- if (RelocInfo::IsWasmSizeReference(src.rmode())) {
+ if (false) {
#else
if (RelocInfo::IsWasmReference(src.rmode())) {
#endif
@@ -2789,7 +2785,6 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (RelocInfo::IsWasmPtrReference(src.rmode())) {
__ mov(dst, Operand(src.ToInt64(), src.rmode()));
} else {
- DCHECK(!RelocInfo::IsWasmSizeReference(src.rmode()));
__ Load(dst, Operand(src.ToInt64()));
}
#else
diff --git a/deps/v8/src/compiler/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
index 61a335d46e..2c658880c9 100644
--- a/deps/v8/src/compiler/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
@@ -101,9 +101,9 @@ class S390OperandGenerator final : public OperandGenerator {
int64_t GetImmediate(Node* node) {
if (node->opcode() == IrOpcode::kInt32Constant)
- return OpParameter<int32_t>(node);
+ return OpParameter<int32_t>(node->op());
else if (node->opcode() == IrOpcode::kInt64Constant)
- return OpParameter<int64_t>(node);
+ return OpParameter<int64_t>(node->op());
else
UNIMPLEMENTED();
return 0L;
@@ -112,9 +112,9 @@ class S390OperandGenerator final : public OperandGenerator {
bool CanBeImmediate(Node* node, OperandModes mode) {
int64_t value;
if (node->opcode() == IrOpcode::kInt32Constant)
- value = OpParameter<int32_t>(node);
+ value = OpParameter<int32_t>(node->op());
else if (node->opcode() == IrOpcode::kInt64Constant)
- value = OpParameter<int64_t>(node);
+ value = OpParameter<int64_t>(node->op());
else
return false;
return CanBeImmediate(value, mode);
@@ -271,7 +271,7 @@ bool S390OpcodeOnlySupport12BitDisp(InstructionCode op) {
ArchOpcode SelectLoadOpcode(Node* node) {
NodeMatcher m(node);
- DCHECK(m.IsLoad());
+ DCHECK(m.IsLoad() || m.IsPoisonedLoad());
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
ArchOpcode opcode = kArchNop;
switch (load_rep.representation()) {
@@ -583,11 +583,6 @@ void VisitUnaryOp(InstructionSelector* selector, Node* node,
inputs[input_count++] = g.TempImmediate(!canEliminateZeroExt);
}
- if (cont->IsBranch()) {
- inputs[input_count++] = g.Label(cont->true_block());
- inputs[input_count++] = g.Label(cont->false_block());
- }
-
if (!cont->IsDeoptimize()) {
// If we can deoptimize as a result of the binop, we need to make sure
// that the deopt inputs are not overwritten by the binop result. One way
@@ -602,27 +597,13 @@ void VisitUnaryOp(InstructionSelector* selector, Node* node,
outputs[output_count++] = g.DefineSameAsFirst(node);
}
- if (cont->IsSet()) {
- outputs[output_count++] = g.DefineAsRegister(cont->result());
- }
-
DCHECK_NE(0u, input_count);
DCHECK_NE(0u, output_count);
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
- opcode = cont->Encode(opcode);
-
- if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->feedback(),
- cont->frame_state());
- } else if (cont->IsTrap()) {
- inputs[input_count++] = g.UseImmediate(cont->trap_id());
- selector->Emit(opcode, output_count, outputs, input_count, inputs);
- } else {
- selector->Emit(opcode, output_count, outputs, input_count, inputs);
- }
+ selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
+ inputs, cont);
}
template <class CanCombineWithLoad>
@@ -658,11 +639,6 @@ void VisitBinOp(InstructionSelector* selector, Node* node,
inputs[input_count++] = g.TempImmediate(!canEliminateZeroExt);
}
- if (cont->IsBranch()) {
- inputs[input_count++] = g.Label(cont->true_block());
- inputs[input_count++] = g.Label(cont->false_block());
- }
-
if ((operand_mode & OperandMode::kAllowDistinctOps) &&
// If we can deoptimize as a result of the binop, we need to make sure
// that the deopt inputs are not overwritten by the binop result. One way
@@ -678,27 +654,13 @@ void VisitBinOp(InstructionSelector* selector, Node* node,
outputs[output_count++] = g.DefineSameAsFirst(node);
}
- if (cont->IsSet()) {
- outputs[output_count++] = g.DefineAsRegister(cont->result());
- }
-
DCHECK_NE(0u, input_count);
DCHECK_NE(0u, output_count);
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
- opcode = cont->Encode(opcode);
-
- if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->feedback(),
- cont->frame_state());
- } else if (cont->IsTrap()) {
- inputs[input_count++] = g.UseImmediate(cont->trap_id());
- selector->Emit(opcode, output_count, outputs, input_count, inputs);
- } else {
- selector->Emit(opcode, output_count, outputs, input_count, inputs);
- }
+ selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
+ inputs, cont);
}
} // namespace
@@ -728,7 +690,7 @@ void InstructionSelector::VisitLoad(Node* node) {
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
opcode |= AddressingModeField::encode(mode);
if (node->opcode() == IrOpcode::kPoisonedLoad) {
- CHECK_EQ(load_poisoning_, LoadPoisoning::kDoPoison);
+ CHECK_EQ(poisoning_enabled_, PoisoningMitigationLevel::kOn);
opcode |= MiscField::encode(kMemoryAccessPoisoned);
}
@@ -1651,22 +1613,7 @@ namespace {
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
InstructionOperand left, InstructionOperand right,
FlagsContinuation* cont) {
- S390OperandGenerator g(selector);
- opcode = cont->Encode(opcode);
- if (cont->IsBranch()) {
- selector->Emit(opcode, g.NoOutput(), left, right,
- g.Label(cont->true_block()), g.Label(cont->false_block()));
- } else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
- cont->reason(), cont->feedback(),
- cont->frame_state());
- } else if (cont->IsSet()) {
- selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
- } else {
- DCHECK(cont->IsTrap());
- selector->Emit(opcode, g.NoOutput(), left, right,
- g.UseImmediate(cont->trap_id()));
- }
+ selector->EmitWithContinuation(opcode, left, right, cont);
}
void VisitLoadAndTest(InstructionSelector* selector, InstructionCode opcode,
@@ -1727,27 +1674,9 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
inputs[input_count++] = g.UseAnyExceptImmediate(right);
}
- opcode = cont->Encode(opcode);
- if (cont->IsBranch()) {
- inputs[input_count++] = g.Label(cont->true_block());
- inputs[input_count++] = g.Label(cont->false_block());
- } else if (cont->IsSet()) {
- outputs[output_count++] = g.DefineAsRegister(cont->result());
- } else if (cont->IsTrap()) {
- inputs[input_count++] = g.UseImmediate(cont->trap_id());
- } else {
- DCHECK(cont->IsDeoptimize());
- // nothing to do
- }
-
DCHECK(input_count <= 8 && output_count <= 1);
- if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
- cont->kind(), cont->reason(), cont->feedback(),
- cont->frame_state());
- } else {
- selector->Emit(opcode, output_count, outputs, input_count, inputs);
- }
+ selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
+ inputs, cont);
}
void VisitWord32Compare(InstructionSelector* selector, Node* node,
@@ -1832,28 +1761,9 @@ void VisitLoadAndTest(InstructionSelector* selector, InstructionCode opcode,
outputs[output_count++] = g.DefineAsRegister(value);
}
- opcode = cont->Encode(opcode);
- if (cont->IsBranch()) {
- inputs[input_count++] = g.Label(cont->true_block());
- inputs[input_count++] = g.Label(cont->false_block());
- } else if (cont->IsSet()) {
- outputs[output_count++] = g.DefineAsRegister(cont->result());
- } else if (cont->IsTrap()) {
- inputs[input_count++] = g.UseImmediate(cont->trap_id());
- } else {
- DCHECK(cont->IsDeoptimize());
- // nothing to do
- }
-
DCHECK(input_count <= 8 && output_count <= 2);
- opcode = cont->Encode(opcode);
- if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->feedback(),
- cont->frame_state());
- } else {
- selector->Emit(opcode, output_count, outputs, input_count, inputs);
- }
+ selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
+ inputs, cont);
}
} // namespace
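The hunks above drop the hand-rolled branch/deoptimize/set/trap dispatch from each visitor and funnel it through a single EmitWithContinuation call. A minimal standalone sketch of the dispatch such a helper centralizes, using stand-in types rather than the real InstructionSelector API:

#include <utility>
#include <vector>

enum class ContinuationKind { kBranch, kDeoptimize, kSet, kTrap };

struct Continuation {
  ContinuationKind kind;
  int true_block = -1, false_block = -1;  // for kBranch
  int result_reg = -1;                    // for kSet
  int trap_id = -1;                       // for kTrap
};

struct Instruction {
  int opcode;
  std::vector<int> outputs, inputs;
};

// One shared helper appends the continuation-specific operands, instead of
// every visitor repeating the four-way dispatch inline.
Instruction EmitWithContinuation(int opcode, std::vector<int> outputs,
                                 std::vector<int> inputs,
                                 const Continuation& cont) {
  switch (cont.kind) {
    case ContinuationKind::kBranch:
      inputs.push_back(cont.true_block);
      inputs.push_back(cont.false_block);
      break;
    case ContinuationKind::kSet:
      outputs.push_back(cont.result_reg);
      break;
    case ContinuationKind::kTrap:
      inputs.push_back(cont.trap_id);
      break;
    case ContinuationKind::kDeoptimize:
      // The real helper also threads kind/reason/feedback/frame state here.
      break;
  }
  return Instruction{opcode, std::move(outputs), std::move(inputs)};
}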
@@ -2095,20 +2005,20 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
// Emit either ArchTableSwitch or ArchLookupSwitch.
if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
static const size_t kMaxTableSwitchValueRange = 2 << 16;
- size_t table_space_cost = 4 + sw.value_range;
+ size_t table_space_cost = 4 + sw.value_range();
size_t table_time_cost = 3;
- size_t lookup_space_cost = 3 + 2 * sw.case_count;
- size_t lookup_time_cost = sw.case_count;
- if (sw.case_count > 0 &&
+ size_t lookup_space_cost = 3 + 2 * sw.case_count();
+ size_t lookup_time_cost = sw.case_count();
+ if (sw.case_count() > 0 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min() &&
- sw.value_range <= kMaxTableSwitchValueRange) {
+ sw.min_value() > std::numeric_limits<int32_t>::min() &&
+ sw.value_range() <= kMaxTableSwitchValueRange) {
InstructionOperand index_operand = value_operand;
- if (sw.min_value) {
+ if (sw.min_value()) {
index_operand = g.TempRegister();
Emit(kS390_Lay | AddressingModeField::encode(kMode_MRI), index_operand,
- value_operand, g.TempImmediate(-sw.min_value));
+ value_operand, g.TempImmediate(-sw.min_value()));
}
#if V8_TARGET_ARCH_S390X
InstructionOperand index_operand_zero_ext = g.TempRegister();
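For reference, the jump-table heuristic touched in the hunk above weighs code-size cost against lookup time with a 3x weight on time. A standalone sketch with the same constants, using a stand-in struct instead of the real SwitchInfo accessors:

#include <cstddef>
#include <cstdint>
#include <limits>

struct SwitchStats {          // stand-in for sw.case_count()/value_range()/min_value()
  size_t case_count;
  size_t value_range;         // max_value - min_value + 1
  int32_t min_value;
};

bool UseTableSwitch(const SwitchStats& sw) {
  static const size_t kMaxTableSwitchValueRange = 2 << 16;
  size_t table_space_cost = 4 + sw.value_range;
  size_t table_time_cost = 3;
  size_t lookup_space_cost = 3 + 2 * sw.case_count;
  size_t lookup_time_cost = sw.case_count;
  // Time is weighted 3x against space. Example: 10 dense cases (range 10)
  // score 14 + 9 = 23 for the table vs. 23 + 30 = 53 for the lookup chain,
  // so the table switch wins.
  return sw.case_count > 0 &&
         table_space_cost + 3 * table_time_cost <=
             lookup_space_cost + 3 * lookup_time_cost &&
         sw.min_value > std::numeric_limits<int32_t>::min() &&
         sw.value_range <= kMaxTableSwitchValueRange;
}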
@@ -2567,6 +2477,16 @@ void InstructionSelector::VisitF32x4Neg(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Abs(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF32x4RecipSqrtApprox(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitF32x4RecipApprox(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4AddHoriz(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI32x4AddHoriz(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI16x8AddHoriz(Node* node) { UNIMPLEMENTED(); }
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
@@ -2592,9 +2512,6 @@ InstructionSelector::AlignmentRequirements() {
FullUnalignedAccessSupport();
}
-// static
-bool InstructionSelector::SupportsSpeculationPoisoning() { return true; }
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/schedule.cc b/deps/v8/src/compiler/schedule.cc
index 2ec3f11be7..41fad5da63 100644
--- a/deps/v8/src/compiler/schedule.cc
+++ b/deps/v8/src/compiler/schedule.cc
@@ -352,6 +352,25 @@ void Schedule::EnsureCFGWellFormedness() {
if (block->deferred()) {
EnsureDeferredCodeSingleEntryPoint(block);
}
+ } else {
+ EliminateNoopPhiNodes(block);
+ }
+ }
+}
+
+void Schedule::EliminateNoopPhiNodes(BasicBlock* block) {
+ // Ensure that useless phi nodes in blocks that only have a single predecessor
+ // -- which can happen with the automatically generated code in the CSA and
+ // Torque -- are pruned.
+ if (block->PredecessorCount() == 1) {
+ for (size_t i = 0; i < block->NodeCount();) {
+ Node* node = block->NodeAt(i);
+ if (node->opcode() == IrOpcode::kPhi) {
+ node->ReplaceUses(node->InputAt(0));
+ block->RemoveNode(block->begin() + i);
+ } else {
+ ++i;
+ }
}
}
}
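EliminateNoopPhiNodes relies on the fact that a phi in a single-predecessor block carries exactly one meaningful input. A toy model of the pruning on plain structs, not the real Schedule/BasicBlock/Node classes:

#include <cstddef>
#include <vector>

struct Node {
  bool is_phi = false;
  Node* input = nullptr;          // first (and only meaningful) value input
  std::vector<Node**> use_sites;  // places that currently point at this node
};

void EliminateNoopPhis(std::vector<Node*>& block_nodes, size_t predecessor_count) {
  if (predecessor_count != 1) return;
  for (size_t i = 0; i < block_nodes.size();) {
    Node* node = block_nodes[i];
    if (node->is_phi) {
      // Redirect every use of the phi to its single input, then drop the phi.
      for (Node** use : node->use_sites) *use = node->input;
      block_nodes.erase(block_nodes.begin() + static_cast<std::ptrdiff_t>(i));
    } else {
      ++i;
    }
  }
}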
diff --git a/deps/v8/src/compiler/schedule.h b/deps/v8/src/compiler/schedule.h
index ed69958e8b..74e51c5341 100644
--- a/deps/v8/src/compiler/schedule.h
+++ b/deps/v8/src/compiler/schedule.h
@@ -274,6 +274,10 @@ class V8_EXPORT_PRIVATE Schedule final : public NON_EXPORTED_BASE(ZoneObject) {
// Ensure properties of the CFG assumed by further stages.
void EnsureCFGWellFormedness();
+ // Eliminates no-op phi nodes added for blocks that only have a single
+ // predecessor. This ensures the property required for SSA deconstruction that
+ // the target block of a control flow split has no phis.
+ void EliminateNoopPhiNodes(BasicBlock* block);
// Ensure split-edge form for a hand-assembled schedule.
void EnsureSplitEdgeForm(BasicBlock* block);
// Ensure entry into a deferred block happens from a single hot block.
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.cc b/deps/v8/src/compiler/simd-scalar-lowering.cc
index 980c88a6e6..3316b92c0b 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.cc
+++ b/deps/v8/src/compiler/simd-scalar-lowering.cc
@@ -116,6 +116,8 @@ void SimdScalarLowering::LowerGraph() {
V(F32x4UConvertI32x4) \
V(F32x4Abs) \
V(F32x4Neg) \
+ V(F32x4RecipApprox) \
+ V(F32x4RecipSqrtApprox) \
V(F32x4Add) \
V(F32x4AddHoriz) \
V(F32x4Sub) \
@@ -182,7 +184,8 @@ void SimdScalarLowering::LowerGraph() {
V(I8x16LtS) \
V(I8x16LeS) \
V(I8x16LtU) \
- V(I8x16LeU)
+ V(I8x16LeU) \
+ V(S8x16Shuffle)
MachineType SimdScalarLowering::MachineTypeFrom(SimdType simdType) {
switch (simdType) {
@@ -661,7 +664,7 @@ void SimdScalarLowering::LowerConvertFromFloat(Node* node, bool is_signed) {
void SimdScalarLowering::LowerShiftOp(Node* node, SimdType type) {
DCHECK_EQ(1, node->InputCount());
- int32_t shift_amount = OpParameter<int32_t>(node);
+ int32_t shift_amount = OpParameter<int32_t>(node->op());
Node* shift_node = graph()->NewNode(common()->Int32Constant(shift_amount));
Node** rep = GetReplacementsWithType(node->InputAt(0), type);
int num_lanes = NumLanes(type);
@@ -784,7 +787,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
}
case IrOpcode::kUnalignedLoad: {
MachineRepresentation rep =
- UnalignedLoadRepresentationOf(node->op()).representation();
+ LoadRepresentationOf(node->op()).representation();
const Operator* load_op;
load_op = machine()->UnalignedLoad(MachineTypeFrom(rep_type));
LowerLoadOp(rep, node, load_op, rep_type);
@@ -1024,6 +1027,22 @@ void SimdScalarLowering::LowerNode(Node* node) {
F32X4_UNOP_CASE(Abs)
F32X4_UNOP_CASE(Neg)
#undef F32x4_UNOP_CASE
+ case IrOpcode::kF32x4RecipApprox:
+ case IrOpcode::kF32x4RecipSqrtApprox: {
+ DCHECK_EQ(1, node->InputCount());
+ Node** rep = GetReplacementsWithType(node->InputAt(0), rep_type);
+ Node** rep_node = zone()->NewArray<Node*>(num_lanes);
+ Node* float_one = graph()->NewNode(common()->Float32Constant(1.0));
+ for (int i = 0; i < num_lanes; ++i) {
+ Node* tmp = rep[i];
+ if (node->opcode() == IrOpcode::kF32x4RecipSqrtApprox) {
+ tmp = graph()->NewNode(machine()->Float32Sqrt(), rep[i]);
+ }
+ rep_node[i] = graph()->NewNode(machine()->Float32Div(), float_one, tmp);
+ }
+ ReplaceNode(node, rep_node, num_lanes);
+ break;
+ }
case IrOpcode::kF32x4SConvertI32x4: {
LowerUnaryOp(node, SimdType::kInt32x4, machine()->RoundInt32ToFloat32());
break;
@@ -1051,7 +1070,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
case IrOpcode::kF32x4ExtractLane:
case IrOpcode::kI16x8ExtractLane:
case IrOpcode::kI8x16ExtractLane: {
- int32_t lane = OpParameter<int32_t>(node);
+ int32_t lane = OpParameter<int32_t>(node->op());
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
rep_node[0] = GetReplacementsWithType(node->InputAt(0), rep_type)[lane];
for (int i = 1; i < num_lanes; ++i) {
@@ -1066,7 +1085,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
case IrOpcode::kI8x16ReplaceLane: {
DCHECK_EQ(2, node->InputCount());
Node* repNode = node->InputAt(1);
- int32_t lane = OpParameter<int32_t>(node);
+ int32_t lane = OpParameter<int32_t>(node->op());
Node** old_rep_node = GetReplacementsWithType(node->InputAt(0), rep_type);
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
for (int i = 0; i < num_lanes; ++i) {
@@ -1154,6 +1173,24 @@ void SimdScalarLowering::LowerNode(Node* node) {
ReplaceNode(node, rep_node, num_lanes);
break;
}
+ case IrOpcode::kS8x16Shuffle: {
+ DCHECK_EQ(2, node->InputCount());
+ const uint8_t* shuffle = OpParameter<uint8_t*>(node->op());
+ Node** rep_left = GetReplacementsWithType(node->InputAt(0), rep_type);
+ Node** rep_right = GetReplacementsWithType(node->InputAt(1), rep_type);
+ Node** rep_node = zone()->NewArray<Node*>(16);
+ for (int i = 0; i < 16; i++) {
+ int lane = shuffle[i];
+#if defined(V8_TARGET_BIG_ENDIAN)
+ rep_node[15 - i] =
+ lane < 16 ? rep_left[15 - lane] : rep_right[31 - lane];
+#else
+ rep_node[i] = lane < 16 ? rep_left[lane] : rep_right[lane - 16];
+#endif
+ }
+ ReplaceNode(node, rep_node, 16);
+ break;
+ }
default: { DefaultLowering(node); }
}
}
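The two scalar lowerings added above reduce to ordinary per-lane arithmetic and indexed lane selection. A standalone little-endian model on plain arrays, not the real SimdScalarLowering graph machinery:

#include <array>
#include <cmath>
#include <cstdint>

// F32x4RecipApprox / F32x4RecipSqrtApprox become a per-lane 1/x or 1/sqrt(x).
std::array<float, 4> RecipApprox(const std::array<float, 4>& v, bool sqrt_first) {
  std::array<float, 4> out;
  for (int i = 0; i < 4; ++i) {
    float tmp = sqrt_first ? std::sqrt(v[i]) : v[i];
    out[i] = 1.0f / tmp;
  }
  return out;
}

// S8x16Shuffle: each shuffle index in [0, 31] picks a lane from the left
// input (0-15) or the right input (16-31).
std::array<uint8_t, 16> Shuffle(const std::array<uint8_t, 16>& left,
                                const std::array<uint8_t, 16>& right,
                                const std::array<uint8_t, 16>& shuffle) {
  std::array<uint8_t, 16> out;
  for (int i = 0; i < 16; ++i) {
    int lane = shuffle[i];
    out[i] = lane < 16 ? left[lane] : right[lane - 16];
  }
  return out;
}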
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index bde73f4e59..8a10ea71e2 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -126,6 +126,7 @@ UseInfo CheckedUseInfoAsFloat64FromHint(NumberOperationHint hint,
UseInfo TruncatingUseInfoFromRepresentation(MachineRepresentation rep) {
switch (rep) {
case MachineRepresentation::kTaggedSigned:
+ return UseInfo::TaggedSigned();
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
return UseInfo::AnyTagged();
@@ -1506,7 +1507,7 @@ class RepresentationSelector {
case IrOpcode::kExternalConstant:
return VisitLeaf(node, MachineType::PointerRepresentation());
case IrOpcode::kNumberConstant: {
- double const value = OpParameter<double>(node);
+ double const value = OpParameter<double>(node->op());
int value_as_int;
if (DoubleToSmiInteger(value, &value_as_int)) {
VisitLeaf(node, MachineRepresentation::kTaggedSigned);
@@ -1524,7 +1525,7 @@ class RepresentationSelector {
case IrOpcode::kPointerConstant: {
VisitLeaf(node, MachineType::PointerRepresentation());
if (lower()) {
- intptr_t const value = OpParameter<intptr_t>(node);
+ intptr_t const value = OpParameter<intptr_t>(node->op());
DeferReplacement(node, lowering->jsgraph()->IntPtrConstant(value));
}
return;
@@ -2348,47 +2349,22 @@ class RepresentationSelector {
return VisitBinop(node, UseInfo::AnyTagged(),
MachineRepresentation::kTaggedPointer);
}
- case IrOpcode::kStringCharAt: {
- VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
- MachineRepresentation::kTaggedPointer);
- return;
- }
case IrOpcode::kStringCharCodeAt: {
- Type* string_type = TypeOf(node->InputAt(0));
- if (string_type->Is(Type::SeqString())) {
- VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32);
- if (lower()) {
- NodeProperties::ChangeOp(node, simplified()->SeqStringCharCodeAt());
- }
- } else {
- VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32);
- }
- return;
+ return VisitBinop(node, UseInfo::AnyTagged(),
+ UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
}
case IrOpcode::kStringCodePointAt: {
- Type* string_type = TypeOf(node->InputAt(0));
- if (string_type->Is(Type::SeqString())) {
- VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32);
- if (lower()) {
- UnicodeEncoding encoding = UnicodeEncodingOf(node->op());
- NodeProperties::ChangeOp(
- node, simplified()->SeqStringCodePointAt(encoding));
- }
- } else {
- VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
- MachineRepresentation::kTaggedSigned);
- }
- return;
+ return VisitBinop(node, UseInfo::AnyTagged(),
+ UseInfo::TruncatingWord32(),
+ MachineRepresentation::kTaggedSigned);
}
- case IrOpcode::kStringFromCharCode: {
+ case IrOpcode::kStringFromSingleCharCode: {
VisitUnop(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kTaggedPointer);
return;
}
- case IrOpcode::kStringFromCodePoint: {
+ case IrOpcode::kStringFromSingleCodePoint: {
VisitUnop(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kTaggedPointer);
return;
@@ -2512,17 +2488,6 @@ class RepresentationSelector {
VisitCheck(node, Type::Symbol(), lowering);
return;
}
- case IrOpcode::kCheckSeqString: {
- if (InputIs(node, Type::SeqString())) {
- VisitUnop(node, UseInfo::AnyTagged(),
- MachineRepresentation::kTaggedPointer);
- if (lower()) DeferReplacement(node, node->InputAt(0));
- } else {
- VisitUnop(node, UseInfo::CheckedHeapObjectAsTaggedPointer(),
- MachineRepresentation::kTaggedPointer);
- }
- return;
- }
case IrOpcode::kAllocate: {
ProcessInput(node, 0, UseInfo::TruncatingWord32());
@@ -2551,6 +2516,11 @@ class RepresentationSelector {
MachineRepresentation field_representation =
access.machine_type.representation();
+ // Convert to Smi if possible, such that we can avoid a write barrier.
+ if (field_representation == MachineRepresentation::kTagged &&
+ TypeOf(value_node)->Is(Type::SignedSmall())) {
+ field_representation = MachineRepresentation::kTaggedSigned;
+ }
WriteBarrierKind write_barrier_kind = WriteBarrierKindFor(
access.base_is_tagged, field_representation, access.offset,
access.type, input_info->representation(), value_node);
@@ -2584,6 +2554,11 @@ class RepresentationSelector {
MachineRepresentation element_representation =
access.machine_type.representation();
+ // Convert to Smi if possible, such that we can avoid a write barrier.
+ if (element_representation == MachineRepresentation::kTagged &&
+ TypeOf(value_node)->Is(Type::SignedSmall())) {
+ element_representation = MachineRepresentation::kTaggedSigned;
+ }
WriteBarrierKind write_barrier_kind = WriteBarrierKindFor(
access.base_is_tagged, element_representation, access.type,
input_info->representation(), value_node);
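Both store hunks above narrow a kTagged slot to kTaggedSigned when the stored value is provably a SignedSmall: a Smi is not a heap pointer, so the GC never needs a remembered-set entry for it and the write barrier can be skipped. A schematic sketch of that decision with stand-in enums, not the real MachineRepresentation/WriteBarrierKind:

enum class Rep { kTagged, kTaggedSigned };
enum class Barrier { kFullWriteBarrier, kNoWriteBarrier };

// Narrow the representation when the value is statically a SignedSmall.
Rep NarrowFieldRep(Rep field_rep, bool value_is_signed_small) {
  if (field_rep == Rep::kTagged && value_is_signed_small)
    return Rep::kTaggedSigned;
  return field_rep;
}

// Storing a Smi never stores a heap pointer, so no write barrier is needed.
Barrier BarrierFor(Rep rep) {
  return rep == Rep::kTaggedSigned ? Barrier::kNoWriteBarrier
                                   : Barrier::kFullWriteBarrier;
}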
@@ -2769,6 +2744,89 @@ class RepresentationSelector {
VisitObjectIs(node, Type::DetectableCallable(), lowering);
return;
}
+ case IrOpcode::kObjectIsFiniteNumber: {
+ Type* const input_type = GetUpperBound(node->InputAt(0));
+ if (input_type->Is(type_cache_.kSafeInteger)) {
+ VisitUnop(node, UseInfo::None(), MachineRepresentation::kBit);
+ if (lower()) {
+ DeferReplacement(node, lowering->jsgraph()->Int32Constant(1));
+ }
+ } else if (!input_type->Maybe(Type::Number())) {
+ VisitUnop(node, UseInfo::Any(), MachineRepresentation::kBit);
+ if (lower()) {
+ DeferReplacement(node, lowering->jsgraph()->Int32Constant(0));
+ }
+ } else if (input_type->Is(Type::Number())) {
+ VisitUnop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kBit);
+ if (lower()) {
+ NodeProperties::ChangeOp(node,
+ lowering->simplified()->NumberIsFinite());
+ }
+ } else {
+ VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
+ }
+ return;
+ }
+ case IrOpcode::kNumberIsFinite: {
+ UNREACHABLE();
+ }
+ case IrOpcode::kObjectIsSafeInteger: {
+ Type* const input_type = GetUpperBound(node->InputAt(0));
+ if (input_type->Is(type_cache_.kSafeInteger)) {
+ VisitUnop(node, UseInfo::None(), MachineRepresentation::kBit);
+ if (lower()) {
+ DeferReplacement(node, lowering->jsgraph()->Int32Constant(1));
+ }
+ } else if (!input_type->Maybe(Type::Number())) {
+ VisitUnop(node, UseInfo::Any(), MachineRepresentation::kBit);
+ if (lower()) {
+ DeferReplacement(node, lowering->jsgraph()->Int32Constant(0));
+ }
+ } else if (input_type->Is(Type::Number())) {
+ VisitUnop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kBit);
+ if (lower()) {
+ NodeProperties::ChangeOp(
+ node, lowering->simplified()->NumberIsSafeInteger());
+ }
+ } else {
+ VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
+ }
+ return;
+ }
+ case IrOpcode::kNumberIsSafeInteger: {
+ UNREACHABLE();
+ }
+ case IrOpcode::kObjectIsInteger: {
+ Type* const input_type = GetUpperBound(node->InputAt(0));
+ if (input_type->Is(type_cache_.kSafeInteger)) {
+ VisitUnop(node, UseInfo::None(), MachineRepresentation::kBit);
+ if (lower()) {
+ DeferReplacement(node, lowering->jsgraph()->Int32Constant(1));
+ }
+ } else if (!input_type->Maybe(Type::Number())) {
+ VisitUnop(node, UseInfo::Any(), MachineRepresentation::kBit);
+ if (lower()) {
+ DeferReplacement(node, lowering->jsgraph()->Int32Constant(0));
+ }
+ } else if (input_type->Is(Type::Number())) {
+ VisitUnop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kBit);
+ if (lower()) {
+ NodeProperties::ChangeOp(node,
+ lowering->simplified()->NumberIsInteger());
+ }
+ } else {
+ VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
+ }
+ return;
+ }
+ case IrOpcode::kNumberIsInteger: {
+ VisitUnop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kBit);
+ return;
+ }
case IrOpcode::kObjectIsMinusZero: {
Type* const input_type = GetUpperBound(node->InputAt(0));
if (input_type->Is(Type::MinusZero())) {
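Each of the new ObjectIs{FiniteNumber,SafeInteger,Integer} cases follows the same shape: fold to a constant when the static type already decides the answer, lower to the float64 predicate when the input is known to be a number, and fall back to the generic tagged check otherwise. A condensed model of that decision, illustrative only and not the RepresentationSelector API:

enum class Outcome { kAlwaysTrue, kAlwaysFalse, kFloat64Check, kGenericCheck };

struct StaticType {         // stand-in for the Type* queries used above
  bool is_safe_integer;     // provably a safe integer
  bool maybe_number;        // could be a number at all
  bool is_number;           // provably a number
};

Outcome ClassifyObjectIsFiniteNumber(const StaticType& t) {
  if (t.is_safe_integer) return Outcome::kAlwaysTrue;   // fold to constant 1
  if (!t.maybe_number) return Outcome::kAlwaysFalse;    // fold to constant 0
  if (t.is_number) return Outcome::kFloat64Check;       // lower to NumberIsFinite
  return Outcome::kGenericCheck;                        // keep the tagged check
}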
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index f4802a96d0..5a09dc34ae 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -550,9 +550,8 @@ Type* AllocateTypeOf(const Operator* op) {
}
UnicodeEncoding UnicodeEncodingOf(const Operator* op) {
- DCHECK(op->opcode() == IrOpcode::kStringFromCodePoint ||
- op->opcode() == IrOpcode::kStringCodePointAt ||
- op->opcode() == IrOpcode::kSeqStringCodePointAt);
+ DCHECK(op->opcode() == IrOpcode::kStringFromSingleCodePoint ||
+ op->opcode() == IrOpcode::kStringCodePointAt);
return OpParameter<UnicodeEncoding>(op);
}
@@ -671,7 +670,7 @@ bool operator==(CheckMinusZeroParameters const& lhs,
V(NumberToUint8Clamped, Operator::kNoProperties, 1, 0) \
V(NumberSilenceNaN, Operator::kNoProperties, 1, 0) \
V(StringToNumber, Operator::kNoProperties, 1, 0) \
- V(StringFromCharCode, Operator::kNoProperties, 1, 0) \
+ V(StringFromSingleCharCode, Operator::kNoProperties, 1, 0) \
V(StringIndexOf, Operator::kNoProperties, 3, 0) \
V(StringLength, Operator::kNoProperties, 1, 0) \
V(StringToLowerCaseIntl, Operator::kNoProperties, 1, 0) \
@@ -710,6 +709,12 @@ bool operator==(CheckMinusZeroParameters const& lhs,
V(ObjectIsSymbol, Operator::kNoProperties, 1, 0) \
V(ObjectIsUndetectable, Operator::kNoProperties, 1, 0) \
V(NumberIsFloat64Hole, Operator::kNoProperties, 1, 0) \
+ V(NumberIsFinite, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsFiniteNumber, Operator::kNoProperties, 1, 0) \
+ V(NumberIsInteger, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsSafeInteger, Operator::kNoProperties, 1, 0) \
+ V(NumberIsSafeInteger, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsInteger, Operator::kNoProperties, 1, 0) \
V(ConvertTaggedHoleToUndefined, Operator::kNoProperties, 1, 0) \
V(SameValue, Operator::kCommutative, 2, 0) \
V(ReferenceEqual, Operator::kCommutative, 2, 0) \
@@ -721,9 +726,7 @@ bool operator==(CheckMinusZeroParameters const& lhs,
V(MaskIndexWithBound, Operator::kNoProperties, 2, 0)
#define EFFECT_DEPENDENT_OP_LIST(V) \
- V(StringCharAt, Operator::kNoProperties, 2, 1) \
V(StringCharCodeAt, Operator::kNoProperties, 2, 1) \
- V(SeqStringCharCodeAt, Operator::kNoProperties, 2, 1) \
V(StringSubstring, Operator::kNoProperties, 3, 1)
#define SPECULATIVE_NUMBER_BINOP_LIST(V) \
@@ -739,7 +742,6 @@ bool operator==(CheckMinusZeroParameters const& lhs,
V(CheckInternalizedString, 1, 1) \
V(CheckNotTaggedHole, 1, 1) \
V(CheckReceiver, 1, 1) \
- V(CheckSeqString, 1, 1) \
V(CheckSymbol, 1, 1) \
V(CheckedInt32Add, 2, 1) \
V(CheckedInt32Div, 2, 1) \
@@ -834,30 +836,17 @@ struct SimplifiedOperatorGlobalCache final {
kStringCodePointAtOperatorUTF32;
template <UnicodeEncoding kEncoding>
- struct SeqStringCodePointAtOperator final
+ struct StringFromSingleCodePointOperator final
: public Operator1<UnicodeEncoding> {
- SeqStringCodePointAtOperator()
- : Operator1<UnicodeEncoding>(IrOpcode::kSeqStringCodePointAt,
- Operator::kFoldable | Operator::kNoThrow,
- "SeqStringCodePointAt", 2, 1, 1, 1, 1, 0,
- kEncoding) {}
- };
- SeqStringCodePointAtOperator<UnicodeEncoding::UTF16>
- kSeqStringCodePointAtOperatorUTF16;
- SeqStringCodePointAtOperator<UnicodeEncoding::UTF32>
- kSeqStringCodePointAtOperatorUTF32;
-
- template <UnicodeEncoding kEncoding>
- struct StringFromCodePointOperator final : public Operator1<UnicodeEncoding> {
- StringFromCodePointOperator()
- : Operator1<UnicodeEncoding>(IrOpcode::kStringFromCodePoint,
- Operator::kPure, "StringFromCodePoint", 1,
- 0, 0, 1, 0, 0, kEncoding) {}
+ StringFromSingleCodePointOperator()
+ : Operator1<UnicodeEncoding>(
+ IrOpcode::kStringFromSingleCodePoint, Operator::kPure,
+ "StringFromSingleCodePoint", 1, 0, 0, 1, 0, 0, kEncoding) {}
};
- StringFromCodePointOperator<UnicodeEncoding::UTF16>
- kStringFromCodePointOperatorUTF16;
- StringFromCodePointOperator<UnicodeEncoding::UTF32>
- kStringFromCodePointOperatorUTF32;
+ StringFromSingleCodePointOperator<UnicodeEncoding::UTF16>
+ kStringFromSingleCodePointOperatorUTF16;
+ StringFromSingleCodePointOperator<UnicodeEncoding::UTF32>
+ kStringFromSingleCodePointOperatorUTF32;
struct ArrayBufferWasNeuteredOperator final : public Operator {
ArrayBufferWasNeuteredOperator()
@@ -1430,6 +1419,11 @@ const Operator* SimplifiedOperatorBuilder::NewArgumentsElements(
mapped_count); // parameter
}
+int NewArgumentsElementsMappedCountOf(const Operator* op) {
+ DCHECK_EQ(IrOpcode::kNewArgumentsElements, op->opcode());
+ return OpParameter<int>(op);
+}
+
const Operator* SimplifiedOperatorBuilder::Allocate(Type* type,
PretenureFlag pretenure) {
return new (zone()) Operator1<AllocateParameters>(
@@ -1457,24 +1451,13 @@ const Operator* SimplifiedOperatorBuilder::StringCodePointAt(
UNREACHABLE();
}
-const Operator* SimplifiedOperatorBuilder::SeqStringCodePointAt(
- UnicodeEncoding encoding) {
- switch (encoding) {
- case UnicodeEncoding::UTF16:
- return &cache_.kSeqStringCodePointAtOperatorUTF16;
- case UnicodeEncoding::UTF32:
- return &cache_.kSeqStringCodePointAtOperatorUTF32;
- }
- UNREACHABLE();
-}
-
-const Operator* SimplifiedOperatorBuilder::StringFromCodePoint(
+const Operator* SimplifiedOperatorBuilder::StringFromSingleCodePoint(
UnicodeEncoding encoding) {
switch (encoding) {
case UnicodeEncoding::UTF16:
- return &cache_.kStringFromCodePointOperatorUTF16;
+ return &cache_.kStringFromSingleCodePointOperatorUTF16;
case UnicodeEncoding::UTF32:
- return &cache_.kStringFromCodePointOperatorUTF32;
+ return &cache_.kStringFromSingleCodePointOperatorUTF32;
}
UNREACHABLE();
}
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index 09a1fed476..300bdc8598 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -49,6 +49,24 @@ struct FieldAccess {
MachineType machine_type; // machine type of the field.
WriteBarrierKind write_barrier_kind; // write barrier hint.
+ FieldAccess()
+ : base_is_tagged(kTaggedBase),
+ offset(0),
+ type(Type::None()),
+ machine_type(MachineType::None()),
+ write_barrier_kind(kFullWriteBarrier) {}
+
+ FieldAccess(BaseTaggedness base_is_tagged, int offset, MaybeHandle<Name> name,
+ MaybeHandle<Map> map, Type* type, MachineType machine_type,
+ WriteBarrierKind write_barrier_kind)
+ : base_is_tagged(base_is_tagged),
+ offset(offset),
+ name(name),
+ map(map),
+ type(type),
+ machine_type(machine_type),
+ write_barrier_kind(write_barrier_kind) {}
+
int tag() const { return base_is_tagged == kTaggedBase ? kHeapObjectTag : 0; }
};
@@ -58,7 +76,8 @@ size_t hash_value(FieldAccess const&);
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, FieldAccess const&);
-FieldAccess const& FieldAccessOf(const Operator* op) WARN_UNUSED_RESULT;
+V8_EXPORT_PRIVATE FieldAccess const& FieldAccessOf(const Operator* op)
+ V8_WARN_UNUSED_RESULT;
template <>
void Operator1<FieldAccess>::PrintParameter(std::ostream& os,
@@ -75,6 +94,21 @@ struct ElementAccess {
MachineType machine_type; // machine type of the element.
WriteBarrierKind write_barrier_kind; // write barrier hint.
+ ElementAccess()
+ : base_is_tagged(kTaggedBase),
+ header_size(0),
+ type(Type::None()),
+ machine_type(MachineType::None()),
+ write_barrier_kind(kFullWriteBarrier) {}
+
+ ElementAccess(BaseTaggedness base_is_tagged, int header_size, Type* type,
+ MachineType machine_type, WriteBarrierKind write_barrier_kind)
+ : base_is_tagged(base_is_tagged),
+ header_size(header_size),
+ type(type),
+ machine_type(machine_type),
+ write_barrier_kind(write_barrier_kind) {}
+
int tag() const { return base_is_tagged == kTaggedBase ? kHeapObjectTag : 0; }
};
@@ -85,12 +119,13 @@ size_t hash_value(ElementAccess const&);
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, ElementAccess const&);
V8_EXPORT_PRIVATE ElementAccess const& ElementAccessOf(const Operator* op)
- WARN_UNUSED_RESULT;
+ V8_WARN_UNUSED_RESULT;
-ExternalArrayType ExternalArrayTypeOf(const Operator* op) WARN_UNUSED_RESULT;
+ExternalArrayType ExternalArrayTypeOf(const Operator* op) V8_WARN_UNUSED_RESULT;
// The ConvertReceiverMode is used as parameter by ConvertReceiver operators.
-ConvertReceiverMode ConvertReceiverModeOf(Operator const* op);
+ConvertReceiverMode ConvertReceiverModeOf(Operator const* op)
+ V8_WARN_UNUSED_RESULT;
// A the parameters for several Check nodes. The {feedback} parameter is
// optional. If {feedback} references a valid CallIC slot and this MapCheck
@@ -112,7 +147,7 @@ size_t hash_value(CheckParameters const&);
std::ostream& operator<<(std::ostream&, CheckParameters const&);
-CheckParameters const& CheckParametersOf(Operator const*) WARN_UNUSED_RESULT;
+CheckParameters const& CheckParametersOf(Operator const*) V8_WARN_UNUSED_RESULT;
enum class CheckFloat64HoleMode : uint8_t {
kNeverReturnHole, // Never return the hole (deoptimize instead).
@@ -123,7 +158,8 @@ size_t hash_value(CheckFloat64HoleMode);
std::ostream& operator<<(std::ostream&, CheckFloat64HoleMode);
-CheckFloat64HoleMode CheckFloat64HoleModeOf(const Operator*) WARN_UNUSED_RESULT;
+CheckFloat64HoleMode CheckFloat64HoleModeOf(const Operator*)
+ V8_WARN_UNUSED_RESULT;
enum class CheckTaggedInputMode : uint8_t {
kNumber,
@@ -149,7 +185,7 @@ class CheckTaggedInputParameters {
};
const CheckTaggedInputParameters& CheckTaggedInputParametersOf(const Operator*)
- WARN_UNUSED_RESULT;
+ V8_WARN_UNUSED_RESULT;
std::ostream& operator<<(std::ostream&,
const CheckTaggedInputParameters& params);
@@ -169,7 +205,8 @@ size_t hash_value(CheckForMinusZeroMode);
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
CheckForMinusZeroMode);
-CheckForMinusZeroMode CheckMinusZeroModeOf(const Operator*) WARN_UNUSED_RESULT;
+CheckForMinusZeroMode CheckMinusZeroModeOf(const Operator*)
+ V8_WARN_UNUSED_RESULT;
class CheckMinusZeroParameters {
public:
@@ -186,7 +223,7 @@ class CheckMinusZeroParameters {
};
const CheckMinusZeroParameters& CheckMinusZeroParametersOf(const Operator* op)
- WARN_UNUSED_RESULT;
+ V8_WARN_UNUSED_RESULT;
std::ostream& operator<<(std::ostream&, const CheckMinusZeroParameters& params);
@@ -252,13 +289,13 @@ size_t hash_value(CheckMapsParameters const&);
std::ostream& operator<<(std::ostream&, CheckMapsParameters const&);
CheckMapsParameters const& CheckMapsParametersOf(Operator const*)
- WARN_UNUSED_RESULT;
+ V8_WARN_UNUSED_RESULT;
-MapsParameterInfo const& MapGuardMapsOf(Operator const*) WARN_UNUSED_RESULT;
+MapsParameterInfo const& MapGuardMapsOf(Operator const*) V8_WARN_UNUSED_RESULT;
// Parameters for CompareMaps operator.
MapsParameterInfo const& CompareMapsParametersOf(Operator const*)
- WARN_UNUSED_RESULT;
+ V8_WARN_UNUSED_RESULT;
// A descriptor for growing elements backing stores.
enum class GrowFastElementsMode : uint8_t {
@@ -294,7 +331,7 @@ inline size_t hash_value(const GrowFastElementsParameters&);
std::ostream& operator<<(std::ostream&, const GrowFastElementsParameters&);
const GrowFastElementsParameters& GrowFastElementsParametersOf(const Operator*)
- WARN_UNUSED_RESULT;
+ V8_WARN_UNUSED_RESULT;
// A descriptor for elements kind transitions.
class ElementsTransition final {
@@ -324,16 +361,16 @@ size_t hash_value(ElementsTransition);
std::ostream& operator<<(std::ostream&, ElementsTransition);
ElementsTransition const& ElementsTransitionOf(const Operator* op)
- WARN_UNUSED_RESULT;
+ V8_WARN_UNUSED_RESULT;
// Parameters for TransitionAndStoreElement, or
// TransitionAndStoreNonNumberElement, or
// TransitionAndStoreNumberElement.
-Handle<Map> DoubleMapParameterOf(const Operator* op);
-Handle<Map> FastMapParameterOf(const Operator* op);
+Handle<Map> DoubleMapParameterOf(const Operator* op) V8_WARN_UNUSED_RESULT;
+Handle<Map> FastMapParameterOf(const Operator* op) V8_WARN_UNUSED_RESULT;
// Parameters for TransitionAndStoreNonNumberElement.
-Type* ValueTypeParameterOf(const Operator* op);
+Type* ValueTypeParameterOf(const Operator* op) V8_WARN_UNUSED_RESULT;
// A hint for speculative number operations.
enum class NumberOperationHint : uint8_t {
@@ -348,8 +385,8 @@ size_t hash_value(NumberOperationHint);
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, NumberOperationHint);
-NumberOperationHint NumberOperationHintOf(const Operator* op)
- WARN_UNUSED_RESULT;
+V8_EXPORT_PRIVATE NumberOperationHint NumberOperationHintOf(const Operator* op)
+ V8_WARN_UNUSED_RESULT;
class NumberOperationParameters {
public:
@@ -371,10 +408,10 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
bool operator==(NumberOperationParameters const&,
NumberOperationParameters const&);
const NumberOperationParameters& NumberOperationParametersOf(const Operator* op)
- WARN_UNUSED_RESULT;
+ V8_WARN_UNUSED_RESULT;
-int FormalParameterCountOf(const Operator* op) WARN_UNUSED_RESULT;
-bool IsRestLengthOf(const Operator* op) WARN_UNUSED_RESULT;
+int FormalParameterCountOf(const Operator* op) V8_WARN_UNUSED_RESULT;
+bool IsRestLengthOf(const Operator* op) V8_WARN_UNUSED_RESULT;
class AllocateParameters {
public:
@@ -397,15 +434,17 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, AllocateParameters);
bool operator==(AllocateParameters const&, AllocateParameters const&);
-PretenureFlag PretenureFlagOf(const Operator* op) WARN_UNUSED_RESULT;
+PretenureFlag PretenureFlagOf(const Operator* op) V8_WARN_UNUSED_RESULT;
+
+Type* AllocateTypeOf(const Operator* op) V8_WARN_UNUSED_RESULT;
-Type* AllocateTypeOf(const Operator* op) WARN_UNUSED_RESULT;
+UnicodeEncoding UnicodeEncodingOf(const Operator*) V8_WARN_UNUSED_RESULT;
-UnicodeEncoding UnicodeEncodingOf(const Operator*) WARN_UNUSED_RESULT;
+AbortReason AbortReasonOf(const Operator* op) V8_WARN_UNUSED_RESULT;
-AbortReason AbortReasonOf(const Operator* op) WARN_UNUSED_RESULT;
+DeoptimizeReason DeoptimizeReasonOf(const Operator* op) V8_WARN_UNUSED_RESULT;
-DeoptimizeReason DeoptimizeReasonOf(const Operator* op) WARN_UNUSED_RESULT;
+int NewArgumentsElementsMappedCountOf(const Operator* op) V8_WARN_UNUSED_RESULT;
// Interface for building simplified operators, which represent the
// medium-level operations of V8, including adding numbers, allocating objects,
@@ -520,13 +559,10 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* StringEqual();
const Operator* StringLessThan();
const Operator* StringLessThanOrEqual();
- const Operator* StringCharAt();
const Operator* StringCharCodeAt();
- const Operator* SeqStringCharCodeAt();
const Operator* StringCodePointAt(UnicodeEncoding encoding);
- const Operator* SeqStringCodePointAt(UnicodeEncoding encoding);
- const Operator* StringFromCharCode();
- const Operator* StringFromCodePoint(UnicodeEncoding encoding);
+ const Operator* StringFromSingleCharCode();
+ const Operator* StringFromSingleCodePoint(UnicodeEncoding encoding);
const Operator* StringIndexOf();
const Operator* StringLength();
const Operator* StringToLowerCaseIntl();
@@ -577,7 +613,6 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* CheckNotTaggedHole();
const Operator* CheckNumber(const VectorSlotPair& feedback);
const Operator* CheckReceiver();
- const Operator* CheckSeqString();
const Operator* CheckSmi(const VectorSlotPair& feedback);
const Operator* CheckString(const VectorSlotPair& feedback);
const Operator* CheckSymbol();
@@ -624,6 +659,12 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* ObjectIsUndetectable();
const Operator* NumberIsFloat64Hole();
+ const Operator* NumberIsFinite();
+ const Operator* ObjectIsFiniteNumber();
+ const Operator* NumberIsInteger();
+ const Operator* ObjectIsSafeInteger();
+ const Operator* NumberIsSafeInteger();
+ const Operator* ObjectIsInteger();
const Operator* ArgumentsFrame();
const Operator* ArgumentsLength(int formal_parameter_count,
diff --git a/deps/v8/src/compiler/store-store-elimination.cc b/deps/v8/src/compiler/store-store-elimination.cc
index fac466c36a..4679573f87 100644
--- a/deps/v8/src/compiler/store-store-elimination.cc
+++ b/deps/v8/src/compiler/store-store-elimination.cc
@@ -256,7 +256,7 @@ UnobservablesSet RedundantStoreFinder::RecomputeSet(Node* node,
switch (node->op()->opcode()) {
case IrOpcode::kStoreField: {
Node* stored_to = node->InputAt(0);
- FieldAccess access = OpParameter<FieldAccess>(node->op());
+ const FieldAccess& access = FieldAccessOf(node->op());
StoreOffset offset = ToOffset(access);
UnobservableStore observation = {stored_to->id(), offset};
@@ -297,7 +297,7 @@ UnobservablesSet RedundantStoreFinder::RecomputeSet(Node* node,
}
case IrOpcode::kLoadField: {
Node* loaded_from = node->InputAt(0);
- FieldAccess access = OpParameter<FieldAccess>(node->op());
+ const FieldAccess& access = FieldAccessOf(node->op());
StoreOffset offset = ToOffset(access);
TRACE(
diff --git a/deps/v8/src/compiler/type-cache.h b/deps/v8/src/compiler/type-cache.h
index ba82536d3d..093d9d041e 100644
--- a/deps/v8/src/compiler/type-cache.h
+++ b/deps/v8/src/compiler/type-cache.h
@@ -153,6 +153,10 @@ class TypeCache final {
Type* const kArgumentsLengthType =
Type::Range(0.0, Code::kMaxArguments, zone());
+ // The JSArrayIterator::kind property always contains an integer in the
+ // range [0, 2], representing the possible IterationKinds.
+ Type* const kJSArrayIteratorKindType = CreateRange(0.0, 2.0);
+
private:
template <typename T>
Type* CreateRange() {
diff --git a/deps/v8/src/compiler/typed-optimization.cc b/deps/v8/src/compiler/typed-optimization.cc
index 49a4cdfdb3..8cb2c666e6 100644
--- a/deps/v8/src/compiler/typed-optimization.cc
+++ b/deps/v8/src/compiler/typed-optimization.cc
@@ -6,6 +6,7 @@
#include "src/compilation-dependencies.h"
#include "src/compiler/js-graph.h"
+#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/type-cache.h"
@@ -85,8 +86,6 @@ Reduction TypedOptimization::Reduce(Node* node) {
return ReduceCheckNumber(node);
case IrOpcode::kCheckString:
return ReduceCheckString(node);
- case IrOpcode::kCheckSeqString:
- return ReduceCheckSeqString(node);
case IrOpcode::kCheckEqualsInternalizedString:
return ReduceCheckEqualsInternalizedString(node);
case IrOpcode::kCheckEqualsSymbol:
@@ -105,6 +104,10 @@ Reduction TypedOptimization::Reduce(Node* node) {
return ReducePhi(node);
case IrOpcode::kReferenceEqual:
return ReduceReferenceEqual(node);
+ case IrOpcode::kStringEqual:
+ case IrOpcode::kStringLessThan:
+ case IrOpcode::kStringLessThanOrEqual:
+ return ReduceStringComparison(node);
case IrOpcode::kSameValue:
return ReduceSameValue(node);
case IrOpcode::kSelect:
@@ -213,16 +216,6 @@ Reduction TypedOptimization::ReduceCheckString(Node* node) {
return NoChange();
}
-Reduction TypedOptimization::ReduceCheckSeqString(Node* node) {
- Node* const input = NodeProperties::GetValueInput(node, 0);
- Type* const input_type = NodeProperties::GetType(input);
- if (input_type->Is(Type::SeqString())) {
- ReplaceWithValue(node, input);
- return Replace(input);
- }
- return NoChange();
-}
-
Reduction TypedOptimization::ReduceCheckEqualsInternalizedString(Node* node) {
Node* const exp = NodeProperties::GetValueInput(node, 0);
Type* const exp_type = NodeProperties::GetType(exp);
@@ -359,6 +352,137 @@ Reduction TypedOptimization::ReduceReferenceEqual(Node* node) {
return NoChange();
}
+const Operator* TypedOptimization::NumberComparisonFor(const Operator* op) {
+ switch (op->opcode()) {
+ case IrOpcode::kStringEqual:
+ return simplified()->NumberEqual();
+ case IrOpcode::kStringLessThan:
+ return simplified()->NumberLessThan();
+ case IrOpcode::kStringLessThanOrEqual:
+ return simplified()->NumberLessThanOrEqual();
+ default:
+ break;
+ }
+ UNREACHABLE();
+}
+
+Reduction TypedOptimization::
+ TryReduceStringComparisonOfStringFromSingleCharCodeToConstant(
+ Node* comparison, Handle<String> string, bool inverted) {
+ switch (comparison->opcode()) {
+ case IrOpcode::kStringEqual:
+ if (string->length() != 1) {
+ // String.fromCharCode(x) always has length 1.
+ return Replace(jsgraph()->BooleanConstant(false));
+ }
+ break;
+ case IrOpcode::kStringLessThan:
+ V8_FALLTHROUGH;
+ case IrOpcode::kStringLessThanOrEqual:
+ if (string->length() == 0) {
+ // String.fromCharCode(x) <= "" is always false,
+ // "" < String.fromCharCode(x) is always true.
+ return Replace(jsgraph()->BooleanConstant(inverted));
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return NoChange();
+}
+
+// Try to reduce a string comparison of the form
+// String.fromCharCode(x) {comparison} {constant} if inverted is false,
+// and {constant} {comparison} String.fromCharCode(x) if inverted is true.
+Reduction
+TypedOptimization::TryReduceStringComparisonOfStringFromSingleCharCode(
+ Node* comparison, Node* from_char_code, Node* constant, bool inverted) {
+ DCHECK_EQ(IrOpcode::kStringFromSingleCharCode, from_char_code->opcode());
+ HeapObjectMatcher m(constant);
+ if (!m.HasValue() || !m.Value()->IsString()) return NoChange();
+ Handle<String> string = Handle<String>::cast(m.Value());
+
+ // Check if comparison can be resolved statically.
+ Reduction red = TryReduceStringComparisonOfStringFromSingleCharCodeToConstant(
+ comparison, string, inverted);
+ if (red.Changed()) return red;
+
+ const Operator* comparison_op = NumberComparisonFor(comparison->op());
+ Node* from_char_code_repl = NodeProperties::GetValueInput(from_char_code, 0);
+ Type* from_char_code_repl_type = NodeProperties::GetType(from_char_code_repl);
+ if (!from_char_code_repl_type->Is(type_cache_.kUint16)) {
+ // Convert to signed int32 to satisfy type of {NumberBitwiseAnd}.
+ from_char_code_repl =
+ graph()->NewNode(simplified()->NumberToInt32(), from_char_code_repl);
+ from_char_code_repl = graph()->NewNode(
+ simplified()->NumberBitwiseAnd(), from_char_code_repl,
+ jsgraph()->Constant(std::numeric_limits<uint16_t>::max()));
+ }
+ Node* constant_repl = jsgraph()->Constant(string->Get(0));
+
+ Node* number_comparison = nullptr;
+ if (inverted) {
+ // "x..." <= String.fromCharCode(z) is true if x < z.
+ if (string->length() > 1 &&
+ comparison->opcode() == IrOpcode::kStringLessThanOrEqual) {
+ comparison_op = simplified()->NumberLessThan();
+ }
+ number_comparison =
+ graph()->NewNode(comparison_op, constant_repl, from_char_code_repl);
+ } else {
+ // String.fromCharCode(z) < "x..." is true if z <= x.
+ if (string->length() > 1 &&
+ comparison->opcode() == IrOpcode::kStringLessThan) {
+ comparison_op = simplified()->NumberLessThanOrEqual();
+ }
+ number_comparison =
+ graph()->NewNode(comparison_op, from_char_code_repl, constant_repl);
+ }
+ ReplaceWithValue(comparison, number_comparison);
+ return Replace(number_comparison);
+}
+
+Reduction TypedOptimization::ReduceStringComparison(Node* node) {
+ DCHECK(IrOpcode::kStringEqual == node->opcode() ||
+ IrOpcode::kStringLessThan == node->opcode() ||
+ IrOpcode::kStringLessThanOrEqual == node->opcode());
+ Node* const lhs = NodeProperties::GetValueInput(node, 0);
+ Node* const rhs = NodeProperties::GetValueInput(node, 1);
+ if (lhs->opcode() == IrOpcode::kStringFromSingleCharCode) {
+ if (rhs->opcode() == IrOpcode::kStringFromSingleCharCode) {
+ Node* left = NodeProperties::GetValueInput(lhs, 0);
+ Node* right = NodeProperties::GetValueInput(rhs, 0);
+ Type* left_type = NodeProperties::GetType(left);
+ Type* right_type = NodeProperties::GetType(right);
+ if (!left_type->Is(type_cache_.kUint16)) {
+ // Convert to signed int32 to satisfy type of {NumberBitwiseAnd}.
+ left = graph()->NewNode(simplified()->NumberToInt32(), left);
+ left = graph()->NewNode(
+ simplified()->NumberBitwiseAnd(), left,
+ jsgraph()->Constant(std::numeric_limits<uint16_t>::max()));
+ }
+ if (!right_type->Is(type_cache_.kUint16)) {
+ // Convert to signed int32 to satisfy type of {NumberBitwiseAnd}.
+ right = graph()->NewNode(simplified()->NumberToInt32(), right);
+ right = graph()->NewNode(
+ simplified()->NumberBitwiseAnd(), right,
+ jsgraph()->Constant(std::numeric_limits<uint16_t>::max()));
+ }
+ Node* equal =
+ graph()->NewNode(NumberComparisonFor(node->op()), left, right);
+ ReplaceWithValue(node, equal);
+ return Replace(equal);
+ } else {
+ return TryReduceStringComparisonOfStringFromSingleCharCode(node, lhs, rhs,
+ false);
+ }
+ } else if (rhs->opcode() == IrOpcode::kStringFromSingleCharCode) {
+ return TryReduceStringComparisonOfStringFromSingleCharCode(node, rhs, lhs,
+ true);
+ }
+ return NoChange();
+}
+
Reduction TypedOptimization::ReduceSameValue(Node* node) {
DCHECK_EQ(IrOpcode::kSameValue, node->opcode());
Node* const lhs = NodeProperties::GetValueInput(node, 0);
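The reduction added above turns comparisons involving StringFromSingleCharCode into plain numeric comparisons on the 16-bit-masked char code, widening "<" to "<=" (and, for the inverted operand order, narrowing "<=" to "<") when the constant has more than one character. A standalone model of the non-inverted "<" case, assuming a one-byte constant string and not V8 code:

#include <cstdint>
#include <string>

// String.fromCharCode(code) < s, with s a constant one-byte string.
// Char codes are masked to 16 bits, as in the reduction.
bool FromCharCodeLessThan(uint32_t code, const std::string& s) {
  if (s.empty()) return false;          // fromCharCode(code) < "" never holds
  uint32_t masked = code & 0xFFFFu;
  uint16_t first = static_cast<uint8_t>(s[0]);
  // Multi-character constant: fromCharCode(z) < "x..." holds iff z <= x,
  // which is why the reduction widens "<" to "<=" in that case.
  return s.size() > 1 ? masked <= first : masked < first;
}

// Example: FromCharCodeLessThan('a', "b") is true, and
// FromCharCodeLessThan('b', "ba") is true because "b" is a proper prefix of "ba".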
diff --git a/deps/v8/src/compiler/typed-optimization.h b/deps/v8/src/compiler/typed-optimization.h
index 75de75b143..e906ab37eb 100644
--- a/deps/v8/src/compiler/typed-optimization.h
+++ b/deps/v8/src/compiler/typed-optimization.h
@@ -41,7 +41,6 @@ class V8_EXPORT_PRIVATE TypedOptimization final
Reduction ReduceCheckMaps(Node* node);
Reduction ReduceCheckNumber(Node* node);
Reduction ReduceCheckString(Node* node);
- Reduction ReduceCheckSeqString(Node* node);
Reduction ReduceCheckEqualsInternalizedString(Node* node);
Reduction ReduceCheckEqualsSymbol(Node* node);
Reduction ReduceLoadField(Node* node);
@@ -50,6 +49,7 @@ class V8_EXPORT_PRIVATE TypedOptimization final
Reduction ReduceNumberToUint8Clamped(Node* node);
Reduction ReducePhi(Node* node);
Reduction ReduceReferenceEqual(Node* node);
+ Reduction ReduceStringComparison(Node* node);
Reduction ReduceSameValue(Node* node);
Reduction ReduceSelect(Node* node);
Reduction ReduceSpeculativeToNumber(Node* node);
@@ -57,6 +57,12 @@ class V8_EXPORT_PRIVATE TypedOptimization final
Reduction ReduceTypeOf(Node* node);
Reduction ReduceToBoolean(Node* node);
+ Reduction TryReduceStringComparisonOfStringFromSingleCharCode(
+ Node* comparison, Node* from_char_code, Node* constant, bool inverted);
+ Reduction TryReduceStringComparisonOfStringFromSingleCharCodeToConstant(
+ Node* comparison, Handle<String> string, bool inverted);
+ const Operator* NumberComparisonFor(const Operator* op);
+
CompilationDependencies* dependencies() const { return dependencies_; }
Factory* factory() const;
Graph* graph() const;
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 418fc17859..8d0265fc48 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -325,8 +325,8 @@ class Typer::Visitor : public Reducer {
static Type* NumberLessThanOrEqualTyper(Type*, Type*, Typer*);
static Type* ReferenceEqualTyper(Type*, Type*, Typer*);
static Type* SameValueTyper(Type*, Type*, Typer*);
- static Type* StringFromCharCodeTyper(Type*, Typer*);
- static Type* StringFromCodePointTyper(Type*, Typer*);
+ static Type* StringFromSingleCharCodeTyper(Type*, Typer*);
+ static Type* StringFromSingleCodePointTyper(Type*, Typer*);
Reduction UpdateType(Node* node, Type* current) {
if (NodeProperties::IsTyped(node)) {
@@ -734,12 +734,12 @@ Type* Typer::Visitor::TypeFloat64Constant(Node* node) {
}
Type* Typer::Visitor::TypeNumberConstant(Node* node) {
- double number = OpParameter<double>(node);
+ double number = OpParameter<double>(node->op());
return Type::NewConstant(number, zone());
}
Type* Typer::Visitor::TypeHeapConstant(Node* node) {
- return TypeConstant(OpParameter<Handle<HeapObject>>(node));
+ return TypeConstant(HeapConstantOf(node->op()));
}
Type* Typer::Visitor::TypeExternalConstant(Node* node) {
@@ -1208,6 +1208,14 @@ Type* Typer::Visitor::TypeJSCreateArguments(Node* node) {
Type* Typer::Visitor::TypeJSCreateArray(Node* node) { return Type::Array(); }
+Type* Typer::Visitor::TypeJSCreateArrayIterator(Node* node) {
+ return Type::OtherObject();
+}
+
+Type* Typer::Visitor::TypeJSCreateCollectionIterator(Node* node) {
+ return Type::OtherObject();
+}
+
Type* Typer::Visitor::TypeJSCreateBoundFunction(Node* node) {
return Type::BoundFunction();
}
@@ -1237,6 +1245,10 @@ Type* Typer::Visitor::TypeJSCreatePromise(Node* node) {
return Type::OtherObject();
}
+Type* Typer::Visitor::TypeJSCreateTypedArray(Node* node) {
+ return Type::OtherObject();
+}
+
Type* Typer::Visitor::TypeJSCreateLiteralArray(Node* node) {
return Type::Array();
}
@@ -1374,6 +1386,8 @@ Type* Typer::Visitor::TypeJSStoreDataPropertyInLiteral(Node* node) {
UNREACHABLE();
}
+Type* Typer::Visitor::TypeJSStoreInArrayLiteral(Node* node) { UNREACHABLE(); }
+
Type* Typer::Visitor::TypeJSDeleteProperty(Node* node) {
return Type::Boolean();
}
@@ -1952,16 +1966,14 @@ Type* Typer::Visitor::TypeStringLessThanOrEqual(Node* node) {
return Type::Boolean();
}
-Type* Typer::Visitor::StringFromCharCodeTyper(Type* type, Typer* t) {
+Type* Typer::Visitor::StringFromSingleCharCodeTyper(Type* type, Typer* t) {
return Type::String();
}
-Type* Typer::Visitor::StringFromCodePointTyper(Type* type, Typer* t) {
+Type* Typer::Visitor::StringFromSingleCodePointTyper(Type* type, Typer* t) {
return Type::String();
}
-Type* Typer::Visitor::TypeStringCharAt(Node* node) { return Type::String(); }
-
Type* Typer::Visitor::TypeStringToLowerCaseIntl(Node* node) {
return Type::String();
}
@@ -1974,24 +1986,16 @@ Type* Typer::Visitor::TypeStringCharCodeAt(Node* node) {
return typer_->cache_.kUint16;
}
-Type* Typer::Visitor::TypeSeqStringCharCodeAt(Node* node) {
- return typer_->cache_.kUint16;
-}
-
Type* Typer::Visitor::TypeStringCodePointAt(Node* node) {
return Type::Range(0.0, String::kMaxCodePoint, zone());
}
-Type* Typer::Visitor::TypeSeqStringCodePointAt(Node* node) {
- return Type::Range(0.0, String::kMaxCodePoint, zone());
-}
-
-Type* Typer::Visitor::TypeStringFromCharCode(Node* node) {
- return TypeUnaryOp(node, StringFromCharCodeTyper);
+Type* Typer::Visitor::TypeStringFromSingleCharCode(Node* node) {
+ return TypeUnaryOp(node, StringFromSingleCharCodeTyper);
}
-Type* Typer::Visitor::TypeStringFromCodePoint(Node* node) {
- return TypeUnaryOp(node, StringFromCodePointTyper);
+Type* Typer::Visitor::TypeStringFromSingleCodePoint(Node* node) {
+ return TypeUnaryOp(node, StringFromSingleCodePointTyper);
}
Type* Typer::Visitor::TypeStringIndexOf(Node* node) {
@@ -2062,11 +2066,6 @@ Type* Typer::Visitor::TypeCheckString(Node* node) {
return Type::Intersect(arg, Type::String(), zone());
}
-Type* Typer::Visitor::TypeCheckSeqString(Node* node) {
- Type* arg = Operand(node, 0);
- return Type::Intersect(arg, Type::SeqString(), zone());
-}
-
Type* Typer::Visitor::TypeCheckSymbol(Node* node) {
Type* arg = Operand(node, 0);
return Type::Intersect(arg, Type::Symbol(), zone());
@@ -2181,6 +2180,24 @@ Type* Typer::Visitor::TypeNumberIsFloat64Hole(Node* node) {
return Type::Boolean();
}
+Type* Typer::Visitor::TypeNumberIsFinite(Node* node) { UNREACHABLE(); }
+
+Type* Typer::Visitor::TypeObjectIsFiniteNumber(Node* node) {
+ return Type::Boolean();
+}
+
+Type* Typer::Visitor::TypeNumberIsInteger(Node* node) { UNREACHABLE(); }
+
+Type* Typer::Visitor::TypeObjectIsSafeInteger(Node* node) {
+ return Type::Boolean();
+}
+
+Type* Typer::Visitor::TypeNumberIsSafeInteger(Node* node) { UNREACHABLE(); }
+
+Type* Typer::Visitor::TypeObjectIsInteger(Node* node) {
+ return Type::Boolean();
+}
+
Type* Typer::Visitor::TypeObjectIsNaN(Node* node) {
return TypeUnaryOp(node, ObjectIsNaN);
}
@@ -2235,9 +2252,7 @@ Type* Typer::Visitor::TypeNewArgumentsElements(Node* node) {
return Type::OtherInternal();
}
-Type* Typer::Visitor::TypeNewConsString(Node* node) {
- return Type::OtherNonSeqString();
-}
+Type* Typer::Visitor::TypeNewConsString(Node* node) { return Type::String(); }
Type* Typer::Visitor::TypeArrayBufferWasNeutered(Node* node) {
return Type::Boolean();
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index 3e3dbbe769..87a450f071 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -161,20 +161,18 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case SHORT_EXTERNAL_STRING_TYPE:
case SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE:
case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
- return kOtherNonSeqString;
case STRING_TYPE:
case ONE_BYTE_STRING_TYPE:
- return kOtherSeqString;
+ return kString;
case EXTERNAL_INTERNALIZED_STRING_TYPE:
case EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
case EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
case SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE:
case SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
case SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
- return kInternalizedNonSeqString;
case INTERNALIZED_STRING_TYPE:
case ONE_BYTE_INTERNALIZED_STRING_TYPE:
- return kInternalizedSeqString;
+ return kInternalizedString;
case SYMBOL_TYPE:
return kSymbol;
case BIGINT_TYPE:
@@ -223,7 +221,9 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case JS_ASYNC_GENERATOR_OBJECT_TYPE:
case JS_MODULE_NAMESPACE_TYPE:
case JS_ARRAY_BUFFER_TYPE:
+ case JS_ARRAY_ITERATOR_TYPE:
case JS_REGEXP_TYPE: // TODO(rossberg): there should be a RegExp type.
+ case JS_REGEXP_STRING_ITERATOR_TYPE:
case JS_TYPED_ARRAY_TYPE:
case JS_DATA_VIEW_TYPE:
case JS_SET_TYPE:
@@ -235,15 +235,11 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case JS_MAP_VALUE_ITERATOR_TYPE:
case JS_STRING_ITERATOR_TYPE:
case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
-
-#define ARRAY_ITERATOR_CASE(type) case type:
- ARRAY_ITERATOR_TYPE_LIST(ARRAY_ITERATOR_CASE)
-#undef ARRAY_ITERATOR_CASE
-
case JS_WEAK_MAP_TYPE:
case JS_WEAK_SET_TYPE:
case JS_PROMISE_TYPE:
case WASM_MODULE_TYPE:
+ case WASM_GLOBAL_TYPE:
case WASM_INSTANCE_TYPE:
case WASM_MEMORY_TYPE:
case WASM_TABLE_TYPE:
@@ -268,9 +264,12 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case ACCESSOR_PAIR_TYPE:
case FIXED_ARRAY_TYPE:
case HASH_TABLE_TYPE:
+ case WEAK_FIXED_ARRAY_TYPE:
case FIXED_DOUBLE_ARRAY_TYPE:
+ case FEEDBACK_METADATA_TYPE:
case BYTE_ARRAY_TYPE:
case BYTECODE_ARRAY_TYPE:
+ case BOILERPLATE_DESCRIPTION_TYPE:
case DESCRIPTOR_ARRAY_TYPE:
case TRANSITION_ARRAY_TYPE:
case FEEDBACK_CELL_TYPE:
@@ -278,6 +277,15 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case PROPERTY_ARRAY_TYPE:
case FOREIGN_TYPE:
case SCOPE_INFO_TYPE:
+ case BLOCK_CONTEXT_TYPE:
+ case CATCH_CONTEXT_TYPE:
+ case DEBUG_EVALUATE_CONTEXT_TYPE:
+ case EVAL_CONTEXT_TYPE:
+ case FUNCTION_CONTEXT_TYPE:
+ case MODULE_CONTEXT_TYPE:
+ case NATIVE_CONTEXT_TYPE:
+ case SCRIPT_CONTEXT_TYPE:
+ case WITH_CONTEXT_TYPE:
case SCRIPT_TYPE:
case CODE_TYPE:
case PROPERTY_CELL_TYPE:
@@ -297,6 +305,7 @@ Type::bitset BitsetType::Lub(i::Map* map) {
#undef FIXED_TYPED_ARRAY_CASE
case FILLER_TYPE:
case ACCESS_CHECK_INFO_TYPE:
+ case CALL_HANDLER_INFO_TYPE:
case INTERCEPTOR_INFO_TYPE:
case OBJECT_TEMPLATE_INFO_TYPE:
case ALLOCATION_MEMENTO_TYPE:
@@ -309,8 +318,12 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case SMALL_ORDERED_HASH_MAP_TYPE:
case SMALL_ORDERED_HASH_SET_TYPE:
case PROTOTYPE_INFO_TYPE:
+ case INTERPRETER_DATA_TYPE:
case TUPLE2_TYPE:
case TUPLE3_TYPE:
+ case WASM_COMPILED_MODULE_TYPE:
+ case WASM_DEBUG_INFO_TYPE:
+ case WASM_SHARED_MODULE_DATA_TYPE:
case LOAD_HANDLER_TYPE:
case STORE_HANDLER_TYPE:
case CONTEXT_EXTENSION_TYPE:
@@ -361,6 +374,7 @@ size_t BitsetType::BoundariesSize() {
}
Type::bitset BitsetType::ExpandInternals(Type::bitset bits) {
+ DCHECK_IMPLIES(bits & kOtherString, (bits & kString) == kString);
DisallowHeapAllocation no_allocation;
if (!(bits & kPlainNumber)) return bits; // Shortcut.
const Boundary* boundaries = Boundaries();
@@ -823,7 +837,7 @@ Type* Type::NewConstant(i::Handle<i::Object> value, Zone* zone) {
} else if (value->IsHeapNumber()) {
return NewConstant(value->Number(), zone);
} else if (value->IsString() && !value->IsInternalizedString()) {
- return Type::OtherString();
+ return Type::String();
}
return HeapConstant(i::Handle<i::HeapObject>::cast(value), zone);
}
diff --git a/deps/v8/src/compiler/types.h b/deps/v8/src/compiler/types.h
index c4c371dab3..32ce987cea 100644
--- a/deps/v8/src/compiler/types.h
+++ b/deps/v8/src/compiler/types.h
@@ -103,33 +103,31 @@ namespace compiler {
V(OtherUnsigned32, 1u << 2) \
V(OtherSigned32, 1u << 3) \
V(OtherNumber, 1u << 4) \
+ V(OtherString, 1u << 5) \
#define PROPER_BITSET_TYPE_LIST(V) \
V(None, 0u) \
- V(Negative31, 1u << 5) \
- V(Null, 1u << 6) \
- V(Undefined, 1u << 7) \
- V(Boolean, 1u << 8) \
- V(Unsigned30, 1u << 9) \
- V(MinusZero, 1u << 10) \
- V(NaN, 1u << 11) \
- V(Symbol, 1u << 12) \
- V(InternalizedNonSeqString, 1u << 13) \
- V(InternalizedSeqString, 1u << 14) \
- V(OtherNonSeqString, 1u << 15) \
- V(OtherSeqString, 1u << 16) \
- V(OtherCallable, 1u << 17) \
- V(OtherObject, 1u << 18) \
- V(OtherUndetectable, 1u << 19) \
- V(CallableProxy, 1u << 20) \
- V(OtherProxy, 1u << 21) \
- V(Function, 1u << 22) \
- V(BoundFunction, 1u << 23) \
- V(Hole, 1u << 24) \
- V(OtherInternal, 1u << 25) \
- V(ExternalPointer, 1u << 26) \
- V(Array, 1u << 27) \
- V(BigInt, 1u << 28) \
+ V(Negative31, 1u << 6) \
+ V(Null, 1u << 7) \
+ V(Undefined, 1u << 8) \
+ V(Boolean, 1u << 9) \
+ V(Unsigned30, 1u << 10) \
+ V(MinusZero, 1u << 11) \
+ V(NaN, 1u << 12) \
+ V(Symbol, 1u << 13) \
+ V(InternalizedString, 1u << 14) \
+ V(OtherCallable, 1u << 16) \
+ V(OtherObject, 1u << 17) \
+ V(OtherUndetectable, 1u << 18) \
+ V(CallableProxy, 1u << 19) \
+ V(OtherProxy, 1u << 20) \
+ V(Function, 1u << 21) \
+ V(BoundFunction, 1u << 22) \
+ V(Hole, 1u << 23) \
+ V(OtherInternal, 1u << 24) \
+ V(ExternalPointer, 1u << 25) \
+ V(Array, 1u << 26) \
+ V(BigInt, 1u << 27) \
\
V(Signed31, kUnsigned30 | kNegative31) \
V(Signed32, kSigned31 | kOtherUnsigned31 | \
@@ -150,12 +148,6 @@ namespace compiler {
V(MinusZeroOrNaN, kMinusZero | kNaN) \
V(Number, kOrderedNumber | kNaN) \
V(Numeric, kNumber | kBigInt) \
- V(InternalizedString, kInternalizedNonSeqString | \
- kInternalizedSeqString) \
- V(OtherString, kOtherNonSeqString | kOtherSeqString) \
- V(SeqString, kInternalizedSeqString | kOtherSeqString) \
- V(NonSeqString, kInternalizedNonSeqString | \
- kOtherNonSeqString) \
V(String, kInternalizedString | kOtherString) \
V(UniqueName, kSymbol | kInternalizedString) \
V(Name, kSymbol | kString) \
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index e9a5be6f65..54a0e47dd9 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -298,8 +298,8 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kIfValue: {
for (const Node* user : node->uses()) {
if (user != use && user->opcode() == IrOpcode::kIfValue) {
- CHECK_NE(OpParameter<int32_t>(use->op()),
- OpParameter<int32_t>(user->op()));
+ CHECK_NE(IfValueParametersOf(use->op()).value(),
+ IfValueParametersOf(user->op()).value());
}
}
++count_case;
@@ -665,6 +665,14 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// Type is Array.
CheckTypeIs(node, Type::Array());
break;
+ case IrOpcode::kJSCreateArrayIterator:
+ // Type is OtherObject.
+ CheckTypeIs(node, Type::OtherObject());
+ break;
+ case IrOpcode::kJSCreateCollectionIterator:
+ // Type is OtherObject.
+ CheckTypeIs(node, Type::OtherObject());
+ break;
case IrOpcode::kJSCreateBoundFunction:
// Type is BoundFunction.
CheckTypeIs(node, Type::BoundFunction());
@@ -689,6 +697,10 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// Type is OtherObject.
CheckTypeIs(node, Type::OtherObject());
break;
+ case IrOpcode::kJSCreateTypedArray:
+ // Type is OtherObject.
+ CheckTypeIs(node, Type::OtherObject());
+ break;
case IrOpcode::kJSCreateLiteralArray:
// Type is Array.
CheckTypeIs(node, Type::Array());
@@ -739,8 +751,10 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CHECK(StoreNamedOwnParametersOf(node->op()).feedback().IsValid());
break;
case IrOpcode::kJSStoreDataPropertyInLiteral:
+ case IrOpcode::kJSStoreInArrayLiteral:
// Type is empty.
CheckNotTyped(node);
+ CHECK(FeedbackParameterOf(node->op()).feedback().IsValid());
break;
case IrOpcode::kJSDeleteProperty:
case IrOpcode::kJSHasProperty:
@@ -1078,42 +1092,24 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckValueInputIs(node, 0, Type::String());
CheckTypeIs(node, Type::Number());
break;
- case IrOpcode::kStringCharAt:
- // (String, Unsigned32) -> String
- CheckValueInputIs(node, 0, Type::String());
- CheckValueInputIs(node, 1, Type::Unsigned32());
- CheckTypeIs(node, Type::String());
- break;
case IrOpcode::kStringCharCodeAt:
// (String, Unsigned32) -> UnsignedSmall
CheckValueInputIs(node, 0, Type::String());
CheckValueInputIs(node, 1, Type::Unsigned32());
CheckTypeIs(node, Type::UnsignedSmall());
break;
- case IrOpcode::kSeqStringCharCodeAt:
- // (SeqString, Unsigned32) -> UnsignedSmall
- CheckValueInputIs(node, 0, Type::SeqString());
- CheckValueInputIs(node, 1, Type::Unsigned32());
- CheckTypeIs(node, Type::UnsignedSmall());
- break;
case IrOpcode::kStringCodePointAt:
// (String, Unsigned32) -> UnsignedSmall
CheckValueInputIs(node, 0, Type::String());
CheckValueInputIs(node, 1, Type::Unsigned32());
CheckTypeIs(node, Type::UnsignedSmall());
break;
- case IrOpcode::kSeqStringCodePointAt:
- // (String, Unsigned32) -> UnsignedSmall
- CheckValueInputIs(node, 0, Type::String());
- CheckValueInputIs(node, 1, Type::Unsigned32());
- CheckTypeIs(node, Type::UnsignedSmall());
- break;
- case IrOpcode::kStringFromCharCode:
+ case IrOpcode::kStringFromSingleCharCode:
// Number -> String
CheckValueInputIs(node, 0, Type::Number());
CheckTypeIs(node, Type::String());
break;
- case IrOpcode::kStringFromCodePoint:
+ case IrOpcode::kStringFromSingleCodePoint:
// (Unsigned32) -> String
CheckValueInputIs(node, 0, Type::Number());
CheckTypeIs(node, Type::String());
@@ -1174,6 +1170,30 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckValueInputIs(node, 0, Type::NumberOrHole());
CheckTypeIs(node, Type::Boolean());
break;
+ case IrOpcode::kNumberIsFinite:
+ CheckValueInputIs(node, 0, Type::Number());
+ CheckTypeIs(node, Type::Boolean());
+ break;
+ case IrOpcode::kObjectIsFiniteNumber:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckTypeIs(node, Type::Boolean());
+ break;
+ case IrOpcode::kNumberIsInteger:
+ CheckValueInputIs(node, 0, Type::Number());
+ CheckTypeIs(node, Type::Boolean());
+ break;
+ case IrOpcode::kObjectIsSafeInteger:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckTypeIs(node, Type::Boolean());
+ break;
+ case IrOpcode::kNumberIsSafeInteger:
+ CheckValueInputIs(node, 0, Type::Number());
+ CheckTypeIs(node, Type::Boolean());
+ break;
+ case IrOpcode::kObjectIsInteger:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckTypeIs(node, Type::Boolean());
+ break;
case IrOpcode::kFindOrderedHashMapEntry:
CheckValueInputIs(node, 0, Type::Any());
CheckTypeIs(node, Type::SignedSmall());
@@ -1206,7 +1226,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckValueInputIs(node, 0, TypeCache::Get().kStringLengthType);
CheckValueInputIs(node, 1, Type::String());
CheckValueInputIs(node, 2, Type::String());
- CheckTypeIs(node, Type::OtherString());
+ CheckTypeIs(node, Type::String());
break;
case IrOpcode::kAllocate:
CheckValueInputIs(node, 0, Type::PlainNumber());
@@ -1392,10 +1412,6 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckValueInputIs(node, 0, Type::Any());
CheckTypeIs(node, Type::String());
break;
- case IrOpcode::kCheckSeqString:
- CheckValueInputIs(node, 0, Type::Any());
- CheckTypeIs(node, Type::SeqString());
- break;
case IrOpcode::kCheckSymbol:
CheckValueInputIs(node, 0, Type::Any());
CheckTypeIs(node, Type::Symbol());
@@ -1673,10 +1689,12 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kWord32PairShl:
case IrOpcode::kWord32PairShr:
case IrOpcode::kWord32PairSar:
- case IrOpcode::kSpeculationPoison:
+ case IrOpcode::kPoisonOnSpeculationTagged:
+ case IrOpcode::kPoisonOnSpeculationWord:
case IrOpcode::kLoadStackPointer:
case IrOpcode::kLoadFramePointer:
case IrOpcode::kLoadParentFramePointer:
+ case IrOpcode::kLoadRootsPointer:
case IrOpcode::kUnalignedLoad:
case IrOpcode::kUnalignedStore:
case IrOpcode::kWord32AtomicLoad:
@@ -1688,6 +1706,15 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kWord32AtomicAnd:
case IrOpcode::kWord32AtomicOr:
case IrOpcode::kWord32AtomicXor:
+ case IrOpcode::kWord64AtomicLoad:
+ case IrOpcode::kWord64AtomicStore:
+ case IrOpcode::kWord64AtomicAdd:
+ case IrOpcode::kWord64AtomicSub:
+ case IrOpcode::kWord64AtomicAnd:
+ case IrOpcode::kWord64AtomicOr:
+ case IrOpcode::kWord64AtomicXor:
+ case IrOpcode::kWord64AtomicExchange:
+ case IrOpcode::kWord64AtomicCompareExchange:
case IrOpcode::kSpeculationFence:
case IrOpcode::kSignExtendWord8ToInt32:
case IrOpcode::kSignExtendWord16ToInt32:
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index 080479a010..61c5c152ba 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -30,7 +30,7 @@
#include "src/compiler/pipeline.h"
#include "src/compiler/simd-scalar-lowering.h"
#include "src/compiler/zone-stats.h"
-#include "src/factory.h"
+#include "src/heap/factory.h"
#include "src/isolate-inl.h"
#include "src/log-inl.h"
#include "src/trap-handler/trap-handler.h"
@@ -58,6 +58,21 @@ namespace compiler {
FATAL("Unsupported opcode 0x%x:%s", (opcode), \
wasm::WasmOpcodes::OpcodeName(opcode));
+#define WASM_INSTANCE_OBJECT_OFFSET(name) \
+ (WasmInstanceObject::k##name##Offset - kHeapObjectTag)
+
+#define LOAD_INSTANCE_FIELD(name, type) \
+ graph()->NewNode( \
+ jsgraph()->machine()->Load(type), instance_node_.get(), \
+ jsgraph()->Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(name)), *effect_, \
+ *control_)
+
+#define LOAD_FIXED_ARRAY_SLOT(array_node, index) \
+ graph()->NewNode(jsgraph()->machine()->Load(MachineType::TaggedPointer()), \
+ array_node, \
+ jsgraph()->Int32Constant(FixedArrayOffsetMinusTag(index)), \
+ *effect_, *control_);
+
namespace {
constexpr uint32_t kBytesPerExceptionValuesArrayElement = 2;
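The three macros added above are the new way the graph builder reaches into the WasmInstanceObject. As a rough illustration (an expansion of the macro, not standalone code), LOAD_INSTANCE_FIELD(MemoryStart, MachineType::UintPtr()) produces a single untyped machine load off the instance pointer:

    // Rough expansion of LOAD_INSTANCE_FIELD(MemoryStart, MachineType::UintPtr()):
    graph()->NewNode(
        jsgraph()->machine()->Load(MachineType::UintPtr()),   // raw machine load
        instance_node_.get(),                                  // base: the instance object
        jsgraph()->Int32Constant(
            WasmInstanceObject::kMemoryStartOffset - kHeapObjectTag),  // field offset minus heap tag
        *effect_, *control_);

LOAD_FIXED_ARRAY_SLOT does the same with a TaggedPointer load at the offset of a FixedArray element.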
@@ -82,14 +97,14 @@ bool ContainsSimd(wasm::FunctionSig* sig) {
WasmGraphBuilder::WasmGraphBuilder(
ModuleEnv* env, Zone* zone, JSGraph* jsgraph, Handle<Code> centry_stub,
- wasm::FunctionSig* sig,
+ Handle<Oddball> anyref_null, wasm::FunctionSig* sig,
compiler::SourcePositionTable* source_position_table,
RuntimeExceptionSupport exception_support)
: zone_(zone),
jsgraph_(jsgraph),
centry_stub_node_(jsgraph_->HeapConstant(centry_stub)),
+ anyref_null_node_(jsgraph_->HeapConstant(anyref_null)),
env_(env),
- function_tables_(zone),
cur_buffer_(def_buffer_),
cur_bufsize_(kDefaultBufferSize),
has_simd_(ContainsSimd(sig)),
@@ -796,6 +811,8 @@ Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input,
return jsgraph()->machine()->Is32()
? BuildCcallConvertFloat(input, position, opcode)
: BuildIntConvertFloat(input, position, opcode);
+ case wasm::kExprRefIsNull:
+ return graph()->NewNode(m->WordEqual(), input, anyref_null_node_);
case wasm::kExprI32AsmjsLoadMem8S:
return BuildAsmjsLoadMem(MachineType::Int8(), input);
case wasm::kExprI32AsmjsLoadMem8U:
@@ -1715,10 +1732,11 @@ Node* WasmGraphBuilder::BuildI32Ctz(Node* input) {
}
Node* WasmGraphBuilder::BuildI64Ctz(Node* input) {
- return Unop(wasm::kExprI64UConvertI32,
- BuildBitCountingCall(input, ExternalReference::wasm_word64_ctz(
- jsgraph()->isolate()),
- MachineRepresentation::kWord64));
+ return Unop(
+ wasm::kExprI64UConvertI32,
+ BuildBitCountingCall(
+ input, ExternalReference::wasm_word64_ctz(jsgraph()->isolate()),
+ MachineRepresentation::kWord64));
}
Node* WasmGraphBuilder::BuildI32Popcnt(Node* input) {
@@ -1728,10 +1746,11 @@ Node* WasmGraphBuilder::BuildI32Popcnt(Node* input) {
}
Node* WasmGraphBuilder::BuildI64Popcnt(Node* input) {
- return Unop(wasm::kExprI64UConvertI32,
- BuildBitCountingCall(input, ExternalReference::wasm_word64_popcnt(
- jsgraph()->isolate()),
- MachineRepresentation::kWord64));
+ return Unop(
+ wasm::kExprI64UConvertI32,
+ BuildBitCountingCall(
+ input, ExternalReference::wasm_word64_popcnt(jsgraph()->isolate()),
+ MachineRepresentation::kWord64));
}
Node* WasmGraphBuilder::BuildF32Trunc(Node* input) {
@@ -2530,28 +2549,30 @@ Node* WasmGraphBuilder::BuildCCall(MachineSignature* sig, Node* function,
Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args,
Node*** rets,
wasm::WasmCodePosition position,
- Node* wasm_context) {
- if (wasm_context == nullptr) {
- DCHECK_NOT_NULL(wasm_context_);
- wasm_context = wasm_context_.get();
+ Node* instance_node,
+ UseRetpoline use_retpoline) {
+ if (instance_node == nullptr) {
+ DCHECK_NOT_NULL(instance_node_);
+ instance_node = instance_node_.get();
}
SetNeedsStackCheck();
const size_t params = sig->parameter_count();
- const size_t extra = 3; // wasm_context, effect, and control.
+ const size_t extra = 3; // instance_node, effect, and control.
const size_t count = 1 + params + extra;
// Reallocate the buffer to make space for extra inputs.
args = Realloc(args, 1 + params, count);
- // Make room for the wasm_context parameter at index 1, just after code.
+ // Make room for the instance_node parameter at index 1, just after code.
memmove(&args[2], &args[1], params * sizeof(Node*));
- args[1] = wasm_context;
+ args[1] = instance_node;
// Add effect and control inputs.
args[params + 2] = *effect_;
args[params + 3] = *control_;
- auto call_descriptor = GetWasmCallDescriptor(jsgraph()->zone(), sig);
+ auto call_descriptor =
+ GetWasmCallDescriptor(jsgraph()->zone(), sig, use_retpoline);
const Operator* op = jsgraph()->common()->Call(call_descriptor);
Node* call = graph()->NewNode(op, static_cast<int>(count), args);
SetSourcePosition(call, position);
@@ -2578,22 +2599,35 @@ Node* WasmGraphBuilder::CallDirect(uint32_t index, Node** args, Node*** rets,
wasm::WasmCodePosition position) {
DCHECK_NULL(args[0]);
wasm::FunctionSig* sig = env_->module->functions[index].sig;
- if (FLAG_wasm_jit_to_native) {
+
+ if (env_ && index < env_->module->num_imported_functions) {
+ // A call to an imported function.
+ // Load the instance from the imported_instances array at a known offset.
+ Node* imported_instances = LOAD_INSTANCE_FIELD(
+ ImportedFunctionInstances, MachineType::TaggedPointer());
+ Node* instance_node = LOAD_FIXED_ARRAY_SLOT(imported_instances, index);
+
+ // Load the target from the imported_targets array at a known offset.
+ Node* imported_targets =
+ LOAD_INSTANCE_FIELD(ImportedFunctionTargets, MachineType::Pointer());
+ Node* target_node = graph()->NewNode(
+ jsgraph()->machine()->Load(MachineType::Pointer()), imported_targets,
+ jsgraph()->Int32Constant(index * sizeof(Address)),
+ jsgraph()->graph()->start(), jsgraph()->graph()->start());
+ args[0] = target_node;
+ return BuildWasmCall(
+ sig, args, rets, position, instance_node,
+ untrusted_code_mitigations_ ? kRetpoline : kNoRetpoline);
+
+ } else {
+ // A call to a function in this module.
// Just encode the function index. This will be patched at instantiation.
Address code = reinterpret_cast<Address>(index);
args[0] = jsgraph()->RelocatableIntPtrConstant(
reinterpret_cast<intptr_t>(code), RelocInfo::WASM_CALL);
- } else {
- // Add code object as constant.
- Handle<Code> code = index < env_->function_code.size()
- ? env_->function_code[index]
- : env_->default_function_code;
- DCHECK(!code.is_null());
- args[0] = HeapConstant(code);
+ return BuildWasmCall(sig, args, rets, position, nullptr, kNoRetpoline);
}
-
- return BuildWasmCall(sig, args, rets, position);
}
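With the instance-based calling convention, a direct call to an imported function no longer patches in a code object; it reads the callee instance and the raw entry point from two per-instance tables indexed by the import index, as the two loads in CallDirect above show. A self-contained sketch of that lookup, with plain arrays standing in for the ImportedFunctionInstances and ImportedFunctionTargets fields (the helper name is only for illustration):

    #include <cstdint>
    using Address = uintptr_t;

    struct ImportTables {
      void** imported_function_instances;   // one callee instance per import
      Address* imported_function_targets;   // one raw code entry point per import
    };

    Address LookupImportTarget(const ImportTables& t, uint32_t index,
                               void** instance_out) {
      *instance_out = t.imported_function_instances[index];  // callee instance
      return t.imported_function_targets[index];             // callee entry point
    }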
Node* WasmGraphBuilder::CallIndirect(uint32_t sig_index, Node** args,
@@ -2603,18 +2637,16 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t sig_index, Node** args,
DCHECK_NOT_NULL(env_);
// Assume only one table for now.
- uint32_t table_index = 0;
wasm::FunctionSig* sig = env_->module->signatures[sig_index];
- Node* table = nullptr;
- Node* table_size = nullptr;
- GetFunctionTableNodes(table_index, &table, &table_size);
+ Node* ift_size =
+ LOAD_INSTANCE_FIELD(IndirectFunctionTableSize, MachineType::Uint32());
+
MachineOperatorBuilder* machine = jsgraph()->machine();
Node* key = args[0];
// Bounds check against the table size.
- Node* in_bounds =
- graph()->NewNode(machine->Uint32LessThan(), key, table_size);
+ Node* in_bounds = graph()->NewNode(machine->Uint32LessThan(), key, ift_size);
TrapIfFalse(wasm::kTrapFuncInvalid, in_bounds, position);
// Mask the key to prevent SSCA.
@@ -2624,81 +2656,56 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t sig_index, Node** args,
graph()->NewNode(machine->Word32Xor(), key, Int32Constant(-1));
Node* masked_diff = graph()->NewNode(
machine->Word32And(),
- graph()->NewNode(machine->Int32Sub(), key, table_size), neg_key);
+ graph()->NewNode(machine->Int32Sub(), key, ift_size), neg_key);
Node* mask =
graph()->NewNode(machine->Word32Sar(), masked_diff, Int32Constant(31));
key = graph()->NewNode(machine->Word32And(), key, mask);
}
// Load signature from the table and check.
- // The table is a FixedArray; signatures are encoded as SMIs.
- // [sig1, code1, sig2, code2, sig3, code3, ...]
- static_assert(compiler::kFunctionTableEntrySize == 2, "consistency");
- static_assert(compiler::kFunctionTableSignatureOffset == 0, "consistency");
- static_assert(compiler::kFunctionTableCodeOffset == 1, "consistency");
-
- int32_t canonical_sig_num = env_->module->signature_ids[sig_index];
- if (WASM_CONTEXT_TABLES) {
- // The table entries are {IndirectFunctionTableEntry} structs.
- Node* scaled_key =
- graph()->NewNode(machine->Int32Mul(), key,
- Int32Constant(sizeof(IndirectFunctionTableEntry)));
- const Operator* add = nullptr;
- if (machine->Is64()) {
- scaled_key = graph()->NewNode(machine->ChangeInt32ToInt64(), scaled_key);
- add = machine->Int64Add();
- } else {
- add = machine->Int32Add();
- }
- Node* entry_address = graph()->NewNode(add, table, scaled_key);
- Node* loaded_sig = graph()->NewNode(
- machine->Load(MachineType::Int32()), entry_address,
- Int32Constant(offsetof(IndirectFunctionTableEntry, sig_id)), *effect_,
- *control_);
- Node* sig_match = graph()->NewNode(machine->WordEqual(), loaded_sig,
- Int32Constant(canonical_sig_num));
-
- TrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match, position);
+ Node* ift_sig_ids =
+ LOAD_INSTANCE_FIELD(IndirectFunctionTableSigIds, MachineType::Pointer());
- Node* target = graph()->NewNode(
- machine->Load(MachineType::Pointer()), entry_address,
- Int32Constant(offsetof(IndirectFunctionTableEntry, target)), *effect_,
- *control_);
-
- Node* loaded_context = graph()->NewNode(
- machine->Load(MachineType::Pointer()), entry_address,
- Int32Constant(offsetof(IndirectFunctionTableEntry, context)), *effect_,
- *control_);
-
- args[0] = target;
-
- return BuildWasmCall(sig, args, rets, position, loaded_context);
+ int32_t expected_sig_id = env_->module->signature_ids[sig_index];
+ Node* scaled_key =
+ graph()->NewNode(machine->Word32Shl(), key, Int32Constant(2));
+ const Operator* add = nullptr;
+ if (machine->Is64()) {
+ scaled_key = graph()->NewNode(machine->ChangeUint32ToUint64(), scaled_key);
+ add = machine->Int64Add();
+ } else {
+ add = machine->Int32Add();
}
- // The table entries are elements of a fixed array.
- ElementAccess access = AccessBuilder::ForFixedArrayElement();
- const int fixed_offset = access.header_size - access.tag();
- Node* key_offset = graph()->NewNode(machine->Word32Shl(), key,
- Int32Constant(kPointerSizeLog2 + 1));
Node* loaded_sig =
- graph()->NewNode(machine->Load(MachineType::AnyTagged()), table,
- graph()->NewNode(machine->Int32Add(), key_offset,
- Int32Constant(fixed_offset)),
- *effect_, *control_);
- CHECK_GE(canonical_sig_num, 0);
+ graph()->NewNode(machine->Load(MachineType::Int32()), ift_sig_ids,
+ scaled_key, *effect_, *control_);
Node* sig_match = graph()->NewNode(machine->WordEqual(), loaded_sig,
- jsgraph()->SmiConstant(canonical_sig_num));
+ Int32Constant(expected_sig_id));
TrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match, position);
- // Load code object from the table. It is held by a Foreign.
- Node* entry = graph()->NewNode(
- machine->Load(MachineType::AnyTagged()), table,
- graph()->NewNode(machine->Int32Add(), key_offset,
- Uint32Constant(fixed_offset + kPointerSize)),
- *effect_, *control_);
- args[0] = entry;
- return BuildWasmCall(sig, args, rets, position);
+ Node* ift_targets =
+ LOAD_INSTANCE_FIELD(IndirectFunctionTableTargets, MachineType::Pointer());
+ Node* ift_instances = LOAD_INSTANCE_FIELD(IndirectFunctionTableInstances,
+ MachineType::TaggedPointer());
+
+ scaled_key = graph()->NewNode(machine->Word32Shl(), key,
+ Int32Constant(kPointerSizeLog2));
+
+ Node* target = graph()->NewNode(machine->Load(MachineType::Pointer()),
+ ift_targets, scaled_key, *effect_, *control_);
+
+ auto access = AccessBuilder::ForFixedArrayElement();
+ Node* target_instance = graph()->NewNode(
+ machine->Load(MachineType::TaggedPointer()),
+ graph()->NewNode(add, ift_instances, scaled_key),
+ Int32Constant(access.header_size - access.tag()), *effect_, *control_);
+
+ args[0] = target;
+
+ return BuildWasmCall(sig, args, rets, position, target_instance,
+ untrusted_code_mitigations_ ? kRetpoline : kNoRetpoline);
}
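When untrusted-code mitigations are enabled, the Word32Xor/Int32Sub/Word32And/Word32Sar sequence above clamps the table key to zero under misspeculation, after the architectural bounds check has already trapped. A scalar sketch of the same branchless mask; it assumes the table size is below 2^31, which holds for wasm tables, and relies on arithmetic right shift just as Word32Sar does:

    #include <cstdint>

    // Returns key when key < size, and 0 otherwise, without any branch.
    uint32_t ClampIndex(uint32_t key, uint32_t size) {
      uint32_t neg_key = ~key;                              // Word32Xor(key, -1)
      uint32_t masked_diff = (key - size) & neg_key;        // sign bit set iff key < size
      uint32_t mask = static_cast<uint32_t>(
          static_cast<int32_t>(masked_diff) >> 31);         // 0xFFFFFFFF or 0
      return key & mask;
    }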
Node* WasmGraphBuilder::BuildI32Rol(Node* left, Node* right) {
@@ -2840,6 +2847,8 @@ Node* WasmGraphBuilder::ToJS(Node* node, wasm::ValueType type) {
return BuildChangeFloat64ToTagged(node);
case wasm::kWasmF64:
return BuildChangeFloat64ToTagged(node);
+ case wasm::kWasmAnyRef:
+ return node;
case wasm::kWasmStmt:
return jsgraph()->UndefinedConstant();
default:
@@ -2922,6 +2931,11 @@ Node* WasmGraphBuilder::FromJS(Node* node, Node* js_context,
wasm::ValueType type) {
DCHECK_NE(wasm::kWasmStmt, type);
+ // The parameter is of type AnyRef, we take it as is.
+ if (type == wasm::kWasmAnyRef) {
+ return node;
+ }
+
// Do a JavaScript ToNumber.
Node* num = BuildJavaScriptToNumber(node, js_context);
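For the new anyref support, ToJS and FromJS above become the identity for kWasmAnyRef, and kExprRefIsNull (handled earlier in this file) lowers to a WordEqual against the anyref_null_node_ constant now passed into the builder. A minimal sketch of that behaviour; the Ref struct and the function names are only illustrative:

    struct Ref { const void* ptr; };

    Ref ToJSAnyRef(Ref value) { return value; }      // kWasmAnyRef case in ToJS
    Ref FromJSAnyRef(Ref value) { return value; }    // kWasmAnyRef case in FromJS
    bool RefIsNull(Ref value, Ref anyref_null) {     // kExprRefIsNull lowering
      return value.ptr == anyref_null.ptr;           // WordEqual with anyref_null_node_
    }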
@@ -3028,11 +3042,11 @@ Node* WasmGraphBuilder::BuildHeapNumberValueIndexConstant() {
return jsgraph()->IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag);
}
-void WasmGraphBuilder::BuildJSToWasmWrapper(WasmCodeWrapper wasm_code,
- Address wasm_context_address) {
+void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<WeakCell> weak_instance,
+ wasm::WasmCode* wasm_code) {
const int wasm_count = static_cast<int>(sig_->parameter_count());
const int count =
- wasm_count + 4; // wasm_code, wasm_context, effect, and control.
+ wasm_count + 4; // wasm_code, instance_node, effect, and control.
Node** args = Buffer(count);
// Build the start and the JS parameter nodes.
@@ -3046,24 +3060,24 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(WasmCodeWrapper wasm_code,
Linkage::GetJSCallContextParamIndex(wasm_count + 1), "%context"),
graph()->start());
- // Create the wasm_context node to pass as parameter. This must be a
- // RelocatableIntPtrConstant because JSToWasm wrappers are compiled at module
- // compile time and patched at instance build time.
- DCHECK_NULL(wasm_context_);
- wasm_context_ = jsgraph()->RelocatableIntPtrConstant(
- reinterpret_cast<uintptr_t>(wasm_context_address),
- RelocInfo::WASM_CONTEXT_REFERENCE);
-
- Node* wasm_code_node = nullptr;
- if (!wasm_code.IsCodeObject()) {
- const wasm::WasmCode* code = wasm_code.GetWasmCode();
- Address instr_start =
- code == nullptr ? nullptr : code->instructions().start();
- wasm_code_node = jsgraph()->RelocatableIntPtrConstant(
- reinterpret_cast<intptr_t>(instr_start), RelocInfo::JS_TO_WASM_CALL);
- } else {
- wasm_code_node = HeapConstant(wasm_code.GetCode());
- }
+ // Create the instance_node node to pass as parameter. This is either
+ // an actual reference to an instance or a placeholder reference,
+ // since JSToWasm wrappers can be compiled at module compile time and
+ // patched at instance build time.
+ DCHECK_NULL(instance_node_);
+ // TODO(titzer): JSToWasmWrappers should load the instance from the
+ // incoming JSFunction, but this is currently too slow/too complex because
+ // we use a regular JS property with a private symbol.
+ instance_node_ = graph()->NewNode(
+ jsgraph()->machine()->Load(MachineType::TaggedPointer()),
+ jsgraph()->HeapConstant(weak_instance),
+ jsgraph()->Int32Constant(WeakCell::kValueOffset - kHeapObjectTag),
+ *effect_, *control_);
+
+ Address instr_start =
+ wasm_code == nullptr ? nullptr : wasm_code->instructions().start();
+ Node* wasm_code_node = jsgraph()->RelocatableIntPtrConstant(
+ reinterpret_cast<intptr_t>(instr_start), RelocInfo::JS_TO_WASM_CALL);
if (!wasm::IsJSCompatibleSignature(sig_)) {
// Throw a TypeError. Use the js_context of the calling javascript function
// (passed as a parameter), such that the generated code is js_context
@@ -3071,12 +3085,13 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(WasmCodeWrapper wasm_code,
BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, js_context,
nullptr, 0);
+ // TODO(titzer): remove the below weird special case.
// Add a dummy call to the wasm function so that the generated wrapper
// contains a reference to the wrapped wasm function. Without this reference
// the wasm function could not be re-imported into another wasm module.
int pos = 0;
args[pos++] = wasm_code_node;
- args[pos++] = wasm_context_.get();
+ args[pos++] = instance_node_.get();
args[pos++] = *effect_;
args[pos++] = *control_;
@@ -3092,7 +3107,7 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(WasmCodeWrapper wasm_code,
int pos = 0;
args[pos++] = wasm_code_node;
- args[pos++] = wasm_context_.get();
+ args[pos++] = instance_node_.get();
// Convert JS parameters to wasm numbers.
for (int i = 0; i < wasm_count; ++i) {
@@ -3127,42 +3142,15 @@ int WasmGraphBuilder::AddParameterNodes(Node** args, int pos, int param_count,
wasm::FunctionSig* sig) {
// Convert wasm numbers to JS values.
for (int i = 0; i < param_count; ++i) {
- Node* param = Param(i + 1); // Start from index 1 to drop the wasm_context.
+ Node* param =
+ Param(i + 1); // Start from index 1 to drop the instance_node.
args[pos++] = ToJS(param, sig->GetParam(i));
}
return pos;
}
-Node* WasmGraphBuilder::LoadImportDataAtOffset(int offset, Node* table) {
- offset = FixedArray::OffsetOfElementAt(offset) - kHeapObjectTag;
- Node* offset_node = jsgraph()->Int32Constant(offset);
- Node* import_data = graph()->NewNode(
- jsgraph()->machine()->Load(LoadRepresentation::TaggedPointer()), table,
- offset_node, *effect_, *control_);
- *effect_ = import_data;
- return import_data;
-}
-
-Node* WasmGraphBuilder::LoadNativeContext(Node* table) {
- // The js_imports_table is set up so that index 0 has isolate->native_context
- return LoadImportDataAtOffset(0, table);
-}
-
-int OffsetForImportData(int index, WasmGraphBuilder::ImportDataType type) {
- // The js_imports_table is set up so that index 0 has isolate->native_context
- // and for every index, 3*index+1 has the JSReceiver, 3*index+2 has function's
- // global proxy and 3*index+3 has function's context.
- return 3 * index + type;
-}
-
-Node* WasmGraphBuilder::LoadImportData(int index, ImportDataType type,
- Node* table) {
- return LoadImportDataAtOffset(OffsetForImportData(index, type), table);
-}
-
-bool WasmGraphBuilder::BuildWasmToJSWrapper(
- Handle<JSReceiver> target, Handle<FixedArray> global_js_imports_table,
- int index) {
+bool WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSReceiver> target,
+ int index) {
DCHECK(target->IsCallable());
int wasm_count = static_cast<int>(sig_->parameter_count());
@@ -3174,22 +3162,30 @@ bool WasmGraphBuilder::BuildWasmToJSWrapper(
*effect_ = start;
*control_ = start;
- // We add the target function to a table and look it up during runtime. This
- // ensures that if the GC kicks in, it doesn't need to patch the code for the
- // JS function.
- // js_imports_table is fixed array with global handle scope whose lifetime is
- // tied to the instance.
- // TODO(aseemgarg): explore using per-import global handle instead of a table
- Node* table_ptr = jsgraph()->IntPtrConstant(
- reinterpret_cast<intptr_t>(global_js_imports_table.location()));
- Node* table = graph()->NewNode(
- jsgraph()->machine()->Load(LoadRepresentation::TaggedPointer()),
- table_ptr, jsgraph()->IntPtrConstant(0), *effect_, *control_);
- *effect_ = table;
+ instance_node_.set(Param(compiler::kWasmInstanceParameterIndex));
+ Node* callables_node = LOAD_INSTANCE_FIELD(ImportedFunctionCallables,
+ MachineType::TaggedPointer());
+ Node* callable_node = LOAD_FIXED_ARRAY_SLOT(callables_node, index);
+ Node* undefined_node =
+ jsgraph()->Constant(handle(isolate->heap()->undefined_value(), isolate));
+
+ Node* compiled_module =
+ LOAD_INSTANCE_FIELD(CompiledModule, MachineType::TaggedPointer());
+ // TODO(wasm): native context is only weak because of recycling compiled
+ // modules.
+ Node* weak_native_context = graph()->NewNode(
+ jsgraph()->machine()->Load(MachineType::TaggedPointer()), compiled_module,
+ jsgraph()->Int32Constant(WasmCompiledModule::kNativeContextOffset -
+ kHeapObjectTag),
+ *effect_, *control_);
+ Node* native_context = graph()->NewNode(
+ jsgraph()->machine()->Load(MachineType::TaggedPointer()),
+ weak_native_context,
+ jsgraph()->Int32Constant(WeakCell::kValueOffset - kHeapObjectTag),
+ *effect_, *control_);
if (!wasm::IsJSCompatibleSignature(sig_)) {
// Throw a TypeError.
- Node* native_context = LoadNativeContext(table);
BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, native_context,
nullptr, 0);
// We don't need to return a value here, as the runtime call will not return
@@ -3198,54 +3194,89 @@ bool WasmGraphBuilder::BuildWasmToJSWrapper(
return false;
}
- Node** args = Buffer(wasm_count + 7);
-
+ Node** args = Buffer(wasm_count + 9);
Node* call = nullptr;
BuildModifyThreadInWasmFlag(false);
if (target->IsJSFunction()) {
Handle<JSFunction> function = Handle<JSFunction>::cast(target);
- if (function->shared()->internal_formal_parameter_count() == wasm_count) {
- int pos = 0;
- args[pos++] =
- LoadImportData(index, kFunction, table); // target callable.
- // Receiver.
- if (is_sloppy(function->shared()->language_mode()) &&
- !function->shared()->native()) {
- args[pos++] = LoadImportData(index, kGlobalProxy, table);
- } else {
- args[pos++] = jsgraph()->Constant(
- handle(isolate->heap()->undefined_value(), isolate));
- }
-
- call_descriptor = Linkage::GetJSCallDescriptor(
- graph()->zone(), false, wasm_count + 1, CallDescriptor::kNoFlags);
+ FieldAccess field_access = AccessBuilder::ForJSFunctionContext();
+ Node* function_context = graph()->NewNode(
+ jsgraph()->machine()->Load(MachineType::TaggedPointer()), callable_node,
+ jsgraph()->Int32Constant(field_access.offset - field_access.tag()),
+ *effect_, *control_);
- // Convert wasm numbers to JS values.
- pos = AddParameterNodes(args, pos, wasm_count, sig_);
+ if (!IsClassConstructor(function->shared()->kind())) {
+ if (function->shared()->internal_formal_parameter_count() == wasm_count) {
+ int pos = 0;
+ args[pos++] = callable_node; // target callable.
+ // Receiver.
+ if (is_sloppy(function->shared()->language_mode()) &&
+ !function->shared()->native()) {
+ Node* global_proxy = LOAD_FIXED_ARRAY_SLOT(
+ native_context, Context::GLOBAL_PROXY_INDEX);
+ args[pos++] = global_proxy;
+ } else {
+ args[pos++] = undefined_node;
+ }
- args[pos++] = jsgraph()->UndefinedConstant(); // new target
- args[pos++] = jsgraph()->Int32Constant(wasm_count); // argument count
- args[pos++] = LoadImportData(index, kFunctionContext, table);
- args[pos++] = *effect_;
- args[pos++] = *control_;
+ call_descriptor = Linkage::GetJSCallDescriptor(
+ graph()->zone(), false, wasm_count + 1, CallDescriptor::kNoFlags);
+
+ // Convert wasm numbers to JS values.
+ pos = AddParameterNodes(args, pos, wasm_count, sig_);
+
+ args[pos++] = undefined_node; // new target
+ args[pos++] = jsgraph()->Int32Constant(wasm_count); // argument count
+ args[pos++] = function_context;
+ args[pos++] = *effect_;
+ args[pos++] = *control_;
+
+ call = graph()->NewNode(jsgraph()->common()->Call(call_descriptor), pos,
+ args);
+ } else if (function->shared()->internal_formal_parameter_count() >= 0) {
+ Callable callable = CodeFactory::ArgumentAdaptor(isolate);
+ int pos = 0;
+ args[pos++] = jsgraph()->HeapConstant(callable.code());
+ args[pos++] = callable_node; // target callable
+ args[pos++] = undefined_node; // new target
+ args[pos++] = jsgraph()->Int32Constant(wasm_count); // argument count
+ args[pos++] = jsgraph()->Int32Constant(
+ function->shared()->internal_formal_parameter_count());
+ // Receiver.
+ if (is_sloppy(function->shared()->language_mode()) &&
+ !function->shared()->native()) {
+ Node* global_proxy = LOAD_FIXED_ARRAY_SLOT(
+ native_context, Context::GLOBAL_PROXY_INDEX);
+ args[pos++] = global_proxy;
+ } else {
+ args[pos++] = undefined_node;
+ }
- call = graph()->NewNode(jsgraph()->common()->Call(call_descriptor), pos,
- args);
+ // Convert wasm numbers to JS values.
+ pos = AddParameterNodes(args, pos, wasm_count, sig_);
+ args[pos++] = function_context;
+ args[pos++] = *effect_;
+ args[pos++] = *control_;
+ call = graph()->NewNode(
+ jsgraph()->common()->Call(Linkage::GetStubCallDescriptor(
+ isolate, jsgraph()->zone(), callable.descriptor(),
+ 1 + wasm_count, CallDescriptor::kNoFlags)),
+ pos, args);
+ }
}
}
// We cannot call the target directly, we have to use the Call builtin.
- Node* native_context = nullptr;
if (!call) {
int pos = 0;
+ // We cannot call the target directly, we have to use the Call builtin.
Callable callable = CodeFactory::Call(isolate);
args[pos++] = jsgraph()->HeapConstant(callable.code());
- args[pos++] = LoadImportData(index, kFunction, table); // target callable.
+ args[pos++] = callable_node;
args[pos++] = jsgraph()->Int32Constant(wasm_count); // argument count
- args[pos++] = jsgraph()->Constant(
- handle(isolate->heap()->undefined_value(), isolate)); // receiver
+ args[pos++] = undefined_node; // receiver
call_descriptor = Linkage::GetStubCallDescriptor(
isolate, graph()->zone(), callable.descriptor(), wasm_count + 1,
@@ -3259,7 +3290,6 @@ bool WasmGraphBuilder::BuildWasmToJSWrapper(
// is only needed if the target is a constructor to throw a TypeError, if
// the target is a native function, or if the target is a callable JSObject,
// which can only be constructed by the runtime.
- native_context = LoadNativeContext(table);
args[pos++] = native_context;
args[pos++] = *effect_;
args[pos++] = *control_;
@@ -3271,15 +3301,13 @@ bool WasmGraphBuilder::BuildWasmToJSWrapper(
*effect_ = call;
SetSourcePosition(call, 0);
- BuildModifyThreadInWasmFlag(true);
-
// Convert the return value back.
Node* val = sig_->return_count() == 0
? jsgraph()->Int32Constant(0)
- : FromJS(call,
- native_context != nullptr ? native_context
- : LoadNativeContext(table),
- sig_->GetReturn());
+ : FromJS(call, native_context, sig_->GetReturn());
+
+ BuildModifyThreadInWasmFlag(true);
+
Return(val);
return true;
}
@@ -3293,46 +3321,6 @@ bool HasInt64ParamOrReturn(wasm::FunctionSig* sig) {
}
} // namespace
-void WasmGraphBuilder::BuildWasmToWasmWrapper(WasmCodeWrapper wasm_code,
- Address new_context_address) {
- int wasm_count = static_cast<int>(sig_->parameter_count());
- int count = wasm_count + 4; // wasm_code, wasm_context, effect, and control.
- Node** args = Buffer(count);
-
- // Build the start node.
- Node* start = Start(count + 1);
- *control_ = start;
- *effect_ = start;
-
- int pos = 0;
- // Add the wasm code target.
- if (!wasm_code.IsCodeObject()) {
- const wasm::WasmCode* code = wasm_code.GetWasmCode();
- Address instr_start =
- code == nullptr ? nullptr : code->instructions().start();
- args[pos++] = jsgraph()->RelocatableIntPtrConstant(
- reinterpret_cast<intptr_t>(instr_start), RelocInfo::JS_TO_WASM_CALL);
- } else {
- args[pos++] = jsgraph()->HeapConstant(wasm_code.GetCode());
- }
- // Add the wasm_context of the other instance.
- args[pos++] = jsgraph()->IntPtrConstant(
- reinterpret_cast<uintptr_t>(new_context_address));
- // Add the parameters starting from index 1 since the parameter with index 0
- // is the old wasm_context.
- for (int i = 0; i < wasm_count; ++i) {
- args[pos++] = Param(i + 1);
- }
- args[pos++] = *effect_;
- args[pos++] = *control_;
-
- // Tail-call the wasm code.
- auto call_descriptor = GetWasmCallDescriptor(jsgraph()->zone(), sig_);
- Node* tail_call = graph()->NewNode(
- jsgraph()->common()->TailCall(call_descriptor), count, args);
- MergeControlToEnd(jsgraph(), tail_call);
-}
-
void WasmGraphBuilder::BuildWasmInterpreterEntry(uint32_t func_index) {
int param_count = static_cast<int>(sig_->parameter_count());
@@ -3366,7 +3354,7 @@ void WasmGraphBuilder::BuildWasmInterpreterEntry(uint32_t func_index) {
for (int i = 0; i < param_count; ++i) {
wasm::ValueType type = sig_->GetParam(i);
- // Start from the parameter with index 1 to drop the wasm_context.
+ // Start from the parameter with index 1 to drop the instance_node.
*effect_ = graph()->NewNode(GetSafeStoreOperator(offset, type), arg_buffer,
Int32Constant(offset), Param(i + 1), *effect_,
*control_);
@@ -3407,27 +3395,22 @@ void WasmGraphBuilder::BuildCWasmEntry() {
*effect_ = start;
// Create parameter nodes (offset by 1 for the receiver parameter).
- Node* code_obj = nullptr;
- if (FLAG_wasm_jit_to_native) {
- Node* foreign_code_obj = Param(CWasmEntryParameters::kCodeObject + 1);
- MachineOperatorBuilder* machine = jsgraph()->machine();
- code_obj = graph()->NewNode(
- machine->Load(MachineType::Pointer()), foreign_code_obj,
- Int32Constant(Foreign::kForeignAddressOffset - kHeapObjectTag),
- *effect_, *control_);
- } else {
- code_obj = Param(CWasmEntryParameters::kCodeObject + 1);
- }
- Node* wasm_context = Param(CWasmEntryParameters::kWasmContext + 1);
+ Node* foreign_code_obj = Param(CWasmEntryParameters::kCodeObject + 1);
+ MachineOperatorBuilder* machine = jsgraph()->machine();
+ Node* code_obj = graph()->NewNode(
+ machine->Load(MachineType::Pointer()), foreign_code_obj,
+ Int32Constant(Foreign::kForeignAddressOffset - kHeapObjectTag), *effect_,
+ *control_);
+ Node* instance_node = Param(CWasmEntryParameters::kWasmInstance + 1);
Node* arg_buffer = Param(CWasmEntryParameters::kArgumentsBuffer + 1);
int wasm_arg_count = static_cast<int>(sig_->parameter_count());
- int arg_count = wasm_arg_count + 4; // code, wasm_context, control, effect
+ int arg_count = wasm_arg_count + 4; // code, instance_node, control, effect
Node** args = Buffer(arg_count);
int pos = 0;
args[pos++] = code_obj;
- args[pos++] = wasm_context;
+ args[pos++] = instance_node;
int offset = 0;
for (wasm::ValueType type : sig_->parameters()) {
@@ -3475,48 +3458,46 @@ void WasmGraphBuilder::BuildCWasmEntry() {
}
}
-void WasmGraphBuilder::InitContextCache(WasmContextCacheNodes* context_cache) {
- DCHECK_NOT_NULL(wasm_context_);
+void WasmGraphBuilder::InitInstanceCache(
+ WasmInstanceCacheNodes* instance_cache) {
+ DCHECK_NOT_NULL(instance_node_);
DCHECK_NOT_NULL(*control_);
DCHECK_NOT_NULL(*effect_);
// Load the memory start.
Node* mem_start = graph()->NewNode(
- jsgraph()->machine()->Load(MachineType::UintPtr()), wasm_context_.get(),
- jsgraph()->Int32Constant(
- static_cast<int32_t>(offsetof(WasmContext, mem_start))),
+ jsgraph()->machine()->Load(MachineType::UintPtr()), instance_node_.get(),
+ jsgraph()->Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(MemoryStart)),
*effect_, *control_);
*effect_ = mem_start;
- context_cache->mem_start = mem_start;
+ instance_cache->mem_start = mem_start;
// Load the memory size.
Node* mem_size = graph()->NewNode(
- jsgraph()->machine()->Load(MachineType::Uint32()), wasm_context_.get(),
- jsgraph()->Int32Constant(
- static_cast<int32_t>(offsetof(WasmContext, mem_size))),
+ jsgraph()->machine()->Load(MachineType::Uint32()), instance_node_.get(),
+ jsgraph()->Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(MemorySize)),
*effect_, *control_);
*effect_ = mem_size;
- context_cache->mem_size = mem_size;
+ instance_cache->mem_size = mem_size;
if (untrusted_code_mitigations_) {
// Load the memory mask.
Node* mem_mask = graph()->NewNode(
- jsgraph()->machine()->Load(MachineType::Uint32()), wasm_context_.get(),
- jsgraph()->Int32Constant(
- static_cast<int32_t>(offsetof(WasmContext, mem_mask))),
+ jsgraph()->machine()->Load(MachineType::Uint32()), instance_node_.get(),
+ jsgraph()->Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(MemoryMask)),
*effect_, *control_);
*effect_ = mem_mask;
- context_cache->mem_mask = mem_mask;
+ instance_cache->mem_mask = mem_mask;
} else {
// Explicitly set to nullptr to ensure a SEGV when we try to use it.
- context_cache->mem_mask = nullptr;
+ instance_cache->mem_mask = nullptr;
}
}
-void WasmGraphBuilder::PrepareContextCacheForLoop(
- WasmContextCacheNodes* context_cache, Node* control) {
+void WasmGraphBuilder::PrepareInstanceCacheForLoop(
+ WasmInstanceCacheNodes* instance_cache, Node* control) {
#define INTRODUCE_PHI(field, rep) \
- context_cache->field = Phi(rep, 1, &context_cache->field, control);
+ instance_cache->field = Phi(rep, 1, &instance_cache->field, control);
INTRODUCE_PHI(mem_start, MachineType::PointerRepresentation());
INTRODUCE_PHI(mem_size, MachineRepresentation::kWord32);
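InitInstanceCache reads the memory start, size and (with mitigations) mask once from the instance object and keeps them as cached nodes; PrepareInstanceCacheForLoop then wraps each cached node in a one-input phi so loop back-edges can merge updated values later. A rough picture of the cached state; in the real builder these are TurboFan Node*, not raw values:

    #include <cstdint>

    struct WasmInstanceCacheSketch {
      uintptr_t mem_start;  // from the MemoryStart instance field
      uint32_t  mem_size;   // from the MemorySize instance field
      uint32_t  mem_mask;   // from MemoryMask; only set with untrusted-code mitigations
    };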
@@ -3527,9 +3508,9 @@ void WasmGraphBuilder::PrepareContextCacheForLoop(
#undef INTRODUCE_PHI
}
-void WasmGraphBuilder::NewContextCacheMerge(WasmContextCacheNodes* to,
- WasmContextCacheNodes* from,
- Node* merge) {
+void WasmGraphBuilder::NewInstanceCacheMerge(WasmInstanceCacheNodes* to,
+ WasmInstanceCacheNodes* from,
+ Node* merge) {
#define INTRODUCE_PHI(field, rep) \
if (to->field != from->field) { \
Node* vals[] = {to->field, from->field}; \
@@ -3545,9 +3526,9 @@ void WasmGraphBuilder::NewContextCacheMerge(WasmContextCacheNodes* to,
#undef INTRODUCE_PHI
}
-void WasmGraphBuilder::MergeContextCacheInto(WasmContextCacheNodes* to,
- WasmContextCacheNodes* from,
- Node* merge) {
+void WasmGraphBuilder::MergeInstanceCacheInto(WasmInstanceCacheNodes* to,
+ WasmInstanceCacheNodes* from,
+ Node* merge) {
to->mem_size = CreateOrMergeIntoPhi(MachineRepresentation::kWord32, merge,
to->mem_size, from->mem_size);
to->mem_start = CreateOrMergeIntoPhi(MachineType::PointerRepresentation(),
@@ -3591,21 +3572,21 @@ Node* WasmGraphBuilder::CreateOrMergeIntoEffectPhi(Node* merge, Node* tnode,
void WasmGraphBuilder::GetGlobalBaseAndOffset(MachineType mem_type,
uint32_t offset, Node** base_node,
Node** offset_node) {
- DCHECK_NOT_NULL(wasm_context_);
+ DCHECK_NOT_NULL(instance_node_);
if (globals_start_ == nullptr) {
- // Load globals_start from the WasmContext at runtime.
+ // Load globals_start from the instance object at runtime.
// TODO(wasm): we currently generate only one load of the {globals_start}
// start per graph, which means it can be placed anywhere by the scheduler.
// This is legal because the globals_start should never change.
- // However, in some cases (e.g. if the WasmContext is already in a
+ // However, in some cases (e.g. if the instance object is already in a
// register), it is slightly more efficient to reload this value from the
- // WasmContext. Since this depends on register allocation, it is not
+ // instance object. Since this depends on register allocation, it is not
// possible to express in the graph, and would essentially constitute a
// "mem2reg" optimization in TurboFan.
globals_start_ = graph()->NewNode(
- jsgraph()->machine()->Load(MachineType::UintPtr()), wasm_context_.get(),
- jsgraph()->Int32Constant(
- static_cast<int32_t>(offsetof(WasmContext, globals_start))),
+ jsgraph()->machine()->Load(MachineType::UintPtr()),
+ instance_node_.get(),
+ jsgraph()->Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(GlobalsStart)),
graph()->start(), graph()->start());
}
*base_node = globals_start_.get();
@@ -3622,8 +3603,8 @@ void WasmGraphBuilder::GetGlobalBaseAndOffset(MachineType mem_type,
}
Node* WasmGraphBuilder::MemBuffer(uint32_t offset) {
- DCHECK_NOT_NULL(context_cache_);
- Node* mem_start = context_cache_->mem_start;
+ DCHECK_NOT_NULL(instance_cache_);
+ Node* mem_start = instance_cache_->mem_start;
DCHECK_NOT_NULL(mem_start);
if (offset == 0) return mem_start;
return graph()->NewNode(jsgraph()->machine()->IntAdd(), mem_start,
@@ -3633,8 +3614,8 @@ Node* WasmGraphBuilder::MemBuffer(uint32_t offset) {
Node* WasmGraphBuilder::CurrentMemoryPages() {
// CurrentMemoryPages can not be called from asm.js.
DCHECK_EQ(wasm::kWasmOrigin, env_->module->origin());
- DCHECK_NOT_NULL(context_cache_);
- Node* mem_size = context_cache_->mem_size;
+ DCHECK_NOT_NULL(instance_cache_);
+ Node* mem_size = instance_cache_->mem_size;
DCHECK_NOT_NULL(mem_size);
if (jsgraph()->machine()->Is64()) {
mem_size = graph()->NewNode(jsgraph()->machine()->TruncateInt64ToInt32(),
@@ -3645,47 +3626,6 @@ Node* WasmGraphBuilder::CurrentMemoryPages() {
jsgraph()->Int32Constant(WhichPowerOf2(wasm::kWasmPageSize)));
}
-void WasmGraphBuilder::GetFunctionTableNodes(uint32_t table_index, Node** table,
- Node** table_size) {
- if (WASM_CONTEXT_TABLES) {
- // The table address and size are stored in the WasmContext.
- // Don't bother caching them, since they are only used in indirect calls,
- // which would cause them to be spilled on the stack anyway.
- *table = graph()->NewNode(
- jsgraph()->machine()->Load(MachineType::UintPtr()), wasm_context_.get(),
- jsgraph()->Int32Constant(
- static_cast<int32_t>(offsetof(WasmContext, table))),
- *effect_, *control_);
- *table_size = graph()->NewNode(
- jsgraph()->machine()->Load(MachineType::Uint32()), wasm_context_.get(),
- jsgraph()->Int32Constant(
- static_cast<int32_t>(offsetof(WasmContext, table_size))),
- *effect_, *control_);
- } else {
- // The function table nodes are relocatable constants.
- if (function_tables_.size() == 0) {
- size_t tables_size = env_->function_tables.size();
- for (size_t i = 0; i < tables_size; ++i) {
- wasm::GlobalHandleAddress function_handle_address =
- env_->function_tables[i];
- Node* table_addr = jsgraph()->RelocatableIntPtrConstant(
- reinterpret_cast<intptr_t>(function_handle_address),
- RelocInfo::WASM_GLOBAL_HANDLE);
- uint32_t table_size = env_->module->function_tables[i].initial_size;
- Node* size = jsgraph()->RelocatableInt32Constant(
- static_cast<uint32_t>(table_size),
- RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE);
- function_tables_.push_back({table_addr, size});
- }
- }
- *table_size = function_tables_[table_index].size;
- *table =
- graph()->NewNode(jsgraph()->machine()->Load(MachineType::AnyTagged()),
- function_tables_[table_index].table_addr,
- jsgraph()->IntPtrConstant(0), *effect_, *control_);
- }
-}
-
Node* WasmGraphBuilder::BuildModifyThreadInWasmFlag(bool new_value) {
// TODO(eholk): generate code to modify the thread-local storage directly,
// rather than calling the runtime.
@@ -3782,8 +3722,8 @@ Node* WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
wasm::WasmCodePosition position,
EnforceBoundsCheck enforce_check) {
if (FLAG_wasm_no_bounds_checks) return Uint32ToUintptr(index);
- DCHECK_NOT_NULL(context_cache_);
- Node* mem_size = context_cache_->mem_size;
+ DCHECK_NOT_NULL(instance_cache_);
+ Node* mem_size = instance_cache_->mem_size;
DCHECK_NOT_NULL(mem_size);
auto m = jsgraph()->machine();
@@ -3851,7 +3791,7 @@ Node* WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
if (untrusted_code_mitigations_) {
// In the fallthrough case, condition the index with the memory mask.
- Node* mem_mask = context_cache_->mem_mask;
+ Node* mem_mask = instance_cache_->mem_mask;
DCHECK_NOT_NULL(mem_mask);
index = graph()->NewNode(m->Word32And(), index, mem_mask);
}
@@ -4028,9 +3968,9 @@ Node* GetAsmJsOOBValue(MachineRepresentation rep, JSGraph* jsgraph) {
} // namespace
Node* WasmGraphBuilder::BuildAsmjsLoadMem(MachineType type, Node* index) {
- DCHECK_NOT_NULL(context_cache_);
- Node* mem_start = context_cache_->mem_start;
- Node* mem_size = context_cache_->mem_size;
+ DCHECK_NOT_NULL(instance_cache_);
+ Node* mem_start = instance_cache_->mem_start;
+ Node* mem_size = instance_cache_->mem_size;
DCHECK_NOT_NULL(mem_start);
DCHECK_NOT_NULL(mem_size);
@@ -4048,7 +3988,7 @@ Node* WasmGraphBuilder::BuildAsmjsLoadMem(MachineType type, Node* index) {
if (untrusted_code_mitigations_) {
// Condition the index with the memory mask.
- Node* mem_mask = context_cache_->mem_mask;
+ Node* mem_mask = instance_cache_->mem_mask;
DCHECK_NOT_NULL(mem_mask);
index =
graph()->NewNode(jsgraph()->machine()->Word32And(), index, mem_mask);
@@ -4074,9 +4014,9 @@ Node* WasmGraphBuilder::Uint32ToUintptr(Node* node) {
Node* WasmGraphBuilder::BuildAsmjsStoreMem(MachineType type, Node* index,
Node* val) {
- DCHECK_NOT_NULL(context_cache_);
- Node* mem_start = context_cache_->mem_start;
- Node* mem_size = context_cache_->mem_size;
+ DCHECK_NOT_NULL(instance_cache_);
+ Node* mem_start = instance_cache_->mem_start;
+ Node* mem_size = instance_cache_->mem_size;
DCHECK_NOT_NULL(mem_start);
DCHECK_NOT_NULL(mem_size);
@@ -4092,7 +4032,7 @@ Node* WasmGraphBuilder::BuildAsmjsStoreMem(MachineType type, Node* index,
if (untrusted_code_mitigations_) {
// Condition the index with the memory mask.
- Node* mem_mask = context_cache_->mem_mask;
+ Node* mem_mask = instance_cache_->mem_mask;
DCHECK_NOT_NULL(mem_mask);
index =
graph()->NewNode(jsgraph()->machine()->Word32And(), index, mem_mask);
@@ -4542,40 +4482,76 @@ Node* WasmGraphBuilder::Simd8x16ShuffleOp(const uint8_t shuffle[16],
inputs[0], inputs[1]);
}
-#define ATOMIC_BINOP_LIST(V) \
- V(I32AtomicAdd, Add, Uint32) \
- V(I32AtomicAdd8U, Add, Uint8) \
- V(I32AtomicAdd16U, Add, Uint16) \
- V(I32AtomicSub, Sub, Uint32) \
- V(I32AtomicSub8U, Sub, Uint8) \
- V(I32AtomicSub16U, Sub, Uint16) \
- V(I32AtomicAnd, And, Uint32) \
- V(I32AtomicAnd8U, And, Uint8) \
- V(I32AtomicAnd16U, And, Uint16) \
- V(I32AtomicOr, Or, Uint32) \
- V(I32AtomicOr8U, Or, Uint8) \
- V(I32AtomicOr16U, Or, Uint16) \
- V(I32AtomicXor, Xor, Uint32) \
- V(I32AtomicXor8U, Xor, Uint8) \
- V(I32AtomicXor16U, Xor, Uint16) \
- V(I32AtomicExchange, Exchange, Uint32) \
- V(I32AtomicExchange8U, Exchange, Uint8) \
- V(I32AtomicExchange16U, Exchange, Uint16)
-
-#define ATOMIC_TERNARY_LIST(V) \
- V(I32AtomicCompareExchange, CompareExchange, Uint32) \
- V(I32AtomicCompareExchange8U, CompareExchange, Uint8) \
- V(I32AtomicCompareExchange16U, CompareExchange, Uint16)
-
-#define ATOMIC_LOAD_LIST(V) \
- V(I32AtomicLoad, Uint32) \
- V(I32AtomicLoad8U, Uint8) \
- V(I32AtomicLoad16U, Uint16)
-
-#define ATOMIC_STORE_LIST(V) \
- V(I32AtomicStore, Uint32, kWord32) \
- V(I32AtomicStore8U, Uint8, kWord8) \
- V(I32AtomicStore16U, Uint16, kWord16)
+#define ATOMIC_BINOP_LIST(V) \
+ V(I32AtomicAdd, Add, Uint32, Word32) \
+ V(I64AtomicAdd, Add, Uint64, Word64) \
+ V(I32AtomicAdd8U, Add, Uint8, Word32) \
+ V(I32AtomicAdd16U, Add, Uint16, Word32) \
+ V(I64AtomicAdd8U, Add, Uint8, Word64) \
+ V(I64AtomicAdd16U, Add, Uint16, Word64) \
+ V(I64AtomicAdd32U, Add, Uint32, Word64) \
+ V(I32AtomicSub, Sub, Uint32, Word32) \
+ V(I64AtomicSub, Sub, Uint64, Word64) \
+ V(I32AtomicSub8U, Sub, Uint8, Word32) \
+ V(I32AtomicSub16U, Sub, Uint16, Word32) \
+ V(I64AtomicSub8U, Sub, Uint8, Word64) \
+ V(I64AtomicSub16U, Sub, Uint16, Word64) \
+ V(I64AtomicSub32U, Sub, Uint32, Word64) \
+ V(I32AtomicAnd, And, Uint32, Word32) \
+ V(I64AtomicAnd, And, Uint64, Word64) \
+ V(I32AtomicAnd8U, And, Uint8, Word32) \
+ V(I64AtomicAnd16U, And, Uint16, Word64) \
+ V(I32AtomicAnd16U, And, Uint16, Word32) \
+ V(I64AtomicAnd8U, And, Uint8, Word64) \
+ V(I64AtomicAnd32U, And, Uint32, Word64) \
+ V(I32AtomicOr, Or, Uint32, Word32) \
+ V(I64AtomicOr, Or, Uint64, Word64) \
+ V(I32AtomicOr8U, Or, Uint8, Word32) \
+ V(I32AtomicOr16U, Or, Uint16, Word32) \
+ V(I64AtomicOr8U, Or, Uint8, Word64) \
+ V(I64AtomicOr16U, Or, Uint16, Word64) \
+ V(I64AtomicOr32U, Or, Uint32, Word64) \
+ V(I32AtomicXor, Xor, Uint32, Word32) \
+ V(I64AtomicXor, Xor, Uint64, Word64) \
+ V(I32AtomicXor8U, Xor, Uint8, Word32) \
+ V(I32AtomicXor16U, Xor, Uint16, Word32) \
+ V(I64AtomicXor8U, Xor, Uint8, Word64) \
+ V(I64AtomicXor16U, Xor, Uint16, Word64) \
+ V(I64AtomicXor32U, Xor, Uint32, Word64) \
+ V(I32AtomicExchange, Exchange, Uint32, Word32) \
+ V(I64AtomicExchange, Exchange, Uint64, Word64) \
+ V(I32AtomicExchange8U, Exchange, Uint8, Word32) \
+ V(I32AtomicExchange16U, Exchange, Uint16, Word32) \
+ V(I64AtomicExchange8U, Exchange, Uint8, Word64) \
+ V(I64AtomicExchange16U, Exchange, Uint16, Word64) \
+ V(I64AtomicExchange32U, Exchange, Uint32, Word64)
+
+#define ATOMIC_CMP_EXCHG_LIST(V) \
+ V(I32AtomicCompareExchange, Uint32, Word32) \
+ V(I64AtomicCompareExchange, Uint64, Word64) \
+ V(I32AtomicCompareExchange8U, Uint8, Word32) \
+ V(I32AtomicCompareExchange16U, Uint16, Word32) \
+ V(I64AtomicCompareExchange8U, Uint8, Word64) \
+ V(I64AtomicCompareExchange16U, Uint16, Word64) \
+ V(I64AtomicCompareExchange32U, Uint32, Word64)
+
+#define ATOMIC_LOAD_LIST(V) \
+ V(I32AtomicLoad, Uint32, Word32) \
+ V(I64AtomicLoad, Uint64, Word64) \
+ V(I32AtomicLoad8U, Uint8, Word32) \
+ V(I32AtomicLoad16U, Uint16, Word32) \
+ V(I64AtomicLoad8U, Uint8, Word64) \
+ V(I64AtomicLoad16U, Uint16, Word64) \
+ V(I64AtomicLoad32U, Uint32, Word64)
+
+#define ATOMIC_STORE_LIST(V) \
+ V(I32AtomicStore, Uint32, kWord32, Word32) \
+ V(I64AtomicStore, Uint64, kWord64, Word64) \
+ V(I32AtomicStore8U, Uint8, kWord8, Word32) \
+ V(I32AtomicStore16U, Uint16, kWord16, Word32) \
+ V(I64AtomicStore8U, Uint8, kWord8, Word64) \
+ V(I64AtomicStore16U, Uint16, kWord16, Word64) \
+ V(I64AtomicStore32U, Uint32, kWord32, Word64)
Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
uint32_t alignment, uint32_t offset,
@@ -4583,54 +4559,55 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
// TODO(gdeepti): Add alignment validation, traps on misalignment
Node* node;
switch (opcode) {
-#define BUILD_ATOMIC_BINOP(Name, Operation, Type) \
- case wasm::kExpr##Name: { \
- Node* index = \
- BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
- inputs[0], offset, position, kNeedsBoundsCheck); \
- node = graph()->NewNode( \
- jsgraph()->machine()->Word32Atomic##Operation(MachineType::Type()), \
- MemBuffer(offset), index, inputs[1], *effect_, *control_); \
- break; \
+#define BUILD_ATOMIC_BINOP(Name, Operation, Type, Prefix) \
+ case wasm::kExpr##Name: { \
+ Node* index = \
+ BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
+ inputs[0], offset, position, kNeedsBoundsCheck); \
+ node = graph()->NewNode( \
+ jsgraph()->machine()->Prefix##Atomic##Operation(MachineType::Type()), \
+ MemBuffer(offset), index, inputs[1], *effect_, *control_); \
+ break; \
}
ATOMIC_BINOP_LIST(BUILD_ATOMIC_BINOP)
#undef BUILD_ATOMIC_BINOP
-#define BUILD_ATOMIC_TERNARY_OP(Name, Operation, Type) \
+#define BUILD_ATOMIC_CMP_EXCHG(Name, Type, Prefix) \
case wasm::kExpr##Name: { \
Node* index = \
BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
inputs[0], offset, position, kNeedsBoundsCheck); \
node = graph()->NewNode( \
- jsgraph()->machine()->Word32Atomic##Operation(MachineType::Type()), \
+ jsgraph()->machine()->Prefix##AtomicCompareExchange( \
+ MachineType::Type()), \
MemBuffer(offset), index, inputs[1], inputs[2], *effect_, *control_); \
break; \
}
- ATOMIC_TERNARY_LIST(BUILD_ATOMIC_TERNARY_OP)
-#undef BUILD_ATOMIC_TERNARY_OP
+ ATOMIC_CMP_EXCHG_LIST(BUILD_ATOMIC_CMP_EXCHG)
+#undef BUILD_ATOMIC_CMP_EXCHG
-#define BUILD_ATOMIC_LOAD_OP(Name, Type) \
+#define BUILD_ATOMIC_LOAD_OP(Name, Type, Prefix) \
case wasm::kExpr##Name: { \
Node* index = \
BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
inputs[0], offset, position, kNeedsBoundsCheck); \
node = graph()->NewNode( \
- jsgraph()->machine()->Word32AtomicLoad(MachineType::Type()), \
+ jsgraph()->machine()->Prefix##AtomicLoad(MachineType::Type()), \
MemBuffer(offset), index, *effect_, *control_); \
break; \
}
ATOMIC_LOAD_LIST(BUILD_ATOMIC_LOAD_OP)
#undef BUILD_ATOMIC_LOAD_OP
-#define BUILD_ATOMIC_STORE_OP(Name, Type, Rep) \
- case wasm::kExpr##Name: { \
- Node* index = \
- BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
- inputs[0], offset, position, kNeedsBoundsCheck); \
- node = graph()->NewNode( \
- jsgraph()->machine()->Word32AtomicStore(MachineRepresentation::Rep), \
- MemBuffer(offset), index, inputs[1], *effect_, *control_); \
- break; \
+#define BUILD_ATOMIC_STORE_OP(Name, Type, Rep, Prefix) \
+ case wasm::kExpr##Name: { \
+ Node* index = \
+ BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
+ inputs[0], offset, position, kNeedsBoundsCheck); \
+ node = graph()->NewNode( \
+ jsgraph()->machine()->Prefix##AtomicStore(MachineRepresentation::Rep), \
+ MemBuffer(offset), index, inputs[1], *effect_, *control_); \
+ break; \
}
ATOMIC_STORE_LIST(BUILD_ATOMIC_STORE_OP)
#undef BUILD_ATOMIC_STORE_OP
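Every entry in the widened lists now names the machine-operator prefix (Word32 or Word64) as its last parameter, so one macro covers both 32- and 64-bit atomics. As a rough illustration (an expansion of the macro above, not standalone code), BUILD_ATOMIC_BINOP(I64AtomicAdd, Add, Uint64, Word64) generates:

    case wasm::kExprI64AtomicAdd: {
      Node* index =
          BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Uint64()),
                         inputs[0], offset, position, kNeedsBoundsCheck);
      node = graph()->NewNode(
          jsgraph()->machine()->Word64AtomicAdd(MachineType::Uint64()),
          MemBuffer(offset), index, inputs[1], *effect_, *control_);
      break;
    }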
@@ -4642,7 +4619,7 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
}
#undef ATOMIC_BINOP_LIST
-#undef ATOMIC_TERNARY_LIST
+#undef ATOMIC_CMP_EXCHG_LIST
#undef ATOMIC_LOAD_LIST
#undef ATOMIC_STORE_LIST
@@ -4665,18 +4642,13 @@ void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
va_end(arguments);
Handle<String> name_str =
isolate->factory()->NewStringFromAsciiChecked(buffer.start());
- Handle<String> script_str =
- isolate->factory()->NewStringFromAsciiChecked("(wasm)");
- Handle<SharedFunctionInfo> shared =
- isolate->factory()->NewSharedFunctionInfo(name_str, code, false);
- PROFILE(isolate, CodeCreateEvent(tag, AbstractCode::cast(*code), *shared,
- *script_str, 0, 0));
+ PROFILE(isolate, CodeCreateEvent(tag, AbstractCode::cast(*code), *name_str));
}
} // namespace
Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::WasmModule* module,
- WasmCodeWrapper wasm_code, uint32_t index,
- Address wasm_context_address,
+ Handle<WeakCell> weak_instance,
+ wasm::WasmCode* wasm_code, uint32_t index,
bool use_trap_handler) {
const wasm::WasmFunction* func = &module->functions[index];
@@ -4696,17 +4668,14 @@ Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::WasmModule* module,
Node* effect = nullptr;
// TODO(titzer): compile JS to WASM wrappers without a {ModuleEnv}.
- ModuleEnv env(module,
- // TODO(mtrofin): remove the Illegal builtin when we don't need
- // FLAG_wasm_jit_to_native
- BUILTIN_CODE(isolate, Illegal), // default_function_code
- use_trap_handler);
+ ModuleEnv env(module, use_trap_handler);
WasmGraphBuilder builder(&env, &zone, &jsgraph,
- CEntryStub(isolate, 1).GetCode(), func->sig);
+ CEntryStub(isolate, 1).GetCode(),
+ isolate->factory()->null_value(), func->sig);
builder.set_control_ptr(&control);
builder.set_effect_ptr(&effect);
- builder.BuildJSToWasmWrapper(wasm_code, wasm_context_address);
+ builder.BuildJSToWasmWrapper(weak_instance, wasm_code);
//----------------------------------------------------------------------------
// Run the compilation pipeline.
@@ -4731,12 +4700,13 @@ Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::WasmModule* module,
Vector<const char> func_name = CStrVector("js-to-wasm");
#endif
- CompilationInfo info(func_name, &zone, Code::JS_TO_WASM_FUNCTION);
+ OptimizedCompilationInfo info(func_name, &zone, Code::JS_TO_WASM_FUNCTION);
Handle<Code> code =
Pipeline::GenerateCodeForTesting(&info, isolate, incoming, &graph);
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_opt_code && !code.is_null()) {
- OFStream os(stdout);
+ CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
+ OFStream os(tracing_scope.file());
code->Disassemble(func_name.start(), os);
}
#endif
@@ -4790,10 +4760,10 @@ void ValidateImportWrapperReferencesImmovables(Handle<Code> wrapper) {
} // namespace
-Handle<Code> CompileWasmToJSWrapper(
- Isolate* isolate, Handle<JSReceiver> target, wasm::FunctionSig* sig,
- uint32_t index, wasm::ModuleOrigin origin, bool use_trap_handler,
- Handle<FixedArray> global_js_imports_table) {
+Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
+ wasm::FunctionSig* sig, uint32_t index,
+ wasm::ModuleOrigin origin,
+ bool use_trap_handler) {
//----------------------------------------------------------------------------
// Create the Graph
//----------------------------------------------------------------------------
@@ -4813,101 +4783,13 @@ Handle<Code> CompileWasmToJSWrapper(
origin == wasm::kAsmJsOrigin ? new (&zone) SourcePositionTable(&graph)
: nullptr;
- ModuleEnv env(nullptr, Handle<Code>::null(), use_trap_handler);
- WasmGraphBuilder builder(&env, &zone, &jsgraph,
- CEntryStub(isolate, 1).GetCode(), sig,
- source_position_table);
- builder.set_control_ptr(&control);
- builder.set_effect_ptr(&effect);
- if (builder.BuildWasmToJSWrapper(target, global_js_imports_table, index)) {
- global_js_imports_table->set(
- OffsetForImportData(index, WasmGraphBuilder::kFunction), *target);
- if (target->IsJSFunction()) {
- Handle<JSFunction> function = Handle<JSFunction>::cast(target);
- global_js_imports_table->set(
- OffsetForImportData(index, WasmGraphBuilder::kFunctionContext),
- function->context());
- global_js_imports_table->set(
- OffsetForImportData(index, WasmGraphBuilder::kGlobalProxy),
- function->context()->global_proxy());
- }
- }
-
- if (FLAG_trace_turbo_graph) { // Simple textual RPO.
- OFStream os(stdout);
- os << "-- Graph after change lowering -- " << std::endl;
- os << AsRPO(graph);
- }
-
- // Schedule and compile to machine code.
- CallDescriptor* incoming = GetWasmCallDescriptor(&zone, sig);
- if (machine.Is32()) {
- incoming = GetI32WasmCallDescriptor(&zone, incoming);
- }
-
-#ifdef DEBUG
- EmbeddedVector<char, 32> func_name;
- static unsigned id = 0;
- func_name.Truncate(SNPrintF(func_name, "wasm-to-js#%d", id++));
-#else
- Vector<const char> func_name = CStrVector("wasm-to-js");
-#endif
-
- CompilationInfo info(func_name, &zone, Code::WASM_TO_JS_FUNCTION);
- Handle<Code> code = Pipeline::GenerateCodeForTesting(
- &info, isolate, incoming, &graph, nullptr, source_position_table);
- ValidateImportWrapperReferencesImmovables(code);
- Handle<FixedArray> deopt_data =
- isolate->factory()->NewFixedArray(2, TENURED);
- intptr_t loc =
- reinterpret_cast<intptr_t>(global_js_imports_table.location());
- Handle<Object> loc_handle = isolate->factory()->NewHeapNumberFromBits(loc);
- deopt_data->set(0, *loc_handle);
- Handle<Object> index_handle = isolate->factory()->NewNumberFromInt(
- OffsetForImportData(index, WasmGraphBuilder::kFunction));
- deopt_data->set(1, *index_handle);
- code->set_deoptimization_data(*deopt_data);
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_opt_code && !code.is_null()) {
- OFStream os(stdout);
- code->Disassemble(func_name.start(), os);
- }
-#endif
-
- if (must_record_function_compilation(isolate)) {
- RecordFunctionCompilation(CodeEventListener::STUB_TAG, isolate, code,
- "%.*s", func_name.length(), func_name.start());
- }
-
- return code;
-}
-
-Handle<Code> CompileWasmToWasmWrapper(Isolate* isolate, WasmCodeWrapper target,
- wasm::FunctionSig* sig,
- Address new_wasm_context_address) {
- //----------------------------------------------------------------------------
- // Create the Graph
- //----------------------------------------------------------------------------
- Zone zone(isolate->allocator(), ZONE_NAME);
- Graph graph(&zone);
- CommonOperatorBuilder common(&zone);
- MachineOperatorBuilder machine(
- &zone, MachineType::PointerRepresentation(),
- InstructionSelector::SupportedMachineOperatorFlags(),
- InstructionSelector::AlignmentRequirements());
- JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
-
- Node* control = nullptr;
- Node* effect = nullptr;
-
- ModuleEnv env(
- nullptr, Handle<Code>::null(),
- !target.IsCodeObject() && target.GetWasmCode()->HasTrapHandlerIndex());
- WasmGraphBuilder builder(&env, &zone, &jsgraph, Handle<Code>(), sig);
+ ModuleEnv env(nullptr, use_trap_handler);
+ WasmGraphBuilder builder(
+ &env, &zone, &jsgraph, CEntryStub(isolate, 1).GetCode(),
+ isolate->factory()->null_value(), sig, source_position_table);
builder.set_control_ptr(&control);
builder.set_effect_ptr(&effect);
- builder.BuildWasmToWasmWrapper(target, new_wasm_context_address);
- if (HasInt64ParamOrReturn(sig)) builder.LowerInt64();
+ builder.BuildWasmToJSWrapper(target, index);
if (FLAG_trace_turbo_graph) { // Simple textual RPO.
OFStream os(stdout);
@@ -4920,44 +4802,38 @@ Handle<Code> CompileWasmToWasmWrapper(Isolate* isolate, WasmCodeWrapper target,
if (machine.Is32()) {
incoming = GetI32WasmCallDescriptor(&zone, incoming);
}
- bool debugging =
-#if DEBUG
- true;
+
+#ifdef DEBUG
+ EmbeddedVector<char, 32> func_name;
+ static unsigned id = 0;
+ func_name.Truncate(SNPrintF(func_name, "wasm-to-js#%d", id++));
#else
- FLAG_print_opt_code || FLAG_trace_turbo || FLAG_trace_turbo_graph;
+ Vector<const char> func_name = CStrVector("wasm-to-js");
#endif
- Vector<const char> func_name = ArrayVector("wasm-to-wasm");
- static unsigned id = 0;
- Vector<char> buffer;
- if (debugging) {
- buffer = Vector<char>::New(128);
- int chars = SNPrintF(buffer, "wasm-to-wasm#%d", id);
- func_name = Vector<const char>::cast(buffer.SubVector(0, chars));
- }
- CompilationInfo info(func_name, &zone, Code::WASM_TO_WASM_FUNCTION);
- Handle<Code> code =
- Pipeline::GenerateCodeForTesting(&info, isolate, incoming, &graph);
+ OptimizedCompilationInfo info(func_name, &zone, Code::WASM_TO_JS_FUNCTION);
+ Handle<Code> code = Pipeline::GenerateCodeForTesting(
+ &info, isolate, incoming, &graph, nullptr, source_position_table);
+ ValidateImportWrapperReferencesImmovables(code);
+
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_opt_code && !code.is_null()) {
- OFStream os(stdout);
- code->Disassemble(buffer.start(), os);
+ CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
+ OFStream os(tracing_scope.file());
+ code->Disassemble(func_name.start(), os);
}
#endif
- if (debugging) {
- buffer.Dispose();
- }
- if (isolate->logger()->is_logging_code_events() || isolate->is_profiling()) {
+
+ if (must_record_function_compilation(isolate)) {
RecordFunctionCompilation(CodeEventListener::STUB_TAG, isolate, code,
- "wasm-to-wasm");
+ "%.*s", func_name.length(), func_name.start());
}
return code;
}
Handle<Code> CompileWasmInterpreterEntry(Isolate* isolate, uint32_t func_index,
- wasm::FunctionSig* sig,
- Handle<WasmInstanceObject> instance) {
+ wasm::FunctionSig* sig) {
//----------------------------------------------------------------------------
// Create the Graph
//----------------------------------------------------------------------------
@@ -4974,7 +4850,8 @@ Handle<Code> CompileWasmInterpreterEntry(Isolate* isolate, uint32_t func_index,
Node* effect = nullptr;
WasmGraphBuilder builder(nullptr, &zone, &jsgraph,
- CEntryStub(isolate, 1).GetCode(), sig);
+ CEntryStub(isolate, 1).GetCode(),
+ isolate->factory()->null_value(), sig);
builder.set_control_ptr(&control);
builder.set_effect_ptr(&effect);
builder.BuildWasmInterpreterEntry(func_index);
@@ -5000,12 +4877,14 @@ Handle<Code> CompileWasmInterpreterEntry(Isolate* isolate, uint32_t func_index,
Vector<const char> func_name = CStrVector("wasm-interpreter-entry");
#endif
- CompilationInfo info(func_name, &zone, Code::WASM_INTERPRETER_ENTRY);
+ OptimizedCompilationInfo info(func_name, &zone,
+ Code::WASM_INTERPRETER_ENTRY);
code = Pipeline::GenerateCodeForTesting(&info, isolate, incoming, &graph,
nullptr);
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_opt_code && !code.is_null()) {
- OFStream os(stdout);
+ CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
+ OFStream os(tracing_scope.file());
code->Disassemble(func_name.start(), os);
}
#endif
@@ -5016,13 +4895,6 @@ Handle<Code> CompileWasmInterpreterEntry(Isolate* isolate, uint32_t func_index,
}
}
- if (!FLAG_wasm_jit_to_native) {
- Handle<FixedArray> deopt_data =
- isolate->factory()->NewFixedArray(1, TENURED);
- Handle<WeakCell> weak_instance = isolate->factory()->NewWeakCell(instance);
- deopt_data->set(0, *weak_instance);
- code->set_deoptimization_data(*deopt_data);
- }
return code;
}
@@ -5040,7 +4912,8 @@ Handle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
Node* effect = nullptr;
WasmGraphBuilder builder(nullptr, &zone, &jsgraph,
- CEntryStub(isolate, 1).GetCode(), sig);
+ CEntryStub(isolate, 1).GetCode(),
+ isolate->factory()->null_value(), sig);
builder.set_control_ptr(&control);
builder.set_effect_ptr(&effect);
builder.BuildCWasmEntry();
@@ -5073,12 +4946,13 @@ Handle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
debug_name[name_len] = '\0';
Vector<const char> debug_name_vec(debug_name, name_len);
- CompilationInfo info(debug_name_vec, &zone, Code::C_WASM_ENTRY);
+ OptimizedCompilationInfo info(debug_name_vec, &zone, Code::C_WASM_ENTRY);
Handle<Code> code =
Pipeline::GenerateCodeForTesting(&info, isolate, incoming, &graph);
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_opt_code && !code.is_null()) {
- OFStream os(stdout);
+ CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
+ OFStream os(tracing_scope.file());
code->Disassemble(debug_name, os);
}
#endif
@@ -5086,9 +4960,25 @@ Handle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
return code;
}
+WasmCompilationData::WasmCompilationData(
+ RuntimeExceptionSupport runtime_exception_support)
+ : protected_instructions_(
+ new std::vector<trap_handler::ProtectedInstructionData>()),
+ runtime_exception_support_(runtime_exception_support) {}
+
+void WasmCompilationData::AddProtectedInstruction(uint32_t instr_offset,
+ uint32_t landing_offset) {
+ protected_instructions_->emplace_back(
+ trap_handler::ProtectedInstructionData{instr_offset, landing_offset});
+}
+
+int FixedArrayOffsetMinusTag(uint32_t index) {
+ auto access = AccessBuilder::ForFixedArraySlot(index);
+ return access.offset - access.tag();
+}
+
SourcePositionTable* WasmCompilationUnit::BuildGraphForWasmFunction(
double* decode_ms) {
-
base::ElapsedTimer decode_timer;
if (FLAG_trace_wasm_decode_time) {
decode_timer.Start();
@@ -5097,9 +4987,13 @@ SourcePositionTable* WasmCompilationUnit::BuildGraphForWasmFunction(
SourcePositionTable* source_position_table =
new (tf_.jsgraph_->zone()) SourcePositionTable(tf_.jsgraph_->graph());
+ // We get the handle for {null_value()} directly from the isolate although we
+ // are on a background task because the handle is stored in the isolate
+ // anyways, and it is immortal and immovable.
WasmGraphBuilder builder(env_, tf_.jsgraph_->zone(), tf_.jsgraph_,
- centry_stub_, func_body_.sig, source_position_table,
- runtime_exception_support_);
+ centry_stub_, isolate_->factory()->null_value(),
+ func_body_.sig, source_position_table,
+ wasm_compilation_data_.runtime_exception_support());
tf_.graph_construction_result_ =
wasm::BuildTFGraph(isolate_->allocator(), &builder, func_body_);
if (tf_.graph_construction_result_.failed()) {
@@ -5170,11 +5064,9 @@ WasmCompilationUnit::WasmCompilationUnit(
counters_(counters ? counters : isolate->counters()),
centry_stub_(centry_stub),
func_index_(index),
- runtime_exception_support_(exception_support),
native_module_(native_module),
lower_simd_(lower_simd),
- protected_instructions_(
- new std::vector<trap_handler::ProtectedInstructionData>()),
+ wasm_compilation_data_(exception_support),
mode_(mode) {
switch (mode_) {
case WasmCompilationUnit::CompilationMode::kLiftoff:
@@ -5209,12 +5101,7 @@ void WasmCompilationUnit::ExecuteCompilation() {
TimedHistogramScope wasm_compile_function_time_scope(timed_histogram);
if (FLAG_trace_wasm_compiler) {
- if (func_name_.start() != nullptr) {
- PrintF("Compiling wasm function %d:'%.*s'\n\n", func_index(),
- func_name_.length(), func_name_.start());
- } else {
- PrintF("Compiling wasm function %d:<unnamed>\n\n", func_index());
- }
+ PrintF("Compiling wasm function %d\n\n", func_index_);
}
switch (mode_) {
@@ -5268,14 +5155,13 @@ void WasmCompilationUnit::ExecuteTurbofanCompilation() {
call_descriptor = GetI32WasmCallDescriptor(tf_.compilation_zone_.get(),
call_descriptor);
}
- tf_.info_.reset(new CompilationInfo(
+ tf_.info_.reset(new OptimizedCompilationInfo(
GetDebugName(tf_.compilation_zone_.get(), func_name_, func_index_),
tf_.compilation_zone_.get(), Code::WASM_FUNCTION));
tf_.job_.reset(Pipeline::NewWasmCompilationJob(
tf_.info_.get(), isolate_, tf_.jsgraph_, call_descriptor,
- source_positions, protected_instructions_.get(),
- env_->module->origin()));
+ source_positions, &wasm_compilation_data_, env_->module->origin()));
ok_ = tf_.job_->ExecuteJob() == CompilationJob::SUCCEEDED;
// TODO(bradnelson): Improve histogram handling of size_t.
counters()->wasm_compile_function_peak_memory_bytes()->AddSample(
@@ -5301,9 +5187,9 @@ void WasmCompilationUnit::ExecuteTurbofanCompilation() {
// WasmCompilationUnit::ExecuteLiftoffCompilation() is defined in
// liftoff-compiler.cc.
-WasmCodeWrapper WasmCompilationUnit::FinishCompilation(
+wasm::WasmCode* WasmCompilationUnit::FinishCompilation(
wasm::ErrorThrower* thrower) {
- WasmCodeWrapper ret;
+ wasm::WasmCode* ret;
switch (mode_) {
case WasmCompilationUnit::CompilationMode::kLiftoff:
ret = FinishLiftoffCompilation(thrower);
@@ -5314,13 +5200,13 @@ WasmCodeWrapper WasmCompilationUnit::FinishCompilation(
default:
UNREACHABLE();
}
- if (!ret.IsCodeObject() && ret.is_null()) {
+ if (ret == nullptr) {
thrower->RuntimeError("Error finalizing code.");
}
return ret;
}
-WasmCodeWrapper WasmCompilationUnit::FinishTurbofanCompilation(
+wasm::WasmCode* WasmCompilationUnit::FinishTurbofanCompilation(
wasm::ErrorThrower* thrower) {
if (!ok_) {
if (tf_.graph_construction_result_.failed()) {
@@ -5336,7 +5222,7 @@ WasmCodeWrapper WasmCompilationUnit::FinishTurbofanCompilation(
thrower->CompileFailed(message.start(), tf_.graph_construction_result_);
}
- return {};
+ return nullptr;
}
base::ElapsedTimer codegen_timer;
if (FLAG_trace_wasm_decode_time) {
@@ -5344,93 +5230,34 @@ WasmCodeWrapper WasmCompilationUnit::FinishTurbofanCompilation(
}
if (tf_.job_->FinalizeJob(isolate_) != CompilationJob::SUCCEEDED) {
- return {};
+ return nullptr;
}
- if (!FLAG_wasm_jit_to_native) {
- Handle<Code> code = tf_.info_->code();
- DCHECK(!code.is_null());
-
- if (FLAG_trace_wasm_decode_time) {
- double codegen_ms = codegen_timer.Elapsed().InMillisecondsF();
- PrintF("wasm-code-generation ok: %u bytes, %0.3f ms code generation\n",
- static_cast<unsigned>(func_body_.end - func_body_.start),
- codegen_ms);
- }
-
- PackProtectedInstructions(code);
- return WasmCodeWrapper(code);
- } else {
- // TODO(mtrofin): when we crystalize a design in lieu of WasmCodeDesc, that
- // works for both wasm and non-wasm, we can simplify AddCode to just take
- // that as a parameter.
- const CodeDesc& desc =
- tf_.job_->compilation_info()->wasm_code_desc()->code_desc;
- wasm::WasmCode* code = native_module_->AddCode(
- desc, tf_.job_->compilation_info()->wasm_code_desc()->frame_slot_count,
- func_index_,
- tf_.job_->compilation_info()->wasm_code_desc()->safepoint_table_offset,
- tf_.job_->compilation_info()->wasm_code_desc()->handler_table_offset,
- std::move(protected_instructions_), false);
- if (!code) {
- return WasmCodeWrapper(code);
- }
- if (FLAG_trace_wasm_decode_time) {
- double codegen_ms = codegen_timer.Elapsed().InMillisecondsF();
- PrintF("wasm-code-generation ok: %u bytes, %0.3f ms code generation\n",
- static_cast<unsigned>(func_body_.end - func_body_.start),
- codegen_ms);
- }
-
- PROFILE(isolate_,
- CodeCreateEvent(CodeEventListener::FUNCTION_TAG, code, func_name_));
-
- Handle<ByteArray> source_positions =
- tf_.job_->compilation_info()->wasm_code_desc()->source_positions_table;
-
- native_module_->compiled_module()->source_positions()->set(
- func_index_, *source_positions);
-#ifdef ENABLE_DISASSEMBLER
- // Note: only do this after setting source positions, as this will be
- // accessed and printed here.
- if (FLAG_print_code || FLAG_print_wasm_code) {
- // TODO(wasm): Use proper log files, here and elsewhere.
- PrintF("--- Native Wasm code ---\n");
- code->Print(isolate_);
- PrintF("--- End code ---\n");
- }
-#endif
-
- // TODO(mtrofin): this should probably move up in the common caller,
- // once liftoff has source positions. Until then, we'd need to handle
- // undefined values, which is complicating the code.
- LOG_CODE_EVENT(isolate_,
- CodeLinePosInfoRecordEvent(code->instructions().start(),
- *source_positions));
- return WasmCodeWrapper(code);
+ // TODO(mtrofin): when we crystalize a design in lieu of WasmCodeDesc, that
+ // works for both wasm and non-wasm, we can simplify AddCode to just take
+ // that as a parameter.
+ const CodeDesc& desc =
+ tf_.job_->compilation_info()->wasm_code_desc()->code_desc;
+ wasm::WasmCode* code = native_module_->AddCode(
+ desc, tf_.job_->compilation_info()->wasm_code_desc()->frame_slot_count,
+ func_index_,
+ tf_.job_->compilation_info()->wasm_code_desc()->safepoint_table_offset,
+ tf_.job_->compilation_info()->wasm_code_desc()->handler_table_offset,
+ wasm_compilation_data_.ReleaseProtectedInstructions(),
+ tf_.job_->compilation_info()->wasm_code_desc()->source_positions_table,
+ wasm::WasmCode::kTurbofan);
+ if (!code) return code;
+ if (FLAG_trace_wasm_decode_time) {
+ double codegen_ms = codegen_timer.Elapsed().InMillisecondsF();
+ PrintF("wasm-code-generation ok: %u bytes, %0.3f ms code generation\n",
+ static_cast<unsigned>(func_body_.end - func_body_.start),
+ codegen_ms);
}
-}
-// TODO(mtrofin): remove when FLAG_wasm_jit_to_native is not needed
-void WasmCompilationUnit::PackProtectedInstructions(Handle<Code> code) const {
- if (protected_instructions_->empty()) return;
- DCHECK_LT(protected_instructions_->size(), std::numeric_limits<int>::max());
- const int num_instructions =
- static_cast<int>(protected_instructions_->size());
- Handle<FixedArray> fn_protected = isolate_->factory()->NewFixedArray(
- num_instructions * Code::kTrapDataSize, TENURED);
- for (int i = 0; i < num_instructions; ++i) {
- const trap_handler::ProtectedInstructionData& instruction =
- protected_instructions_->at(i);
- fn_protected->set(Code::kTrapDataSize * i + Code::kTrapCodeOffset,
- Smi::FromInt(instruction.instr_offset));
- fn_protected->set(Code::kTrapDataSize * i + Code::kTrapLandingOffset,
- Smi::FromInt(instruction.landing_offset));
- }
- code->set_protected_instructions(*fn_protected);
+ return code;
}
-WasmCodeWrapper WasmCompilationUnit::FinishLiftoffCompilation(
+wasm::WasmCode* WasmCompilationUnit::FinishLiftoffCompilation(
wasm::ErrorThrower* thrower) {
CodeDesc desc;
liftoff_.asm_.GetCode(isolate_, &desc);
@@ -5438,57 +5265,17 @@ WasmCodeWrapper WasmCompilationUnit::FinishLiftoffCompilation(
Handle<ByteArray> source_positions =
liftoff_.source_position_table_builder_.ToSourcePositionTable(isolate_);
- WasmCodeWrapper ret;
- if (!FLAG_wasm_jit_to_native) {
- Handle<Code> code;
- code = isolate_->factory()->NewCode(
- desc, Code::WASM_FUNCTION, code, Builtins::kNoBuiltinId,
- source_positions, MaybeHandle<DeoptimizationData>(), kMovable,
- 0, // stub_key
- false, // is_turbofanned
- liftoff_.asm_.GetTotalFrameSlotCount(), // stack_slots
- liftoff_.safepoint_table_offset_);
- if (isolate_->logger()->is_logging_code_events() ||
- isolate_->is_profiling()) {
- RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, isolate_, code,
- "wasm#%d-liftoff", func_index_);
- }
+ wasm::WasmCode* code = native_module_->AddCode(
+ desc, liftoff_.asm_.GetTotalFrameSlotCount(), func_index_,
+ liftoff_.safepoint_table_offset_, 0,
+ wasm_compilation_data_.ReleaseProtectedInstructions(), source_positions,
+ wasm::WasmCode::kLiftoff);
- PackProtectedInstructions(code);
- ret = WasmCodeWrapper(code);
- } else {
- // TODO(herhut) Consider lifting it to FinishCompilation.
- native_module_->compiled_module()->source_positions()->set(
- func_index_, *source_positions);
- wasm::WasmCode* code =
- native_module_->AddCode(desc, liftoff_.asm_.GetTotalFrameSlotCount(),
- func_index_, liftoff_.safepoint_table_offset_,
- 0, std::move(protected_instructions_), true);
- PROFILE(isolate_,
- CodeCreateEvent(CodeEventListener::FUNCTION_TAG, code, func_name_));
- ret = WasmCodeWrapper(code);
- }
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_code || FLAG_print_wasm_code) {
- // TODO(wasm): Use proper log files, here and elsewhere.
- OFStream os(stdout);
- os << "--- Wasm liftoff code ---\n";
- EmbeddedVector<char, 64> func_name;
- if (func_name_.start() != nullptr) {
- SNPrintF(func_name, "#%d:%.*s", func_index(), func_name_.length(),
- func_name_.start());
- } else {
- SNPrintF(func_name, "wasm#%d", func_index());
- }
- ret.Disassemble(func_name.start(), isolate_, os);
- os << "--- End code ---\n";
- }
-#endif
- return ret;
+ return code;
}
// static
-WasmCodeWrapper WasmCompilationUnit::CompileWasmFunction(
+wasm::WasmCode* WasmCompilationUnit::CompileWasmFunction(
wasm::NativeModule* native_module, wasm::ErrorThrower* thrower,
Isolate* isolate, const wasm::ModuleWireBytes& wire_bytes, ModuleEnv* env,
const wasm::WasmFunction* function, CompilationMode mode) {
@@ -5498,7 +5285,7 @@ WasmCodeWrapper WasmCompilationUnit::CompileWasmFunction(
wire_bytes.start() + function->code.end_offset()};
WasmCompilationUnit unit(isolate, env, native_module, function_body,
- wire_bytes.GetNameOrNull(function),
+ wire_bytes.GetNameOrNull(function, env->module),
function->func_index,
CEntryStub(isolate, 1).GetCode(), mode);
unit.ExecuteCompilation();
@@ -5507,6 +5294,9 @@ WasmCodeWrapper WasmCompilationUnit::CompileWasmFunction(
#undef WASM_64
#undef FATAL_UNSUPPORTED_OPCODE
+#undef WASM_INSTANCE_OBJECT_OFFSET
+#undef LOAD_INSTANCE_FIELD
+#undef LOAD_FIXED_ARRAY_SLOT
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index e23fd4fe14..7ca36b56c0 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -9,8 +9,8 @@
// Clients of this interface shouldn't depend on lots of compiler internals.
// Do not include anything from src/compiler here!
-#include "src/compilation-info.h"
#include "src/compiler.h"
+#include "src/optimized-compilation-info.h"
#include "src/trap-handler/trap-handler.h"
#include "src/wasm/baseline/liftoff-assembler.h"
#include "src/wasm/function-body-decoder.h"
@@ -43,60 +43,19 @@ class WasmCode;
namespace compiler {
-// Indirect function tables contain a <smi(sig), code> pair for each entry.
-enum FunctionTableEntries : int {
- kFunctionTableSignatureOffset = 0,
- kFunctionTableCodeOffset = 1,
- kFunctionTableEntrySize = 2
-};
-constexpr inline int FunctionTableSigOffset(int i) {
- return kFunctionTableEntrySize * i + kFunctionTableSignatureOffset;
-}
-constexpr inline int FunctionTableCodeOffset(int i) {
- return kFunctionTableEntrySize * i + kFunctionTableCodeOffset;
-}
-
// The {ModuleEnv} encapsulates the module data that is used by the
-// {WasmGraphBuilder} during graph building. It represents the parameters to
-// which the compiled code should be specialized, including which code to call
-// for direct calls {function_code}, which tables to use for indirect calls
-// {function_tables}, memory start address and size {mem_start, mem_size},
-// as well as the module itself {module}.
+// {WasmGraphBuilder} during graph building.
// ModuleEnvs are shareable across multiple compilations.
struct ModuleEnv {
// A pointer to the decoded module's static representation.
const wasm::WasmModule* module;
- // The function tables are FixedArrays of <smi, code> pairs used to signature
- // check and dispatch indirect calls. It has the same length as
- // module.function_tables. We use the address to a global handle to the
- // FixedArray.
- const std::vector<Address> function_tables;
-
- // TODO(mtrofin): remove these 2 once we don't need FLAG_wasm_jit_to_native
- // Contains the code objects to call for each direct call.
- // (the same length as module.functions)
- const std::vector<Handle<Code>> function_code;
- // If the default code is not a null handle, always use it for direct calls.
- const Handle<Code> default_function_code;
+
// True if trap handling should be used in compiled code, rather than
// compiling in bounds checks for each memory access.
const bool use_trap_handler;
- ModuleEnv(const wasm::WasmModule* module, Handle<Code> default_function_code,
- bool use_trap_handler)
- : module(module),
- default_function_code(default_function_code),
- use_trap_handler(use_trap_handler) {}
-
- ModuleEnv(const wasm::WasmModule* module,
- std::vector<Address> function_tables,
- std::vector<Handle<Code>> function_code,
- Handle<Code> default_function_code, bool use_trap_handler)
- : module(module),
- function_tables(std::move(function_tables)),
- function_code(std::move(function_code)),
- default_function_code(default_function_code),
- use_trap_handler(use_trap_handler) {}
+ ModuleEnv(const wasm::WasmModule* module, bool use_trap_handler)
+ : module(module), use_trap_handler(use_trap_handler) {}
};
enum RuntimeExceptionSupport : bool {
@@ -104,6 +63,33 @@ enum RuntimeExceptionSupport : bool {
kNoRuntimeExceptionSupport = false
};
+// Information about Wasm compilation that needs to be plumbed through the
+// different layers of the compiler.
+class WasmCompilationData {
+ public:
+ explicit WasmCompilationData(RuntimeExceptionSupport);
+
+ void AddProtectedInstruction(uint32_t instr_offset, uint32_t landing_offset);
+
+ std::unique_ptr<std::vector<trap_handler::ProtectedInstructionData>>
+ ReleaseProtectedInstructions() {
+ return std::move(protected_instructions_);
+ }
+
+ RuntimeExceptionSupport runtime_exception_support() const {
+ return runtime_exception_support_;
+ }
+
+ private:
+ std::unique_ptr<std::vector<trap_handler::ProtectedInstructionData>>
+ protected_instructions_;
+
+ // See WasmGraphBuilder::runtime_exception_support_.
+ const RuntimeExceptionSupport runtime_exception_support_;
+
+ DISALLOW_COPY_AND_ASSIGN(WasmCompilationData);
+};
+
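WasmCompilationData replaces the loose protected-instruction vector that used to live directly on the compilation unit: the code generator records every guarded memory access into it, and the unit later hands the whole list to the NativeModule together with the finished code. A minimal usage sketch of the class declared below (the offsets are placeholders, not values from a real compilation):

    WasmCompilationData data(kRuntimeExceptionSupport);
    // Recorded by the code generator for each trap-handler-protected access:
    // the offset of the potentially faulting instruction and the offset of
    // the out-of-line landing pad that raises the out-of-bounds trap.
    data.AddProtectedInstruction(/*instr_offset=*/0x24, /*landing_offset=*/0x80);
    data.AddProtectedInstruction(/*instr_offset=*/0x3c, /*landing_offset=*/0x8c);

    // Ownership of the vector moves out exactly once, when the code is
    // registered with the native module (see the AddCode calls in
    // FinishTurbofanCompilation and FinishLiftoffCompilation).
    auto protected_instructions = data.ReleaseProtectedInstructions();

    // Still answers whether traps may call into the runtime or must go
    // through the C callback that the cctest suite installs.
    RuntimeExceptionSupport res = data.runtime_exception_support();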
class WasmCompilationUnit final {
public:
enum class CompilationMode : uint8_t { kLiftoff, kTurbofan };
@@ -124,22 +110,19 @@ class WasmCompilationUnit final {
~WasmCompilationUnit();
- int func_index() const { return func_index_; }
-
void ExecuteCompilation();
- WasmCodeWrapper FinishCompilation(wasm::ErrorThrower* thrower);
+ wasm::WasmCode* FinishCompilation(wasm::ErrorThrower* thrower);
- static WasmCodeWrapper CompileWasmFunction(
+ static wasm::WasmCode* CompileWasmFunction(
wasm::NativeModule* native_module, wasm::ErrorThrower* thrower,
Isolate* isolate, const wasm::ModuleWireBytes& wire_bytes, ModuleEnv* env,
const wasm::WasmFunction* function,
CompilationMode = GetDefaultCompilationMode());
size_t memory_cost() const { return memory_cost_; }
+ wasm::NativeModule* native_module() const { return native_module_; }
private:
- void PackProtectedInstructions(Handle<Code> code) const;
-
struct LiftoffData {
wasm::LiftoffAssembler asm_;
int safepoint_table_offset_;
@@ -157,19 +140,19 @@ class WasmCompilationUnit final {
// ExecuteCompilation, onto FinishCompilation (which happens on the main
// thread).
std::unique_ptr<Zone> compilation_zone_;
- std::unique_ptr<CompilationInfo> info_;
- std::unique_ptr<CompilationJob> job_;
+ std::unique_ptr<OptimizedCompilationInfo> info_;
+ std::unique_ptr<OptimizedCompilationJob> job_;
wasm::Result<wasm::DecodeStruct*> graph_construction_result_;
};
// Turbofan.
SourcePositionTable* BuildGraphForWasmFunction(double* decode_ms);
void ExecuteTurbofanCompilation();
- WasmCodeWrapper FinishTurbofanCompilation(wasm::ErrorThrower*);
+ wasm::WasmCode* FinishTurbofanCompilation(wasm::ErrorThrower*);
// Liftoff.
bool ExecuteLiftoffCompilation();
- WasmCodeWrapper FinishLiftoffCompilation(wasm::ErrorThrower*);
+ wasm::WasmCode* FinishLiftoffCompilation(wasm::ErrorThrower*);
Isolate* isolate_;
ModuleEnv* env_;
@@ -178,14 +161,11 @@ class WasmCompilationUnit final {
Counters* counters_;
Handle<Code> centry_stub_;
int func_index_;
- // See WasmGraphBuilder::runtime_exception_support_.
- RuntimeExceptionSupport runtime_exception_support_;
bool ok_ = true;
size_t memory_cost_ = 0;
wasm::NativeModule* native_module_;
bool lower_simd_;
- std::unique_ptr<std::vector<trap_handler::ProtectedInstructionData>>
- protected_instructions_;
+ WasmCompilationData wasm_compilation_data_;
CompilationMode mode_;
// {liftoff_} is valid if mode_ == kLiftoff, tf_ if mode_ == kTurbofan.
union {
@@ -199,35 +179,28 @@ class WasmCompilationUnit final {
};
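With WasmCodeWrapper gone, a compilation unit now always produces a wasm::WasmCode* owned by the NativeModule. The split between ExecuteCompilation (safe off the main thread) and FinishCompilation (main thread only) is unchanged. The sketch below mirrors the static CompileWasmFunction helper in wasm-compiler.cc and assumes the same surrounding objects (isolate, env, native_module, wire_bytes, function, thrower) are in scope:

    wasm::FunctionBody body{function->sig, function->code.offset(),
                            wire_bytes.start() + function->code.offset(),
                            wire_bytes.start() + function->code.end_offset()};
    WasmCompilationUnit unit(isolate, env, native_module, body,
                             wire_bytes.GetNameOrNull(function, env->module),
                             function->func_index,
                             CEntryStub(isolate, 1).GetCode(),
                             WasmCompilationUnit::GetDefaultCompilationMode());
    unit.ExecuteCompilation();                               // graph build + codegen
    wasm::WasmCode* code = unit.FinishCompilation(thrower);  // main thread only
    if (code == nullptr) {
      // |thrower| already carries the error, e.g. "Error finalizing code.".
    }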
// Wraps a JS function, producing a code object that can be called from wasm.
-// The global_js_imports_table is a global handle to a fixed array of target
-// JSReceiver with the lifetime tied to the module. We store it's location (non
-// GCable) in the generated code so that it can reside outside of GCed heap.
Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
wasm::FunctionSig* sig, uint32_t index,
wasm::ModuleOrigin origin,
- bool use_trap_handler,
- Handle<FixedArray> global_js_imports_table);
+ bool use_trap_handler);
// Wraps a given wasm code object, producing a code object.
V8_EXPORT_PRIVATE Handle<Code> CompileJSToWasmWrapper(
- Isolate* isolate, wasm::WasmModule* module, WasmCodeWrapper wasm_code,
- uint32_t index, Address wasm_context_address, bool use_trap_handler);
-
-// Wraps a wasm function, producing a code object that can be called from other
-// wasm instances (the WasmContext address must be changed).
-Handle<Code> CompileWasmToWasmWrapper(Isolate* isolate, WasmCodeWrapper target,
- wasm::FunctionSig* sig,
- Address new_wasm_context_address);
+ Isolate* isolate, wasm::WasmModule* module, Handle<WeakCell> weak_instance,
+ wasm::WasmCode* wasm_code, uint32_t index, bool use_trap_handler);
// Compiles a stub that redirects a call to a wasm function to the wasm
// interpreter. It's ABI compatible with the compiled wasm function.
Handle<Code> CompileWasmInterpreterEntry(Isolate* isolate, uint32_t func_index,
- wasm::FunctionSig* sig,
- Handle<WasmInstanceObject> instance);
+ wasm::FunctionSig* sig);
+
+// Helper function to get the offset into a fixed array for a given {index}.
+// TODO(titzer): access-builder.h is not accessible outside compiler. Move?
+int FixedArrayOffsetMinusTag(uint32_t index);
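The helper returns the raw byte offset of FixedArray slot {index} with the heap-object tag already subtracted, so generated code can address the slot from an untagged base pointer. The standalone sketch below only illustrates the arithmetic (the real helper goes through AccessBuilder::ForFixedArraySlot); the constants assume the uncompressed 64-bit layout used here: an 8-byte map word plus an 8-byte length word in the header, 8-byte slots, and a heap-object tag of 1.

    #include <cstdint>
    #include <cstdio>

    constexpr int kPointerSize = 8;    // one FixedArray slot on a 64-bit build
    constexpr int kHeaderSize = 16;    // map word + length word
    constexpr int kHeapObjectTag = 1;  // tag carried by heap object pointers

    constexpr int FixedArrayOffsetMinusTagSketch(uint32_t index) {
      return kHeaderSize + static_cast<int>(index) * kPointerSize - kHeapObjectTag;
    }

    int main() {
      // Slot 0 is 15 bytes past the tagged FixedArray pointer, slot 2 is at 31.
      std::printf("%d %d\n", FixedArrayOffsetMinusTagSketch(0),
                  FixedArrayOffsetMinusTagSketch(2));
      return 0;
    }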
enum CWasmEntryParameters {
kCodeObject,
- kWasmContext,
+ kWasmInstance,
kArgumentsBuffer,
// marker:
kNumParameters
@@ -238,12 +211,11 @@ enum CWasmEntryParameters {
// buffer and calls the wasm function given as first parameter.
Handle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig);
-// Values from the {WasmContext} are cached between WASM-level function calls.
+// Values from the instance object are cached between WASM-level function calls.
// This struct allows the SSA environment handling this cache to be defined
// and manipulated in wasm-compiler.{h,cc} instead of inside the WASM decoder.
-// (Note that currently, the globals base is immutable in a context, so not
-// cached here.)
-struct WasmContextCacheNodes {
+// (Note that currently, the globals base is immutable, so not cached here.)
+struct WasmInstanceCacheNodes {
Node* mem_start;
Node* mem_size;
Node* mem_mask;
@@ -254,10 +226,15 @@ struct WasmContextCacheNodes {
typedef ZoneVector<Node*> NodeVector;
class WasmGraphBuilder {
public:
- enum EnforceBoundsCheck : bool { kNeedsBoundsCheck, kCanOmitBoundsCheck };
+ enum EnforceBoundsCheck : bool {
+ kNeedsBoundsCheck = true,
+ kCanOmitBoundsCheck = false
+ };
+ enum UseRetpoline : bool { kRetpoline = true, kNoRetpoline = false };
WasmGraphBuilder(ModuleEnv* env, Zone* zone, JSGraph* graph,
- Handle<Code> centry_stub, wasm::FunctionSig* sig,
+ Handle<Code> centry_stub, Handle<Oddball> anyref_null,
+ wasm::FunctionSig* sig,
compiler::SourcePositionTable* spt = nullptr,
RuntimeExceptionSupport res = kRuntimeExceptionSupport);
@@ -292,6 +269,7 @@ class WasmGraphBuilder {
Node* IntPtrConstant(intptr_t value);
Node* Float32Constant(float value);
Node* Float64Constant(double value);
+ Node* RefNull() { return anyref_null_node_; }
Node* HeapConstant(Handle<HeapObject> value);
Node* Binop(wasm::WasmOpcode opcode, Node* left, Node* right,
wasm::WasmCodePosition position = wasm::kNoCodePosition);
@@ -351,29 +329,16 @@ class WasmGraphBuilder {
Node* CallIndirect(uint32_t index, Node** args, Node*** rets,
wasm::WasmCodePosition position);
- void BuildJSToWasmWrapper(WasmCodeWrapper wasm_code_start,
- Address wasm_context_address);
- enum ImportDataType {
- kFunction = 1,
- kGlobalProxy = 2,
- kFunctionContext = 3,
- };
- Node* LoadImportDataAtOffset(int offset, Node* table);
- Node* LoadNativeContext(Node* table);
- Node* LoadImportData(int index, ImportDataType type, Node* table);
+ void BuildJSToWasmWrapper(Handle<WeakCell> weak_instance,
+ wasm::WasmCode* wasm_code);
bool BuildWasmToJSWrapper(Handle<JSReceiver> target,
- Handle<FixedArray> global_js_imports_table,
int index);
- void BuildWasmToWasmWrapper(WasmCodeWrapper wasm_code_start,
- Address new_wasm_context_address);
void BuildWasmInterpreterEntry(uint32_t func_index);
void BuildCWasmEntry();
Node* ToJS(Node* node, wasm::ValueType type);
Node* FromJS(Node* node, Node* js_context, wasm::ValueType type);
Node* Invert(Node* node);
- void GetFunctionTableNodes(uint32_t table_index, Node** table,
- Node** table_size);
//-----------------------------------------------------------------------
// Operations that concern the linear memory.
@@ -391,8 +356,8 @@ class WasmGraphBuilder {
wasm::ValueType type);
static void PrintDebugName(Node* node);
- void set_wasm_context(Node* wasm_context) {
- this->wasm_context_ = wasm_context;
+ void set_instance_node(Node* instance_node) {
+ this->instance_node_ = instance_node;
}
Node* Control() { return *control_; }
@@ -405,17 +370,17 @@ class WasmGraphBuilder {
void GetGlobalBaseAndOffset(MachineType mem_type, uint32_t offset,
Node** base_node, Node** offset_node);
- // Utilities to manipulate sets of context cache nodes.
- void InitContextCache(WasmContextCacheNodes* context_cache);
- void PrepareContextCacheForLoop(WasmContextCacheNodes* context_cache,
- Node* control);
- void NewContextCacheMerge(WasmContextCacheNodes* to,
- WasmContextCacheNodes* from, Node* merge);
- void MergeContextCacheInto(WasmContextCacheNodes* to,
- WasmContextCacheNodes* from, Node* merge);
-
- void set_context_cache(WasmContextCacheNodes* context_cache) {
- this->context_cache_ = context_cache;
+ // Utilities to manipulate sets of instance cache nodes.
+ void InitInstanceCache(WasmInstanceCacheNodes* instance_cache);
+ void PrepareInstanceCacheForLoop(WasmInstanceCacheNodes* instance_cache,
+ Node* control);
+ void NewInstanceCacheMerge(WasmInstanceCacheNodes* to,
+ WasmInstanceCacheNodes* from, Node* merge);
+ void MergeInstanceCacheInto(WasmInstanceCacheNodes* to,
+ WasmInstanceCacheNodes* from, Node* merge);
+
+ void set_instance_cache(WasmInstanceCacheNodes* instance_cache) {
+ this->instance_cache_ = instance_cache;
}
wasm::FunctionSig* GetFunctionSignature() { return sig_; }
@@ -459,18 +424,18 @@ class WasmGraphBuilder {
Zone* const zone_;
JSGraph* const jsgraph_;
Node* const centry_stub_node_;
+ Node* const anyref_null_node_;
// env_ == nullptr means we're not compiling Wasm functions, such as for
// wrappers or interpreter stubs.
ModuleEnv* const env_ = nullptr;
- SetOncePointer<Node> wasm_context_;
+ SetOncePointer<Node> instance_node_;
struct FunctionTableNodes {
Node* table_addr;
Node* size;
};
- ZoneVector<FunctionTableNodes> function_tables_;
Node** control_ = nullptr;
Node** effect_ = nullptr;
- WasmContextCacheNodes* context_cache_ = nullptr;
+ WasmInstanceCacheNodes* instance_cache_ = nullptr;
SetOncePointer<Node> globals_start_;
Node** cur_buffer_;
size_t cur_bufsize_;
@@ -507,8 +472,8 @@ class WasmGraphBuilder {
template <typename... Args>
Node* BuildCCall(MachineSignature* sig, Node* function, Args... args);
Node* BuildWasmCall(wasm::FunctionSig* sig, Node** args, Node*** rets,
- wasm::WasmCodePosition position,
- Node* wasm_context = nullptr);
+ wasm::WasmCodePosition position, Node* instance_node,
+ UseRetpoline use_retpoline);
Node* BuildF32CopySign(Node* left, Node* right);
Node* BuildF64CopySign(Node* left, Node* right);
@@ -626,12 +591,15 @@ class WasmGraphBuilder {
Builtins::Name GetBuiltinIdForTrap(wasm::TrapReason reason);
};
-// The parameter index where the wasm_context paramter should be placed in wasm
+// The parameter index where the instance parameter should be placed in wasm
// call descriptors. This is used by the Int64Lowering::LowerNode method.
-constexpr int kWasmContextParameterIndex = 0;
+constexpr int kWasmInstanceParameterIndex = 0;
V8_EXPORT_PRIVATE CallDescriptor* GetWasmCallDescriptor(
- Zone* zone, wasm::FunctionSig* signature);
+ Zone* zone, wasm::FunctionSig* signature,
+ WasmGraphBuilder::UseRetpoline use_retpoline =
+ WasmGraphBuilder::kNoRetpoline);
+
V8_EXPORT_PRIVATE CallDescriptor* GetI32WasmCallDescriptor(
Zone* zone, CallDescriptor* call_descriptor);
V8_EXPORT_PRIVATE CallDescriptor* GetI32WasmCallDescriptorForSimd(
diff --git a/deps/v8/src/compiler/wasm-linkage.cc b/deps/v8/src/compiler/wasm-linkage.cc
index cef127f334..caf38c8d8c 100644
--- a/deps/v8/src/compiler/wasm-linkage.cc
+++ b/deps/v8/src/compiler/wasm-linkage.cc
@@ -32,6 +32,8 @@ MachineType MachineTypeFor(ValueType type) {
return MachineType::Float32();
case wasm::kWasmS128:
return MachineType::Simd128();
+ case wasm::kWasmAnyRef:
+ return MachineType::TaggedPointer();
default:
UNREACHABLE();
}
@@ -223,16 +225,18 @@ static constexpr Allocator parameter_registers(kGPParamRegisters,
} // namespace
// General code uses the above configuration data.
-CallDescriptor* GetWasmCallDescriptor(Zone* zone, wasm::FunctionSig* fsig) {
- // The '+ 1' here is to accomodate the wasm_context as first parameter.
+CallDescriptor* GetWasmCallDescriptor(
+ Zone* zone, wasm::FunctionSig* fsig,
+ WasmGraphBuilder::UseRetpoline use_retpoline) {
+ // The '+ 1' here is to accomodate the instance object as first parameter.
LocationSignature::Builder locations(zone, fsig->return_count(),
fsig->parameter_count() + 1);
// Add register and/or stack parameter(s).
Allocator params = parameter_registers;
- // The wasm_context.
- locations.AddParam(params.Next(MachineType::PointerRepresentation()));
+ // The instance object.
+ locations.AddParam(params.Next(MachineRepresentation::kTaggedPointer));
const int parameter_count = static_cast<int>(fsig->parameter_count());
for (int i = 0; i < parameter_count; i++) {
@@ -256,24 +260,22 @@ CallDescriptor* GetWasmCallDescriptor(Zone* zone, wasm::FunctionSig* fsig) {
const RegList kCalleeSaveFPRegisters = 0;
// The target for wasm calls is always a code object.
- MachineType target_type = FLAG_wasm_jit_to_native ? MachineType::Pointer()
- : MachineType::AnyTagged();
+ MachineType target_type = MachineType::Pointer();
LinkageLocation target_loc = LinkageLocation::ForAnyRegister(target_type);
- CallDescriptor::Kind kind = FLAG_wasm_jit_to_native
- ? CallDescriptor::kCallWasmFunction
- : CallDescriptor::kCallCodeObject;
-
- return new (zone) CallDescriptor( // --
- kind, // kind
- target_type, // target MachineType
- target_loc, // target location
- locations.Build(), // location_sig
- params.stack_offset, // stack_parameter_count
- compiler::Operator::kNoProperties, // properties
- kCalleeSaveRegisters, // callee-saved registers
- kCalleeSaveFPRegisters, // callee-saved fp regs
- CallDescriptor::kNoFlags, // flags
+ CallDescriptor::Kind kind = CallDescriptor::kCallWasmFunction;
+
+ return new (zone) CallDescriptor( // --
+ kind, // kind
+ target_type, // target MachineType
+ target_loc, // target location
+ locations.Build(), // location_sig
+ params.stack_offset, // stack_parameter_count
+ compiler::Operator::kNoProperties, // properties
+ kCalleeSaveRegisters, // callee-saved registers
+ kCalleeSaveFPRegisters, // callee-saved fp regs
+ use_retpoline ? CallDescriptor::kRetpoline
+ : CallDescriptor::kNoFlags, // flags
"wasm-call", // debug name
0, // allocatable registers
rets.stack_offset - params.stack_offset); // stack_return_count
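To make the '+ 1' concrete: for a wasm function taking two i32 parameters, the descriptor built here ends up with three parameter locations, and location 0 (kWasmInstanceParameterIndex) is the tagged instance object. A minimal sketch, assuming a compilation Zone* named zone is available:

    // Signature  i32 (i32, i32)  and its wasm call descriptor.
    wasm::FunctionSig::Builder sig_builder(zone, /*return_count=*/1,
                                           /*parameter_count=*/2);
    sig_builder.AddReturn(wasm::kWasmI32);
    sig_builder.AddParam(wasm::kWasmI32);
    sig_builder.AddParam(wasm::kWasmI32);
    CallDescriptor* descriptor = GetWasmCallDescriptor(zone, sig_builder.Build());

    // One tagged-pointer slot for the instance object plus the two i32s.
    DCHECK_EQ(3u, descriptor->ParameterCount());

Passing WasmGraphBuilder::kRetpoline as the new third argument only adds CallDescriptor::kRetpoline to the flags; the parameter layout stays the same.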
diff --git a/deps/v8/src/compiler/x64/code-generator-x64.cc b/deps/v8/src/compiler/x64/code-generator-x64.cc
index cc6d758a9a..43d6cbefb6 100644
--- a/deps/v8/src/compiler/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/x64/code-generator-x64.cc
@@ -6,12 +6,12 @@
#include <limits>
-#include "src/compilation-info.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/heap/heap-inl.h"
+#include "src/optimized-compilation-info.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"
@@ -257,42 +257,83 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Zone* zone_;
};
-class WasmOutOfLineTrap final : public OutOfLineCode {
+class WasmOutOfLineTrap : public OutOfLineCode {
public:
- WasmOutOfLineTrap(CodeGenerator* gen, int pc, bool frame_elided,
- Instruction* instr)
+ WasmOutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
: OutOfLineCode(gen),
gen_(gen),
- pc_(pc),
frame_elided_(frame_elided),
instr_(instr) {}
- // TODO(eholk): Refactor this method to take the code generator as a
- // parameter.
- void Generate() final {
- gen_->AddProtectedInstructionLanding(pc_, __ pc_offset());
+ void Generate() override {
+ X64OperandConverter i(gen_, instr_);
+
+ Builtins::Name trap_id =
+ static_cast<Builtins::Name>(i.InputInt32(instr_->InputCount() - 1));
+ GenerateWithTrapId(trap_id);
+ }
+
+ protected:
+ CodeGenerator* gen_;
+ void GenerateWithTrapId(Builtins::Name trap_id) {
+ bool old_has_frame = __ has_frame();
if (frame_elided_) {
+ __ set_has_frame(true);
__ EnterFrame(StackFrame::WASM_COMPILED);
}
-
- gen_->AssembleSourcePosition(instr_);
- __ Call(__ isolate()->builtins()->builtin_handle(
- Builtins::kThrowWasmTrapMemOutOfBounds),
- RelocInfo::CODE_TARGET);
- ReferenceMap* reference_map = new (gen_->zone()) ReferenceMap(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
- Safepoint::kNoLazyDeopt);
- __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
+ GenerateCallToTrap(trap_id);
+ if (frame_elided_) {
+ __ set_has_frame(old_has_frame);
+ }
}
private:
- CodeGenerator* gen_;
- int pc_;
+ void GenerateCallToTrap(Builtins::Name trap_id) {
+ if (!gen_->wasm_runtime_exception_support()) {
+ // We cannot test calls to the runtime in cctest/test-run-wasm.
+ // Therefore we emit a call to C here instead of a call to the runtime.
+ __ PrepareCallCFunction(0);
+ __ CallCFunction(
+ ExternalReference::wasm_call_trap_callback_for_testing(__ isolate()),
+ 0);
+ __ LeaveFrame(StackFrame::WASM_COMPILED);
+ auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
+ size_t pop_size = call_descriptor->StackParameterCount() * kPointerSize;
+ // Use rcx as a scratch register, we return anyways immediately.
+ __ Ret(static_cast<int>(pop_size), rcx);
+ } else {
+ gen_->AssembleSourcePosition(instr_);
+ __ Call(__ isolate()->builtins()->builtin_handle(trap_id),
+ RelocInfo::CODE_TARGET);
+ ReferenceMap* reference_map =
+ new (gen_->zone()) ReferenceMap(gen_->zone());
+ gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
+ }
+ }
+
bool frame_elided_;
Instruction* instr_;
};
+class WasmProtectedInstructionTrap final : public WasmOutOfLineTrap {
+ public:
+ WasmProtectedInstructionTrap(CodeGenerator* gen, int pc, bool frame_elided,
+ Instruction* instr)
+ : WasmOutOfLineTrap(gen, frame_elided, instr), pc_(pc) {}
+
+ void Generate() final {
+ gen_->AddProtectedInstructionLanding(pc_, __ pc_offset());
+
+ GenerateWithTrapId(Builtins::kThrowWasmTrapMemOutOfBounds);
+ }
+
+ private:
+ int pc_;
+};
+
void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr,
X64OperandConverter& i, int pc) {
@@ -300,7 +341,7 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
if (access_mode == kMemoryAccessProtected) {
const bool frame_elided = !codegen->frame_access_state()->has_frame();
- new (zone) WasmOutOfLineTrap(codegen, pc, frame_elided, instr);
+ new (zone) WasmProtectedInstructionTrap(codegen, pc, frame_elided, instr);
}
}
@@ -482,6 +523,18 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
__ j(not_equal, &binop); \
} while (false)
+#define ASSEMBLE_ATOMIC64_BINOP(bin_inst, mov_inst, cmpxchg_inst) \
+ do { \
+ Label binop; \
+ __ bind(&binop); \
+ __ mov_inst(rax, i.MemoryOperand(1)); \
+ __ movq(i.TempRegister(0), rax); \
+ __ bin_inst(i.TempRegister(0), i.InputRegister(0)); \
+ __ lock(); \
+ __ cmpxchg_inst(i.MemoryOperand(1), i.TempRegister(0)); \
+ __ j(not_equal, &binop); \
+ } while (false)
+
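ASSEMBLE_ATOMIC64_BINOP emits the usual compare-and-swap retry loop: load the current value into rax, compute the updated value in a temporary, then lock cmpxchg it back and loop if another thread raced in between; the old value is left in rax as the result (the narrow variants zero-extend it afterwards). The same pattern in plain C++ against std::atomic, purely as an illustration:

    #include <atomic>
    #include <cstdint>

    // Read, compute old + operand, publish only if memory still holds what we
    // read; on failure compare_exchange_weak refreshes |expected| and we retry.
    uint64_t AtomicAddU64(std::atomic<uint64_t>* cell, uint64_t operand) {
      uint64_t expected = cell->load();
      while (!cell->compare_exchange_weak(expected, expected + operand)) {
        // |expected| now holds the value another thread wrote; try again.
      }
      return expected;  // the old value, as the emitted sequence leaves in rax
    }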
void CodeGenerator::AssembleDeconstructFrame() {
unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset());
__ movq(rsp, rbp);
@@ -606,11 +659,11 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ j(not_zero, code, RelocInfo::CODE_TARGET);
}
-void CodeGenerator::GenerateSpeculationPoison() {
+void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
// Set a mask which has all bits set in the normal case, but has all
// bits cleared if we are speculatively executing the wrong PC.
__ ComputeCodeStartAddress(rbx);
- __ movp(kSpeculationPoisonRegister, Immediate(0));
+ __ xorq(kSpeculationPoisonRegister, kSpeculationPoisonRegister);
__ cmpp(kJavaScriptCallCodeStartRegister, rbx);
__ movp(rbx, Immediate(-1));
__ cmovq(equal, kSpeculationPoisonRegister, rbx);
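The rewritten prologue computes the poison mask without branching: xorq clears it, the freshly computed code start is compared against the one the caller passed in kJavaScriptCallCodeStartRegister, and cmovq selects -1 only on a match. Conceptually (plain C++, not the emitted code):

    #include <cstdint>

    // All ones when the call really targeted this code object, all zeroes when
    // the CPU is speculating past a mispredicted call target; speculation-
    // sensitive loads are ANDed with this mask to neutralize them.
    uint64_t SpeculationPoison(uint64_t passed_code_start,
                               uint64_t actual_code_start) {
      return passed_code_start == actual_code_start ? ~uint64_t{0} : uint64_t{0};
    }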
@@ -811,6 +864,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
}
+ // TODO(tebbi): Do we need an lfence here?
break;
}
case kArchJmp:
@@ -872,6 +926,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ movq(i.OutputRegister(), rbp);
}
break;
+ case kArchRootsPointer:
+ __ movq(i.OutputRegister(), kRootRegister);
+ break;
case kArchTruncateDoubleToI: {
auto result = i.OutputRegister();
auto input = i.InputDoubleRegister(0);
@@ -904,6 +961,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(ool->exit());
break;
}
+ case kArchPoisonOnSpeculationWord:
+ DCHECK_EQ(i.OutputRegister(), i.InputRegister(0));
+ __ andq(i.InputRegister(0), kSpeculationPoisonRegister);
+ break;
case kLFence:
__ lfence();
break;
@@ -965,8 +1026,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIeee754Float64Pow: {
// TODO(bmeurer): Improve integration of the stub.
__ Movsd(xmm2, xmm0);
- __ CallStubDelayed(new (zone())
- MathPowStub(nullptr, MathPowStub::DOUBLE));
+ __ CallStubDelayed(new (zone()) MathPowStub());
__ Movsd(xmm0, xmm3);
break;
}
@@ -1895,16 +1955,28 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ movss(operand, i.InputDoubleRegister(index));
}
break;
- case kX64Movsd:
+ case kX64Movsd: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
if (instr->HasOutput()) {
- __ Movsd(i.OutputDoubleRegister(), i.MemoryOperand());
+ const MemoryAccessMode access_mode =
+ static_cast<MemoryAccessMode>(MiscField::decode(opcode));
+ if (access_mode == kMemoryAccessPoisoned) {
+ // If we have to poison the loaded value, we load into a general
+ // purpose register first, mask it with the poison, and move the
+ // value from the general purpose register into the double register.
+ __ movq(kScratchRegister, i.MemoryOperand());
+ __ andq(kScratchRegister, kSpeculationPoisonRegister);
+ __ Movq(i.OutputDoubleRegister(), kScratchRegister);
+ } else {
+ __ Movsd(i.OutputDoubleRegister(), i.MemoryOperand());
+ }
} else {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
__ Movsd(operand, i.InputDoubleRegister(index));
}
break;
+ }
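The poisoned double load above cannot AND an XMM register with the poison directly, so it masks the raw bits in a general purpose register and then moves them into the destination. The equivalent in plain C++ (illustration only):

    #include <cstdint>
    #include <cstring>

    // Mask the loaded bit pattern, then reinterpret it as a double, which is
    // what the movq / andq / Movq sequence does via the scratch register.
    double PoisonedDoubleLoad(const uint64_t* slot, uint64_t poison) {
      uint64_t bits = *slot & poison;
      double value;
      std::memcpy(&value, &bits, sizeof value);
      return value;
    }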
case kX64Movdqu: {
CpuFeatureScope sse_scope(tasm(), SSSE3);
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
@@ -2169,6 +2241,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64F32x4AddHoriz: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSE3);
__ haddps(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
@@ -2723,6 +2796,67 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Or, orl)
ATOMIC_BINOP_CASE(Xor, xorl)
#undef ATOMIC_BINOP_CASE
+ case kX64Word64AtomicExchangeUint8: {
+ __ xchgb(i.InputRegister(0), i.MemoryOperand(1));
+ __ movzxbq(i.InputRegister(0), i.InputRegister(0));
+ break;
+ }
+ case kX64Word64AtomicExchangeUint16: {
+ __ xchgw(i.InputRegister(0), i.MemoryOperand(1));
+ __ movzxwq(i.InputRegister(0), i.InputRegister(0));
+ break;
+ }
+ case kX64Word64AtomicExchangeUint32: {
+ __ xchgl(i.InputRegister(0), i.MemoryOperand(1));
+ break;
+ }
+ case kX64Word64AtomicExchangeUint64: {
+ __ xchgq(i.InputRegister(0), i.MemoryOperand(1));
+ break;
+ }
+ case kX64Word64AtomicCompareExchangeUint8: {
+ __ lock();
+ __ cmpxchgb(i.MemoryOperand(2), i.InputRegister(1));
+ __ movzxbq(rax, rax);
+ break;
+ }
+ case kX64Word64AtomicCompareExchangeUint16: {
+ __ lock();
+ __ cmpxchgw(i.MemoryOperand(2), i.InputRegister(1));
+ __ movzxwq(rax, rax);
+ break;
+ }
+ case kX64Word64AtomicCompareExchangeUint32: {
+ __ lock();
+ __ cmpxchgl(i.MemoryOperand(2), i.InputRegister(1));
+ break;
+ }
+ case kX64Word64AtomicCompareExchangeUint64: {
+ __ lock();
+ __ cmpxchgq(i.MemoryOperand(2), i.InputRegister(1));
+ break;
+ }
+#define ATOMIC64_BINOP_CASE(op, inst) \
+ case kX64Word64Atomic##op##Uint8: \
+ ASSEMBLE_ATOMIC64_BINOP(inst, movb, cmpxchgb); \
+ __ movzxbq(rax, rax); \
+ break; \
+ case kX64Word64Atomic##op##Uint16: \
+ ASSEMBLE_ATOMIC64_BINOP(inst, movw, cmpxchgw); \
+ __ movzxwq(rax, rax); \
+ break; \
+ case kX64Word64Atomic##op##Uint32: \
+ ASSEMBLE_ATOMIC64_BINOP(inst, movl, cmpxchgl); \
+ break; \
+ case kX64Word64Atomic##op##Uint64: \
+ ASSEMBLE_ATOMIC64_BINOP(inst, movq, cmpxchgq); \
+ break;
+ ATOMIC64_BINOP_CASE(Add, addq)
+ ATOMIC64_BINOP_CASE(Sub, subq)
+ ATOMIC64_BINOP_CASE(And, andq)
+ ATOMIC64_BINOP_CASE(Or, orq)
+ ATOMIC64_BINOP_CASE(Xor, xorq)
+#undef ATOMIC64_BINOP_CASE
case kWord32AtomicLoadInt8:
case kWord32AtomicLoadUint8:
case kWord32AtomicLoadInt16:
@@ -2731,11 +2865,33 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kWord32AtomicStoreWord8:
case kWord32AtomicStoreWord16:
case kWord32AtomicStoreWord32:
+ case kX64Word64AtomicLoadUint8:
+ case kX64Word64AtomicLoadUint16:
+ case kX64Word64AtomicLoadUint32:
+ case kX64Word64AtomicLoadUint64:
+ case kX64Word64AtomicStoreWord8:
+ case kX64Word64AtomicStoreWord16:
+ case kX64Word64AtomicStoreWord32:
+ case kX64Word64AtomicStoreWord64:
UNREACHABLE(); // Won't be generated by instruction selector.
break;
}
return kSuccess;
-} // NOLINT(readability/fn_size)
+} // NOLINT(readability/fn_size)
+
+#undef ASSEMBLE_UNOP
+#undef ASSEMBLE_BINOP
+#undef ASSEMBLE_COMPARE
+#undef ASSEMBLE_MULT
+#undef ASSEMBLE_SHIFT
+#undef ASSEMBLE_MOVX
+#undef ASSEMBLE_SSE_BINOP
+#undef ASSEMBLE_SSE_UNOP
+#undef ASSEMBLE_AVX_BINOP
+#undef ASSEMBLE_IEEE754_BINOP
+#undef ASSEMBLE_IEEE754_UNOP
+#undef ASSEMBLE_ATOMIC_BINOP
+#undef ASSEMBLE_ATOMIC64_BINOP
namespace {
@@ -2773,19 +2929,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
UNREACHABLE();
}
-#undef ASSEMBLE_UNOP
-#undef ASSEMBLE_BINOP
-#undef ASSEMBLE_COMPARE
-#undef ASSEMBLE_MULT
-#undef ASSEMBLE_SHIFT
-#undef ASSEMBLE_MOVX
-#undef ASSEMBLE_SSE_BINOP
-#undef ASSEMBLE_SSE_UNOP
-#undef ASSEMBLE_AVX_BINOP
-#undef ASSEMBLE_IEEE754_BINOP
-#undef ASSEMBLE_IEEE754_UNOP
-#undef ASSEMBLE_ATOMIC_BINOP
-
} // namespace
// Assembles branches after this instruction.
@@ -2864,62 +3007,8 @@ void CodeGenerator::AssembleArchJump(RpoNumber target) {
void CodeGenerator::AssembleArchTrap(Instruction* instr,
FlagsCondition condition) {
- class OutOfLineTrap final : public OutOfLineCode {
- public:
- OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
- : OutOfLineCode(gen),
- frame_elided_(frame_elided),
- instr_(instr),
- gen_(gen) {}
-
- void Generate() final {
- X64OperandConverter i(gen_, instr_);
-
- Builtins::Name trap_id =
- static_cast<Builtins::Name>(i.InputInt32(instr_->InputCount() - 1));
- bool old_has_frame = __ has_frame();
- if (frame_elided_) {
- __ set_has_frame(true);
- __ EnterFrame(StackFrame::WASM_COMPILED);
- }
- GenerateCallToTrap(trap_id);
- if (frame_elided_) {
- __ set_has_frame(old_has_frame);
- }
- }
-
- private:
- void GenerateCallToTrap(Builtins::Name trap_id) {
- if (trap_id == Builtins::builtin_count) {
- // We cannot test calls to the runtime in cctest/test-run-wasm.
- // Therefore we emit a call to C here instead of a call to the runtime.
- __ PrepareCallCFunction(0);
- __ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(
- __ isolate()),
- 0);
- __ LeaveFrame(StackFrame::WASM_COMPILED);
- auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
- size_t pop_size = call_descriptor->StackParameterCount() * kPointerSize;
- // Use rcx as a scratch register, we return anyways immediately.
- __ Ret(static_cast<int>(pop_size), rcx);
- } else {
- gen_->AssembleSourcePosition(instr_);
- __ Call(__ isolate()->builtins()->builtin_handle(trap_id),
- RelocInfo::CODE_TARGET);
- ReferenceMap* reference_map =
- new (gen_->zone()) ReferenceMap(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
- Safepoint::kNoLazyDeopt);
- __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
- }
- }
-
- bool frame_elided_;
- Instruction* instr_;
- CodeGenerator* gen_;
- };
bool frame_elided = !frame_access_state()->has_frame();
- auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+ auto ool = new (zone()) WasmOutOfLineTrap(this, frame_elided, instr);
Label* tlabel = ool->entry();
Label end;
if (condition == kUnorderedEqual) {
@@ -3046,7 +3135,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
shrink_slots -= static_cast<int>(osr_helper()->UnoptimizedFrameSlots());
- InitializePoisonForLoadsIfNeeded();
+ ResetSpeculationPoison();
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
@@ -3204,9 +3293,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ movq(dst, src.ToInt64(), src.rmode());
} else {
int32_t value = src.ToInt32();
- if (RelocInfo::IsWasmSizeReference(src.rmode())) {
- __ movl(dst, Immediate(value, src.rmode()));
- } else if (value == 0) {
+ if (value == 0) {
__ xorl(dst, dst);
} else {
__ movl(dst, Immediate(value));
@@ -3218,7 +3305,6 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (RelocInfo::IsWasmPtrReference(src.rmode())) {
__ movq(dst, src.ToInt64(), src.rmode());
} else {
- DCHECK(!RelocInfo::IsWasmSizeReference(src.rmode()));
__ Set(dst, src.ToInt64());
}
break;
diff --git a/deps/v8/src/compiler/x64/instruction-codes-x64.h b/deps/v8/src/compiler/x64/instruction-codes-x64.h
index e758072050..88474b2494 100644
--- a/deps/v8/src/compiler/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/x64/instruction-codes-x64.h
@@ -11,230 +11,266 @@ namespace compiler {
// X64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(X64Add) \
- V(X64Add32) \
- V(X64And) \
- V(X64And32) \
- V(X64Cmp) \
- V(X64Cmp32) \
- V(X64Cmp16) \
- V(X64Cmp8) \
- V(X64Test) \
- V(X64Test32) \
- V(X64Test16) \
- V(X64Test8) \
- V(X64Or) \
- V(X64Or32) \
- V(X64Xor) \
- V(X64Xor32) \
- V(X64Sub) \
- V(X64Sub32) \
- V(X64Imul) \
- V(X64Imul32) \
- V(X64ImulHigh32) \
- V(X64UmulHigh32) \
- V(X64Idiv) \
- V(X64Idiv32) \
- V(X64Udiv) \
- V(X64Udiv32) \
- V(X64Not) \
- V(X64Not32) \
- V(X64Neg) \
- V(X64Neg32) \
- V(X64Shl) \
- V(X64Shl32) \
- V(X64Shr) \
- V(X64Shr32) \
- V(X64Sar) \
- V(X64Sar32) \
- V(X64Ror) \
- V(X64Ror32) \
- V(X64Lzcnt) \
- V(X64Lzcnt32) \
- V(X64Tzcnt) \
- V(X64Tzcnt32) \
- V(X64Popcnt) \
- V(X64Popcnt32) \
- V(LFence) \
- V(SSEFloat32Cmp) \
- V(SSEFloat32Add) \
- V(SSEFloat32Sub) \
- V(SSEFloat32Mul) \
- V(SSEFloat32Div) \
- V(SSEFloat32Abs) \
- V(SSEFloat32Neg) \
- V(SSEFloat32Sqrt) \
- V(SSEFloat32ToFloat64) \
- V(SSEFloat32ToInt32) \
- V(SSEFloat32ToUint32) \
- V(SSEFloat32Round) \
- V(SSEFloat64Cmp) \
- V(SSEFloat64Add) \
- V(SSEFloat64Sub) \
- V(SSEFloat64Mul) \
- V(SSEFloat64Div) \
- V(SSEFloat64Mod) \
- V(SSEFloat64Abs) \
- V(SSEFloat64Neg) \
- V(SSEFloat64Sqrt) \
- V(SSEFloat64Round) \
- V(SSEFloat32Max) \
- V(SSEFloat64Max) \
- V(SSEFloat32Min) \
- V(SSEFloat64Min) \
- V(SSEFloat64ToFloat32) \
- V(SSEFloat64ToInt32) \
- V(SSEFloat64ToUint32) \
- V(SSEFloat32ToInt64) \
- V(SSEFloat64ToInt64) \
- V(SSEFloat32ToUint64) \
- V(SSEFloat64ToUint64) \
- V(SSEInt32ToFloat64) \
- V(SSEInt32ToFloat32) \
- V(SSEInt64ToFloat32) \
- V(SSEInt64ToFloat64) \
- V(SSEUint64ToFloat32) \
- V(SSEUint64ToFloat64) \
- V(SSEUint32ToFloat64) \
- V(SSEUint32ToFloat32) \
- V(SSEFloat64ExtractLowWord32) \
- V(SSEFloat64ExtractHighWord32) \
- V(SSEFloat64InsertLowWord32) \
- V(SSEFloat64InsertHighWord32) \
- V(SSEFloat64LoadLowWord32) \
- V(SSEFloat64SilenceNaN) \
- V(AVXFloat32Cmp) \
- V(AVXFloat32Add) \
- V(AVXFloat32Sub) \
- V(AVXFloat32Mul) \
- V(AVXFloat32Div) \
- V(AVXFloat64Cmp) \
- V(AVXFloat64Add) \
- V(AVXFloat64Sub) \
- V(AVXFloat64Mul) \
- V(AVXFloat64Div) \
- V(AVXFloat64Abs) \
- V(AVXFloat64Neg) \
- V(AVXFloat32Abs) \
- V(AVXFloat32Neg) \
- V(X64Movsxbl) \
- V(X64Movzxbl) \
- V(X64Movsxbq) \
- V(X64Movzxbq) \
- V(X64Movb) \
- V(X64Movsxwl) \
- V(X64Movzxwl) \
- V(X64Movsxwq) \
- V(X64Movzxwq) \
- V(X64Movw) \
- V(X64Movl) \
- V(X64Movsxlq) \
- V(X64Movq) \
- V(X64Movsd) \
- V(X64Movss) \
- V(X64Movdqu) \
- V(X64BitcastFI) \
- V(X64BitcastDL) \
- V(X64BitcastIF) \
- V(X64BitcastLD) \
- V(X64Lea32) \
- V(X64Lea) \
- V(X64Dec32) \
- V(X64Inc32) \
- V(X64Push) \
- V(X64Poke) \
- V(X64Peek) \
- V(X64StackCheck) \
- V(X64F32x4Splat) \
- V(X64F32x4ExtractLane) \
- V(X64F32x4ReplaceLane) \
- V(X64F32x4Abs) \
- V(X64F32x4Neg) \
- V(X64F32x4RecipApprox) \
- V(X64F32x4RecipSqrtApprox) \
- V(X64F32x4Add) \
- V(X64F32x4AddHoriz) \
- V(X64F32x4Sub) \
- V(X64F32x4Mul) \
- V(X64F32x4Min) \
- V(X64F32x4Max) \
- V(X64F32x4Eq) \
- V(X64F32x4Ne) \
- V(X64F32x4Lt) \
- V(X64F32x4Le) \
- V(X64I32x4Splat) \
- V(X64I32x4ExtractLane) \
- V(X64I32x4ReplaceLane) \
- V(X64I32x4Neg) \
- V(X64I32x4Shl) \
- V(X64I32x4ShrS) \
- V(X64I32x4Add) \
- V(X64I32x4AddHoriz) \
- V(X64I32x4Sub) \
- V(X64I32x4Mul) \
- V(X64I32x4MinS) \
- V(X64I32x4MaxS) \
- V(X64I32x4Eq) \
- V(X64I32x4Ne) \
- V(X64I32x4GtS) \
- V(X64I32x4GeS) \
- V(X64I32x4ShrU) \
- V(X64I32x4MinU) \
- V(X64I32x4MaxU) \
- V(X64I32x4GtU) \
- V(X64I32x4GeU) \
- V(X64I16x8Splat) \
- V(X64I16x8ExtractLane) \
- V(X64I16x8ReplaceLane) \
- V(X64I16x8Neg) \
- V(X64I16x8Shl) \
- V(X64I16x8ShrS) \
- V(X64I16x8Add) \
- V(X64I16x8AddSaturateS) \
- V(X64I16x8AddHoriz) \
- V(X64I16x8Sub) \
- V(X64I16x8SubSaturateS) \
- V(X64I16x8Mul) \
- V(X64I16x8MinS) \
- V(X64I16x8MaxS) \
- V(X64I16x8Eq) \
- V(X64I16x8Ne) \
- V(X64I16x8GtS) \
- V(X64I16x8GeS) \
- V(X64I16x8ShrU) \
- V(X64I16x8AddSaturateU) \
- V(X64I16x8SubSaturateU) \
- V(X64I16x8MinU) \
- V(X64I16x8MaxU) \
- V(X64I16x8GtU) \
- V(X64I16x8GeU) \
- V(X64I8x16Splat) \
- V(X64I8x16ExtractLane) \
- V(X64I8x16ReplaceLane) \
- V(X64I8x16Neg) \
- V(X64I8x16Add) \
- V(X64I8x16AddSaturateS) \
- V(X64I8x16Sub) \
- V(X64I8x16SubSaturateS) \
- V(X64I8x16MinS) \
- V(X64I8x16MaxS) \
- V(X64I8x16Eq) \
- V(X64I8x16Ne) \
- V(X64I8x16GtS) \
- V(X64I8x16GeS) \
- V(X64I8x16AddSaturateU) \
- V(X64I8x16SubSaturateU) \
- V(X64I8x16MinU) \
- V(X64I8x16MaxU) \
- V(X64I8x16GtU) \
- V(X64I8x16GeU) \
- V(X64S128And) \
- V(X64S128Or) \
- V(X64S128Xor) \
- V(X64S128Not) \
- V(X64S128Select) \
- V(X64S128Zero)
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(X64Add) \
+ V(X64Add32) \
+ V(X64And) \
+ V(X64And32) \
+ V(X64Cmp) \
+ V(X64Cmp32) \
+ V(X64Cmp16) \
+ V(X64Cmp8) \
+ V(X64Test) \
+ V(X64Test32) \
+ V(X64Test16) \
+ V(X64Test8) \
+ V(X64Or) \
+ V(X64Or32) \
+ V(X64Xor) \
+ V(X64Xor32) \
+ V(X64Sub) \
+ V(X64Sub32) \
+ V(X64Imul) \
+ V(X64Imul32) \
+ V(X64ImulHigh32) \
+ V(X64UmulHigh32) \
+ V(X64Idiv) \
+ V(X64Idiv32) \
+ V(X64Udiv) \
+ V(X64Udiv32) \
+ V(X64Not) \
+ V(X64Not32) \
+ V(X64Neg) \
+ V(X64Neg32) \
+ V(X64Shl) \
+ V(X64Shl32) \
+ V(X64Shr) \
+ V(X64Shr32) \
+ V(X64Sar) \
+ V(X64Sar32) \
+ V(X64Ror) \
+ V(X64Ror32) \
+ V(X64Lzcnt) \
+ V(X64Lzcnt32) \
+ V(X64Tzcnt) \
+ V(X64Tzcnt32) \
+ V(X64Popcnt) \
+ V(X64Popcnt32) \
+ V(LFence) \
+ V(SSEFloat32Cmp) \
+ V(SSEFloat32Add) \
+ V(SSEFloat32Sub) \
+ V(SSEFloat32Mul) \
+ V(SSEFloat32Div) \
+ V(SSEFloat32Abs) \
+ V(SSEFloat32Neg) \
+ V(SSEFloat32Sqrt) \
+ V(SSEFloat32ToFloat64) \
+ V(SSEFloat32ToInt32) \
+ V(SSEFloat32ToUint32) \
+ V(SSEFloat32Round) \
+ V(SSEFloat64Cmp) \
+ V(SSEFloat64Add) \
+ V(SSEFloat64Sub) \
+ V(SSEFloat64Mul) \
+ V(SSEFloat64Div) \
+ V(SSEFloat64Mod) \
+ V(SSEFloat64Abs) \
+ V(SSEFloat64Neg) \
+ V(SSEFloat64Sqrt) \
+ V(SSEFloat64Round) \
+ V(SSEFloat32Max) \
+ V(SSEFloat64Max) \
+ V(SSEFloat32Min) \
+ V(SSEFloat64Min) \
+ V(SSEFloat64ToFloat32) \
+ V(SSEFloat64ToInt32) \
+ V(SSEFloat64ToUint32) \
+ V(SSEFloat32ToInt64) \
+ V(SSEFloat64ToInt64) \
+ V(SSEFloat32ToUint64) \
+ V(SSEFloat64ToUint64) \
+ V(SSEInt32ToFloat64) \
+ V(SSEInt32ToFloat32) \
+ V(SSEInt64ToFloat32) \
+ V(SSEInt64ToFloat64) \
+ V(SSEUint64ToFloat32) \
+ V(SSEUint64ToFloat64) \
+ V(SSEUint32ToFloat64) \
+ V(SSEUint32ToFloat32) \
+ V(SSEFloat64ExtractLowWord32) \
+ V(SSEFloat64ExtractHighWord32) \
+ V(SSEFloat64InsertLowWord32) \
+ V(SSEFloat64InsertHighWord32) \
+ V(SSEFloat64LoadLowWord32) \
+ V(SSEFloat64SilenceNaN) \
+ V(AVXFloat32Cmp) \
+ V(AVXFloat32Add) \
+ V(AVXFloat32Sub) \
+ V(AVXFloat32Mul) \
+ V(AVXFloat32Div) \
+ V(AVXFloat64Cmp) \
+ V(AVXFloat64Add) \
+ V(AVXFloat64Sub) \
+ V(AVXFloat64Mul) \
+ V(AVXFloat64Div) \
+ V(AVXFloat64Abs) \
+ V(AVXFloat64Neg) \
+ V(AVXFloat32Abs) \
+ V(AVXFloat32Neg) \
+ V(X64Movsxbl) \
+ V(X64Movzxbl) \
+ V(X64Movsxbq) \
+ V(X64Movzxbq) \
+ V(X64Movb) \
+ V(X64Movsxwl) \
+ V(X64Movzxwl) \
+ V(X64Movsxwq) \
+ V(X64Movzxwq) \
+ V(X64Movw) \
+ V(X64Movl) \
+ V(X64Movsxlq) \
+ V(X64Movq) \
+ V(X64Movsd) \
+ V(X64Movss) \
+ V(X64Movdqu) \
+ V(X64BitcastFI) \
+ V(X64BitcastDL) \
+ V(X64BitcastIF) \
+ V(X64BitcastLD) \
+ V(X64Lea32) \
+ V(X64Lea) \
+ V(X64Dec32) \
+ V(X64Inc32) \
+ V(X64Push) \
+ V(X64Poke) \
+ V(X64Peek) \
+ V(X64StackCheck) \
+ V(X64F32x4Splat) \
+ V(X64F32x4ExtractLane) \
+ V(X64F32x4ReplaceLane) \
+ V(X64F32x4Abs) \
+ V(X64F32x4Neg) \
+ V(X64F32x4RecipApprox) \
+ V(X64F32x4RecipSqrtApprox) \
+ V(X64F32x4Add) \
+ V(X64F32x4AddHoriz) \
+ V(X64F32x4Sub) \
+ V(X64F32x4Mul) \
+ V(X64F32x4Min) \
+ V(X64F32x4Max) \
+ V(X64F32x4Eq) \
+ V(X64F32x4Ne) \
+ V(X64F32x4Lt) \
+ V(X64F32x4Le) \
+ V(X64I32x4Splat) \
+ V(X64I32x4ExtractLane) \
+ V(X64I32x4ReplaceLane) \
+ V(X64I32x4Neg) \
+ V(X64I32x4Shl) \
+ V(X64I32x4ShrS) \
+ V(X64I32x4Add) \
+ V(X64I32x4AddHoriz) \
+ V(X64I32x4Sub) \
+ V(X64I32x4Mul) \
+ V(X64I32x4MinS) \
+ V(X64I32x4MaxS) \
+ V(X64I32x4Eq) \
+ V(X64I32x4Ne) \
+ V(X64I32x4GtS) \
+ V(X64I32x4GeS) \
+ V(X64I32x4ShrU) \
+ V(X64I32x4MinU) \
+ V(X64I32x4MaxU) \
+ V(X64I32x4GtU) \
+ V(X64I32x4GeU) \
+ V(X64I16x8Splat) \
+ V(X64I16x8ExtractLane) \
+ V(X64I16x8ReplaceLane) \
+ V(X64I16x8Neg) \
+ V(X64I16x8Shl) \
+ V(X64I16x8ShrS) \
+ V(X64I16x8Add) \
+ V(X64I16x8AddSaturateS) \
+ V(X64I16x8AddHoriz) \
+ V(X64I16x8Sub) \
+ V(X64I16x8SubSaturateS) \
+ V(X64I16x8Mul) \
+ V(X64I16x8MinS) \
+ V(X64I16x8MaxS) \
+ V(X64I16x8Eq) \
+ V(X64I16x8Ne) \
+ V(X64I16x8GtS) \
+ V(X64I16x8GeS) \
+ V(X64I16x8ShrU) \
+ V(X64I16x8AddSaturateU) \
+ V(X64I16x8SubSaturateU) \
+ V(X64I16x8MinU) \
+ V(X64I16x8MaxU) \
+ V(X64I16x8GtU) \
+ V(X64I16x8GeU) \
+ V(X64I8x16Splat) \
+ V(X64I8x16ExtractLane) \
+ V(X64I8x16ReplaceLane) \
+ V(X64I8x16Neg) \
+ V(X64I8x16Add) \
+ V(X64I8x16AddSaturateS) \
+ V(X64I8x16Sub) \
+ V(X64I8x16SubSaturateS) \
+ V(X64I8x16MinS) \
+ V(X64I8x16MaxS) \
+ V(X64I8x16Eq) \
+ V(X64I8x16Ne) \
+ V(X64I8x16GtS) \
+ V(X64I8x16GeS) \
+ V(X64I8x16AddSaturateU) \
+ V(X64I8x16SubSaturateU) \
+ V(X64I8x16MinU) \
+ V(X64I8x16MaxU) \
+ V(X64I8x16GtU) \
+ V(X64I8x16GeU) \
+ V(X64S128And) \
+ V(X64S128Or) \
+ V(X64S128Xor) \
+ V(X64S128Not) \
+ V(X64S128Select) \
+ V(X64S128Zero) \
+ V(X64Word64AtomicLoadUint8) \
+ V(X64Word64AtomicLoadUint16) \
+ V(X64Word64AtomicLoadUint32) \
+ V(X64Word64AtomicLoadUint64) \
+ V(X64Word64AtomicStoreWord8) \
+ V(X64Word64AtomicStoreWord16) \
+ V(X64Word64AtomicStoreWord32) \
+ V(X64Word64AtomicStoreWord64) \
+ V(X64Word64AtomicAddUint8) \
+ V(X64Word64AtomicAddUint16) \
+ V(X64Word64AtomicAddUint32) \
+ V(X64Word64AtomicAddUint64) \
+ V(X64Word64AtomicSubUint8) \
+ V(X64Word64AtomicSubUint16) \
+ V(X64Word64AtomicSubUint32) \
+ V(X64Word64AtomicSubUint64) \
+ V(X64Word64AtomicAndUint8) \
+ V(X64Word64AtomicAndUint16) \
+ V(X64Word64AtomicAndUint32) \
+ V(X64Word64AtomicAndUint64) \
+ V(X64Word64AtomicOrUint8) \
+ V(X64Word64AtomicOrUint16) \
+ V(X64Word64AtomicOrUint32) \
+ V(X64Word64AtomicOrUint64) \
+ V(X64Word64AtomicXorUint8) \
+ V(X64Word64AtomicXorUint16) \
+ V(X64Word64AtomicXorUint32) \
+ V(X64Word64AtomicXorUint64) \
+ V(X64Word64AtomicExchangeUint8) \
+ V(X64Word64AtomicExchangeUint16) \
+ V(X64Word64AtomicExchangeUint32) \
+ V(X64Word64AtomicExchangeUint64) \
+ V(X64Word64AtomicCompareExchangeUint8) \
+ V(X64Word64AtomicCompareExchangeUint16) \
+ V(X64Word64AtomicCompareExchangeUint32) \
+ V(X64Word64AtomicCompareExchangeUint64)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
index 1d0e182303..c3c0d3a2a5 100644
--- a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
@@ -267,6 +267,46 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kLFence:
return kHasSideEffect;
+ case kX64Word64AtomicLoadUint8:
+ case kX64Word64AtomicLoadUint16:
+ case kX64Word64AtomicLoadUint32:
+ case kX64Word64AtomicLoadUint64:
+ return kIsLoadOperation;
+
+ case kX64Word64AtomicStoreWord8:
+ case kX64Word64AtomicStoreWord16:
+ case kX64Word64AtomicStoreWord32:
+ case kX64Word64AtomicStoreWord64:
+ case kX64Word64AtomicAddUint8:
+ case kX64Word64AtomicAddUint16:
+ case kX64Word64AtomicAddUint32:
+ case kX64Word64AtomicAddUint64:
+ case kX64Word64AtomicSubUint8:
+ case kX64Word64AtomicSubUint16:
+ case kX64Word64AtomicSubUint32:
+ case kX64Word64AtomicSubUint64:
+ case kX64Word64AtomicAndUint8:
+ case kX64Word64AtomicAndUint16:
+ case kX64Word64AtomicAndUint32:
+ case kX64Word64AtomicAndUint64:
+ case kX64Word64AtomicOrUint8:
+ case kX64Word64AtomicOrUint16:
+ case kX64Word64AtomicOrUint32:
+ case kX64Word64AtomicOrUint64:
+ case kX64Word64AtomicXorUint8:
+ case kX64Word64AtomicXorUint16:
+ case kX64Word64AtomicXorUint32:
+ case kX64Word64AtomicXorUint64:
+ case kX64Word64AtomicExchangeUint8:
+ case kX64Word64AtomicExchangeUint16:
+ case kX64Word64AtomicExchangeUint32:
+ case kX64Word64AtomicExchangeUint64:
+ case kX64Word64AtomicCompareExchangeUint8:
+ case kX64Word64AtomicCompareExchangeUint16:
+ case kX64Word64AtomicCompareExchangeUint32:
+ case kX64Word64AtomicCompareExchangeUint64:
+ return kHasSideEffect;
+
#define CASE(Name) case k##Name:
COMMON_ARCH_OPCODE_LIST(CASE)
#undef CASE
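For context on the two flag values used in the new cases above: in TurboFan's instruction scheduler, kIsLoadOperation and kHasSideEffect feed ordering constraints, roughly "loads must not move across side-effecting instructions, and side-effecting instructions keep their mutual order". A minimal standalone sketch of how such flags could be consumed (the names mirror the diff, but the exact rule shown here is an assumption, not V8's implementation):

#include <cstdint>

// Hypothetical flag bits mirroring the classification above.
enum TargetFlags : uint32_t {
  kNoOpcodeFlags   = 0,
  kIsLoadOperation = 1 << 0,  // e.g. kX64Word64AtomicLoad*
  kHasSideEffect   = 1 << 1,  // e.g. atomic stores and RMW ops
};

// Sketch: may instruction `later` be scheduled before instruction `earlier`?
// Assumed rule: side effects never swap with each other, and loads never swap
// with side effects; everything else is free to move.
bool MaySwap(uint32_t earlier, uint32_t later) {
  if ((earlier & kHasSideEffect) && (later & kHasSideEffect)) return false;
  if ((earlier & kHasSideEffect) && (later & kIsLoadOperation)) return false;
  if ((earlier & kIsLoadOperation) && (later & kHasSideEffect)) return false;
  return true;
}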
diff --git a/deps/v8/src/compiler/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
index 25dc5e9658..db7584ccf9 100644
--- a/deps/v8/src/compiler/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
@@ -25,12 +25,12 @@ class X64OperandGenerator final : public OperandGenerator {
case IrOpcode::kRelocatableInt32Constant:
return true;
case IrOpcode::kInt64Constant: {
- const int64_t value = OpParameter<int64_t>(node);
+ const int64_t value = OpParameter<int64_t>(node->op());
return std::numeric_limits<int32_t>::min() < value &&
value <= std::numeric_limits<int32_t>::max();
}
case IrOpcode::kNumberConstant: {
- const double value = OpParameter<double>(node);
+ const double value = OpParameter<double>(node->op());
return bit_cast<int64_t>(value) == 0;
}
default:
@@ -41,10 +41,10 @@ class X64OperandGenerator final : public OperandGenerator {
int32_t GetImmediateIntegerValue(Node* node) {
DCHECK(CanBeImmediate(node));
if (node->opcode() == IrOpcode::kInt32Constant) {
- return OpParameter<int32_t>(node);
+ return OpParameter<int32_t>(node->op());
}
DCHECK_EQ(IrOpcode::kInt64Constant, node->opcode());
- return static_cast<int32_t>(OpParameter<int64_t>(node));
+ return static_cast<int32_t>(OpParameter<int64_t>(node->op()));
}
bool CanBeMemoryOperand(InstructionCode opcode, Node* node, Node* input,
@@ -96,10 +96,10 @@ class X64OperandGenerator final : public OperandGenerator {
AddressingMode mode = kMode_MRI;
if (base != nullptr && (index != nullptr || displacement != nullptr)) {
if (base->opcode() == IrOpcode::kInt32Constant &&
- OpParameter<int32_t>(base) == 0) {
+ OpParameter<int32_t>(base->op()) == 0) {
base = nullptr;
} else if (base->opcode() == IrOpcode::kInt64Constant &&
- OpParameter<int64_t>(base) == 0) {
+ OpParameter<int64_t>(base->op()) == 0) {
base = nullptr;
}
}
@@ -178,6 +178,17 @@ class X64OperandGenerator final : public OperandGenerator {
}
}
}
+ if (operand->InputCount() == 2) {
+ Node* left = operand->InputAt(0);
+ Node* right = operand->InputAt(1);
+ if (left->opcode() == IrOpcode::kLoadRootsPointer &&
+ right->opcode() == IrOpcode::kInt64Constant) {
+ int64_t offset = OpParameter<int64_t>(right->op());
+ DCHECK(is_int32(offset));
+ inputs[(*input_count)++] = TempImmediate(static_cast<int32_t>(offset));
+ return kMode_Root;
+ }
+ }
BaseWithIndexAndDisplacement64Matcher m(operand, AddressOption::kAllowAll);
DCHECK(m.matches());
if (m.displacement() == nullptr || CanBeImmediate(m.displacement())) {
@@ -309,7 +320,7 @@ void InstructionSelector::VisitLoad(Node* node) {
if (node->opcode() == IrOpcode::kProtectedLoad) {
code |= MiscField::encode(kMemoryAccessProtected);
} else if (node->opcode() == IrOpcode::kPoisonedLoad) {
- CHECK_EQ(load_poisoning_, LoadPoisoning::kDoPoison);
+ CHECK_EQ(poisoning_enabled_, PoisoningMitigationLevel::kOn);
code |= MiscField::encode(kMemoryAccessPoisoned);
}
Emit(code, 1, outputs, input_count, inputs);
@@ -416,9 +427,9 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
Int32BinopMatcher m(node);
Node* left = m.left().node();
Node* right = m.right().node();
- InstructionOperand inputs[6];
+ InstructionOperand inputs[4];
size_t input_count = 0;
- InstructionOperand outputs[2];
+ InstructionOperand outputs[1];
size_t output_count = 0;
// TODO(turbofan): match complex addressing modes.
@@ -465,23 +476,14 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
}
outputs[output_count++] = g.DefineSameAsFirst(node);
- if (cont->IsSet()) {
- outputs[output_count++] = g.DefineAsRegister(cont->result());
- }
DCHECK_NE(0u, input_count);
- DCHECK_NE(0u, output_count);
+ DCHECK_EQ(1u, output_count);
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
- opcode = cont->Encode(opcode);
- if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->feedback(),
- cont->frame_state());
- } else {
- selector->Emit(opcode, output_count, outputs, input_count, inputs);
- }
+ selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
+ inputs, cont);
}
@@ -510,7 +512,6 @@ void InstructionSelector::VisitWord64And(Node* node) {
VisitBinop(this, node, kX64And);
}
-
void InstructionSelector::VisitWord32Or(Node* node) {
VisitBinop(this, node, kX64Or32);
}
@@ -1524,51 +1525,20 @@ void VisitCompareWithMemoryOperand(InstructionSelector* selector,
DCHECK_EQ(IrOpcode::kLoad, left->opcode());
X64OperandGenerator g(selector);
size_t input_count = 0;
- InstructionOperand inputs[6];
+ InstructionOperand inputs[4];
AddressingMode addressing_mode =
g.GetEffectiveAddressMemoryOperand(left, inputs, &input_count);
opcode |= AddressingModeField::encode(addressing_mode);
- opcode = cont->Encode(opcode);
inputs[input_count++] = right;
- if (cont->IsBranch()) {
- inputs[input_count++] = g.Label(cont->true_block());
- inputs[input_count++] = g.Label(cont->false_block());
- selector->Emit(opcode, 0, nullptr, input_count, inputs);
- } else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
- cont->kind(), cont->reason(), cont->feedback(),
- cont->frame_state());
- } else if (cont->IsSet()) {
- InstructionOperand output = g.DefineAsRegister(cont->result());
- selector->Emit(opcode, 1, &output, input_count, inputs);
- } else {
- DCHECK(cont->IsTrap());
- inputs[input_count++] = g.UseImmediate(cont->trap_id());
- selector->Emit(opcode, 0, nullptr, input_count, inputs);
- }
+ selector->EmitWithContinuation(opcode, 0, nullptr, input_count, inputs, cont);
}
// Shared routine for multiple compare operations.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
InstructionOperand left, InstructionOperand right,
FlagsContinuation* cont) {
- X64OperandGenerator g(selector);
- opcode = cont->Encode(opcode);
- if (cont->IsBranch()) {
- selector->Emit(opcode, g.NoOutput(), left, right,
- g.Label(cont->true_block()), g.Label(cont->false_block()));
- } else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
- cont->reason(), cont->feedback(),
- cont->frame_state());
- } else if (cont->IsSet()) {
- selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
- } else {
- DCHECK(cont->IsTrap());
- selector->Emit(opcode, g.NoOutput(), left, right,
- g.UseImmediate(cont->trap_id()));
- }
+ selector->EmitWithContinuation(opcode, left, right, cont);
}
@@ -1589,8 +1559,8 @@ MachineType MachineTypeForNarrow(Node* node, Node* hint_node) {
if (node->opcode() == IrOpcode::kInt32Constant ||
node->opcode() == IrOpcode::kInt64Constant) {
int64_t constant = node->opcode() == IrOpcode::kInt32Constant
- ? OpParameter<int32_t>(node)
- : OpParameter<int64_t>(node);
+ ? OpParameter<int32_t>(node->op())
+ : OpParameter<int64_t>(node->op());
if (hint == MachineType::Int8()) {
if (constant >= std::numeric_limits<int8_t>::min() &&
constant <= std::numeric_limits<int8_t>::max()) {
@@ -1745,8 +1715,7 @@ void VisitWord64Compare(InstructionSelector* selector, Node* node,
if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
InstructionCode opcode = cont->Encode(kX64StackCheck);
CHECK(cont->IsBranch());
- selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
- g.Label(cont->false_block()));
+ selector->EmitWithContinuation(opcode, cont);
return;
}
}
@@ -1937,21 +1906,21 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
// Emit either ArchTableSwitch or ArchLookupSwitch.
if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
static const size_t kMaxTableSwitchValueRange = 2 << 16;
- size_t table_space_cost = 4 + sw.value_range;
+ size_t table_space_cost = 4 + sw.value_range();
size_t table_time_cost = 3;
- size_t lookup_space_cost = 3 + 2 * sw.case_count;
- size_t lookup_time_cost = sw.case_count;
- if (sw.case_count > 4 &&
+ size_t lookup_space_cost = 3 + 2 * sw.case_count();
+ size_t lookup_time_cost = sw.case_count();
+ if (sw.case_count() > 4 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min() &&
- sw.value_range <= kMaxTableSwitchValueRange) {
+ sw.min_value() > std::numeric_limits<int32_t>::min() &&
+ sw.value_range() <= kMaxTableSwitchValueRange) {
InstructionOperand index_operand = g.TempRegister();
- if (sw.min_value) {
+ if (sw.min_value()) {
// The leal automatically zero extends, so result is a valid 64-bit
// index.
Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI), index_operand,
- value_operand, g.TempImmediate(-sw.min_value));
+ value_operand, g.TempImmediate(-sw.min_value()));
} else {
// Zero extend, because we use it as 64-bit index into the jump table.
Emit(kX64Movl, index_operand, value_operand);
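A worked example of the table-vs-lookup heuristic above (the inputs are made up): with case_count() == 10 and value_range() == 20, table_space_cost = 4 + 20 = 24 and lookup_space_cost = 3 + 2 * 10 = 23; comparing 24 + 3 * 3 = 33 against 23 + 3 * 10 = 53, the table switch wins. The same arithmetic as a standalone sketch, with the constants copied from the hunk:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <limits>

// Sketch of the selection heuristic from VisitSwitch above; the thresholds
// mirror the diff, the sample inputs in main() are invented.
bool UseTableSwitch(size_t case_count, size_t value_range, int32_t min_value) {
  static const size_t kMaxTableSwitchValueRange = 2 << 16;
  size_t table_space_cost = 4 + value_range;
  size_t table_time_cost = 3;
  size_t lookup_space_cost = 3 + 2 * case_count;
  size_t lookup_time_cost = case_count;
  return case_count > 4 &&
         table_space_cost + 3 * table_time_cost <=
             lookup_space_cost + 3 * lookup_time_cost &&
         min_value > std::numeric_limits<int32_t>::min() &&
         value_range <= kMaxTableSwitchValueRange;
}

int main() {
  std::printf("%d\n", UseTableSwitch(10, 20, 0));  // prints 1: table switch
}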
@@ -2161,6 +2130,12 @@ void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
VisitLoad(node);
}
+void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ USE(load_rep);
+ VisitLoad(node);
+}
+
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
X64OperandGenerator g(this);
Node* base = node->InputAt(0);
@@ -2199,6 +2174,48 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) {
Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count, inputs);
}
+void InstructionSelector::VisitWord64AtomicStore(Node* node) {
+ X64OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ opcode = kX64Word64AtomicExchangeUint8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kX64Word64AtomicExchangeUint16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kX64Word64AtomicExchangeUint32;
+ break;
+ case MachineRepresentation::kWord64:
+ opcode = kX64Word64AtomicExchangeUint64;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ if (g.CanBeImmediate(index)) {
+ inputs[input_count++] = g.UseImmediate(index);
+ addressing_mode = kMode_MRI;
+ } else {
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ addressing_mode = kMode_MR1;
+ }
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ DCHECK_LE(input_count, arraysize(inputs));
+ Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count, inputs);
+}
+
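Note that VisitWord64AtomicStore above maps every store width onto an exchange opcode rather than a plain store. On x64 a sequentially consistent atomic store is commonly lowered to xchg (which carries an implicit lock prefix), so reusing the exchange opcodes buys the required ordering; that rationale is an inference from the opcode choice, not stated in the diff. The same semantics at the C++ level, as a minimal illustration (not V8 code):

#include <atomic>
#include <cstdint>

std::atomic<uint64_t> g_cell{0};

// A seq_cst store; x64 compilers typically emit `xchg` (or `mov` + `mfence`)
// here, mirroring why the selector above picks Word64AtomicExchange* opcodes
// for atomic stores.
void StoreFlag(uint64_t v) {
  g_cell.store(v, std::memory_order_seq_cst);
}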
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
X64OperandGenerator g(this);
Node* base = node->InputAt(0);
@@ -2239,6 +2256,44 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
Emit(code, 1, outputs, input_count, inputs);
}
+void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
+ X64OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ MachineType type = AtomicOpRepresentationOf(node->op());
+ ArchOpcode opcode = kArchNop;
+ if (type == MachineType::Uint8()) {
+ opcode = kX64Word64AtomicExchangeUint8;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kX64Word64AtomicExchangeUint16;
+ } else if (type == MachineType::Uint32()) {
+ opcode = kX64Word64AtomicExchangeUint32;
+ } else if (type == MachineType::Uint64()) {
+ opcode = kX64Word64AtomicExchangeUint64;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+ InstructionOperand outputs[1];
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ if (g.CanBeImmediate(index)) {
+ inputs[input_count++] = g.UseImmediate(index);
+ addressing_mode = kMode_MRI;
+ } else {
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ addressing_mode = kMode_MR1;
+ }
+ outputs[0] = g.DefineSameAsFirst(node);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, 1, outputs, input_count, inputs);
+}
+
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
X64OperandGenerator g(this);
Node* base = node->InputAt(0);
@@ -2281,6 +2336,46 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
Emit(code, 1, outputs, input_count, inputs);
}
+void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
+ X64OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* old_value = node->InputAt(2);
+ Node* new_value = node->InputAt(3);
+
+ MachineType type = AtomicOpRepresentationOf(node->op());
+ ArchOpcode opcode = kArchNop;
+ if (type == MachineType::Uint8()) {
+ opcode = kX64Word64AtomicCompareExchangeUint8;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kX64Word64AtomicCompareExchangeUint16;
+ } else if (type == MachineType::Uint32()) {
+ opcode = kX64Word64AtomicCompareExchangeUint32;
+ } else if (type == MachineType::Uint64()) {
+ opcode = kX64Word64AtomicCompareExchangeUint64;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+ InstructionOperand outputs[1];
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseFixed(old_value, rax);
+ inputs[input_count++] = g.UseUniqueRegister(new_value);
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ if (g.CanBeImmediate(index)) {
+ inputs[input_count++] = g.UseImmediate(index);
+ addressing_mode = kMode_MRI;
+ } else {
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ addressing_mode = kMode_MR1;
+ }
+ outputs[0] = g.DefineAsFixed(node, rax);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, 1, outputs, input_count, inputs);
+}
+
void InstructionSelector::VisitAtomicBinaryOperation(
Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
ArchOpcode uint16_op, ArchOpcode word32_op) {
@@ -2339,6 +2434,61 @@ VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
+void InstructionSelector::VisitWord64AtomicBinaryOperation(
+ Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op,
+ ArchOpcode word64_op) {
+ X64OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ MachineType type = AtomicOpRepresentationOf(node->op());
+ ArchOpcode opcode = kArchNop;
+ if (type == MachineType::Uint8()) {
+ opcode = uint8_op;
+ } else if (type == MachineType::Uint16()) {
+ opcode = uint16_op;
+ } else if (type == MachineType::Uint32()) {
+ opcode = uint32_op;
+ } else if (type == MachineType::Uint64()) {
+ opcode = word64_op;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+ InstructionOperand outputs[1];
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ if (g.CanBeImmediate(index)) {
+ inputs[input_count++] = g.UseImmediate(index);
+ addressing_mode = kMode_MRI;
+ } else {
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ addressing_mode = kMode_MR1;
+ }
+ outputs[0] = g.DefineAsFixed(node, rax);
+ InstructionOperand temp[1];
+ temp[0] = g.TempRegister();
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ Emit(code, 1, outputs, input_count, inputs, 1, temp);
+}
+
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
+ VisitWord64AtomicBinaryOperation( \
+ node, kX64Word64Atomic##op##Uint8, kX64Word64Atomic##op##Uint16, \
+ kX64Word64Atomic##op##Uint32, kX64Word64Atomic##op##Uint64); \
+ }
+VISIT_ATOMIC_BINOP(Add)
+VISIT_ATOMIC_BINOP(Sub)
+VISIT_ATOMIC_BINOP(And)
+VISIT_ATOMIC_BINOP(Or)
+VISIT_ATOMIC_BINOP(Xor)
+#undef VISIT_ATOMIC_BINOP
+
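In VisitWord64AtomicBinaryOperation above, the result is pinned to rax and one temp register is reserved; those constraints fit the usual x64 lowering of a fetch-and-op as a cmpxchg retry loop, where rax must hold the expected value (an inference from the register constraints, not something the diff states). The retry pattern, sketched with the standard library:

#include <atomic>
#include <cstdint>

// Sketch of fetch-and (atomic AND returning the old value) built from a
// compare-exchange retry loop -- the kind of sequence the fixed-rax output
// plus one temp register above is geared towards.
uint64_t FetchAnd(std::atomic<uint64_t>& cell, uint64_t mask) {
  uint64_t old_value = cell.load(std::memory_order_relaxed);  // the "rax" value
  uint64_t new_value;                                         // the "temp"
  do {
    new_value = old_value & mask;
    // On failure, compare_exchange_weak refreshes old_value and we retry.
  } while (!cell.compare_exchange_weak(old_value, new_value,
                                       std::memory_order_seq_cst,
                                       std::memory_order_relaxed));
  return old_value;
}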
#define SIMD_TYPES(V) \
V(F32x4) \
V(I32x4) \
@@ -2443,7 +2593,7 @@ SIMD_TYPES(VISIT_SIMD_SPLAT)
#define VISIT_SIMD_EXTRACT_LANE(Type) \
void InstructionSelector::Visit##Type##ExtractLane(Node* node) { \
X64OperandGenerator g(this); \
- int32_t lane = OpParameter<int32_t>(node); \
+ int32_t lane = OpParameter<int32_t>(node->op()); \
Emit(kX64##Type##ExtractLane, g.DefineAsRegister(node), \
g.UseRegister(node->InputAt(0)), g.UseImmediate(lane)); \
}
@@ -2453,7 +2603,7 @@ SIMD_TYPES(VISIT_SIMD_EXTRACT_LANE)
#define VISIT_SIMD_REPLACE_LANE(Type) \
void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
X64OperandGenerator g(this); \
- int32_t lane = OpParameter<int32_t>(node); \
+ int32_t lane = OpParameter<int32_t>(node->op()); \
Emit(kX64##Type##ReplaceLane, g.DefineSameAsFirst(node), \
g.UseRegister(node->InputAt(0)), g.UseImmediate(lane), \
g.Use(node->InputAt(1))); \
@@ -2464,7 +2614,7 @@ SIMD_TYPES(VISIT_SIMD_REPLACE_LANE)
#define VISIT_SIMD_SHIFT(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
X64OperandGenerator g(this); \
- int32_t value = OpParameter<int32_t>(node); \
+ int32_t value = OpParameter<int32_t>(node->op()); \
Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
g.UseRegister(node->InputAt(0)), g.UseImmediate(value)); \
}
@@ -2539,9 +2689,6 @@ InstructionSelector::AlignmentRequirements() {
FullUnalignedAccessSupport();
}
-// static
-bool InstructionSelector::SupportsSpeculationPoisoning() { return true; }
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/contexts-inl.h b/deps/v8/src/contexts-inl.h
index 22e3606e98..3ff737e5f4 100644
--- a/deps/v8/src/contexts-inl.h
+++ b/deps/v8/src/contexts-inl.h
@@ -80,48 +80,39 @@ void Context::set_native_context(Context* context) {
}
bool Context::IsNativeContext() const {
- Map* map = this->map();
- return map == map->GetHeap()->native_context_map();
+ return map()->instance_type() == NATIVE_CONTEXT_TYPE;
}
bool Context::IsFunctionContext() const {
- Map* map = this->map();
- return map == map->GetHeap()->function_context_map();
+ return map()->instance_type() == FUNCTION_CONTEXT_TYPE;
}
bool Context::IsCatchContext() const {
- Map* map = this->map();
- return map == map->GetHeap()->catch_context_map();
+ return map()->instance_type() == CATCH_CONTEXT_TYPE;
}
bool Context::IsWithContext() const {
- Map* map = this->map();
- return map == map->GetHeap()->with_context_map();
+ return map()->instance_type() == WITH_CONTEXT_TYPE;
}
bool Context::IsDebugEvaluateContext() const {
- Map* map = this->map();
- return map == map->GetHeap()->debug_evaluate_context_map();
+ return map()->instance_type() == DEBUG_EVALUATE_CONTEXT_TYPE;
}
bool Context::IsBlockContext() const {
- Map* map = this->map();
- return map == map->GetHeap()->block_context_map();
+ return map()->instance_type() == BLOCK_CONTEXT_TYPE;
}
bool Context::IsModuleContext() const {
- Map* map = this->map();
- return map == map->GetHeap()->module_context_map();
+ return map()->instance_type() == MODULE_CONTEXT_TYPE;
}
bool Context::IsEvalContext() const {
- Map* map = this->map();
- return map == map->GetHeap()->eval_context_map();
+ return map()->instance_type() == EVAL_CONTEXT_TYPE;
}
bool Context::IsScriptContext() const {
- Map* map = this->map();
- return map == map->GetHeap()->script_context_map();
+ return map()->instance_type() == SCRIPT_CONTEXT_TYPE;
}
bool Context::HasSameSecurityTokenAs(Context* that) const {
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index 02337fb456..6fc6f2d1c3 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -57,8 +57,6 @@ enum ContextLookupFlags {
V(REFLECT_CONSTRUCT_INDEX, JSFunction, reflect_construct) \
V(REFLECT_DEFINE_PROPERTY_INDEX, JSFunction, reflect_define_property) \
V(REFLECT_DELETE_PROPERTY_INDEX, JSFunction, reflect_delete_property) \
- V(SPREAD_ARGUMENTS_INDEX, JSFunction, spread_arguments) \
- V(SPREAD_ITERABLE_INDEX, JSFunction, spread_iterable) \
V(MATH_FLOOR_INDEX, JSFunction, math_floor) \
V(MATH_POW_INDEX, JSFunction, math_pow) \
V(PROMISE_INTERNAL_CONSTRUCTOR_INDEX, JSFunction, \
@@ -71,7 +69,6 @@ enum ContextLookupFlags {
V(ARRAY_PUSH_INDEX, JSFunction, array_push) \
V(ARRAY_SHIFT_INDEX, JSFunction, array_shift) \
V(ARRAY_SPLICE_INDEX, JSFunction, array_splice) \
- V(ARRAY_SLICE_INDEX, JSFunction, array_slice) \
V(ARRAY_UNSHIFT_INDEX, JSFunction, array_unshift) \
V(ARRAY_ENTRIES_ITERATOR_INDEX, JSFunction, array_entries_iterator) \
V(ARRAY_FOR_EACH_ITERATOR_INDEX, JSFunction, array_for_each_iterator) \
@@ -107,83 +104,6 @@ enum ContextLookupFlags {
V(WEAKMAP_SET_INDEX, JSFunction, weakmap_set) \
V(WEAKSET_ADD_INDEX, JSFunction, weakset_add)
-// If you add something here, also add it to ARRAY_ITERATOR_LIST in
-// bootstrapper.cc.
-#define NATIVE_CONTEXT_JS_ARRAY_ITERATOR_MAPS(V) \
- V(TYPED_ARRAY_KEY_ITERATOR_MAP_INDEX, Map, typed_array_key_iterator_map) \
- V(FAST_ARRAY_KEY_ITERATOR_MAP_INDEX, Map, fast_array_key_iterator_map) \
- V(GENERIC_ARRAY_KEY_ITERATOR_MAP_INDEX, Map, array_key_iterator_map) \
- \
- V(UINT8_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
- uint8_array_key_value_iterator_map) \
- V(INT8_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
- int8_array_key_value_iterator_map) \
- V(UINT16_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
- uint16_array_key_value_iterator_map) \
- V(INT16_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
- int16_array_key_value_iterator_map) \
- V(UINT32_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
- uint32_array_key_value_iterator_map) \
- V(INT32_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
- int32_array_key_value_iterator_map) \
- V(FLOAT32_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
- float32_array_key_value_iterator_map) \
- V(FLOAT64_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
- float64_array_key_value_iterator_map) \
- V(UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
- uint8_clamped_array_key_value_iterator_map) \
- V(BIGUINT64_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
- biguint64_array_key_value_iterator_map) \
- V(BIGINT64_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
- bigint64_array_key_value_iterator_map) \
- \
- V(FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
- fast_smi_array_key_value_iterator_map) \
- V(FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
- fast_holey_smi_array_key_value_iterator_map) \
- V(FAST_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
- fast_array_key_value_iterator_map) \
- V(FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
- fast_holey_array_key_value_iterator_map) \
- V(FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
- fast_double_array_key_value_iterator_map) \
- V(FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
- fast_holey_double_array_key_value_iterator_map) \
- V(GENERIC_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
- array_key_value_iterator_map) \
- \
- V(UINT8_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, uint8_array_value_iterator_map) \
- V(INT8_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, int8_array_value_iterator_map) \
- V(UINT16_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, \
- uint16_array_value_iterator_map) \
- V(INT16_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, int16_array_value_iterator_map) \
- V(UINT32_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, \
- uint32_array_value_iterator_map) \
- V(INT32_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, int32_array_value_iterator_map) \
- V(FLOAT32_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, \
- float32_array_value_iterator_map) \
- V(FLOAT64_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, \
- float64_array_value_iterator_map) \
- V(UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, \
- uint8_clamped_array_value_iterator_map) \
- V(BIGUINT64_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, \
- biguint64_array_value_iterator_map) \
- V(BIGINT64_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, \
- bigint64_array_value_iterator_map) \
- \
- V(FAST_SMI_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, \
- fast_smi_array_value_iterator_map) \
- V(FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, \
- fast_holey_smi_array_value_iterator_map) \
- V(FAST_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, fast_array_value_iterator_map) \
- V(FAST_HOLEY_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, \
- fast_holey_array_value_iterator_map) \
- V(FAST_DOUBLE_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, \
- fast_double_array_value_iterator_map) \
- V(FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, \
- fast_holey_double_array_value_iterator_map) \
- V(GENERIC_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, array_value_iterator_map)
-
#define NATIVE_CONTEXT_FIELDS(V) \
V(GLOBAL_PROXY_INDEX, JSObject, global_proxy_object) \
V(EMBEDDER_DATA_INDEX, FixedArray, embedder_data) \
@@ -235,10 +155,9 @@ enum ContextLookupFlags {
V(GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX, Map, generator_object_prototype_map) \
V(ASYNC_GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX, Map, \
async_generator_object_prototype_map) \
+ V(INITIAL_ARRAY_ITERATOR_MAP_INDEX, Map, initial_array_iterator_map) \
V(INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX, JSObject, \
initial_array_iterator_prototype) \
- V(INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX, Map, \
- initial_array_iterator_prototype_map) \
V(INITIAL_ARRAY_PROTOTYPE_INDEX, JSObject, initial_array_prototype) \
V(INITIAL_ARRAY_PROTOTYPE_MAP_INDEX, Map, initial_array_prototype_map) \
V(INITIAL_ERROR_PROTOTYPE_INDEX, JSObject, initial_error_prototype) \
@@ -325,6 +244,8 @@ enum ContextLookupFlags {
V(REGEXP_INTERNAL_MATCH_INFO_INDEX, RegExpMatchInfo, \
regexp_internal_match_info) \
V(REGEXP_PROTOTYPE_MAP_INDEX, Map, regexp_prototype_map) \
+ V(INITIAL_REGEXP_STRING_ITERATOR_PROTOTYPE_MAP_INDEX, Map, \
+ initial_regexp_string_iterator_prototype_map_index) \
V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map) \
V(SCRIPT_CONTEXT_TABLE_INDEX, ScriptContextTable, script_context_table) \
V(SCRIPT_FUNCTION_INDEX, JSFunction, script_function) \
@@ -387,6 +308,7 @@ enum ContextLookupFlags {
V(STRING_ITERATOR_MAP_INDEX, Map, string_iterator_map) \
V(SYMBOL_FUNCTION_INDEX, JSFunction, symbol_function) \
V(NATIVE_FUNCTION_MAP_INDEX, Map, native_function_map) \
+ V(WASM_GLOBAL_CONSTRUCTOR_INDEX, JSFunction, wasm_global_constructor) \
V(WASM_INSTANCE_CONSTRUCTOR_INDEX, JSFunction, wasm_instance_constructor) \
V(WASM_MEMORY_CONSTRUCTOR_INDEX, JSFunction, wasm_memory_constructor) \
V(WASM_MODULE_CONSTRUCTOR_INDEX, JSFunction, wasm_module_constructor) \
@@ -398,8 +320,7 @@ enum ContextLookupFlags {
V(UINT8_ARRAY_FUN_INDEX, JSFunction, uint8_array_fun) \
V(UINT8_CLAMPED_ARRAY_FUN_INDEX, JSFunction, uint8_clamped_array_fun) \
NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V) \
- NATIVE_CONTEXT_IMPORTED_FIELDS(V) \
- NATIVE_CONTEXT_JS_ARRAY_ITERATOR_MAPS(V)
+ NATIVE_CONTEXT_IMPORTED_FIELDS(V)
// A table of all script contexts. Every loaded top-level script with top-level
// lexical declarations contributes its ScriptContext into this table.
@@ -429,16 +350,17 @@ class ScriptContextTable : public FixedArray {
// If it returns true, the variable is found and `result` contains
// valid information about its location.
// If it returns false, `result` is untouched.
- MUST_USE_RESULT
+ V8_WARN_UNUSED_RESULT
static bool Lookup(Handle<ScriptContextTable> table, Handle<String> name,
LookupResult* result);
- MUST_USE_RESULT
+ V8_WARN_UNUSED_RESULT
static Handle<ScriptContextTable> Extend(Handle<ScriptContextTable> table,
Handle<Context> script_context);
static const int kUsedSlotIndex = 0;
static const int kFirstContextSlotIndex = 1;
+ static const int kMinLength = kFirstContextSlotIndex;
DISALLOW_IMPLICIT_CONSTRUCTORS(ScriptContextTable);
};
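The MUST_USE_RESULT to V8_WARN_UNUSED_RESULT change above is a macro rename; either way the point is that callers of Lookup() and Extend() cannot silently drop the return value. A small standalone sketch of the effect, using the standard [[nodiscard]] attribute as a stand-in for the V8 macro (the macro's exact expansion is not shown in this diff):

#include <cstdio>

// Stand-in for V8_WARN_UNUSED_RESULT.
[[nodiscard]] bool Lookup(int key, int* out) {
  if (key != 42) return false;
  *out = 7;
  return true;
}

int main() {
  int result = 0;
  // Lookup(1, &result);          // would warn: return value discarded
  if (Lookup(42, &result)) {      // correct: the bool guards use of `result`
    std::printf("%d\n", result);
  }
}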
diff --git a/deps/v8/src/conversions.cc b/deps/v8/src/conversions.cc
index 827ccbd773..8aa4abccda 100644
--- a/deps/v8/src/conversions.cc
+++ b/deps/v8/src/conversions.cc
@@ -12,8 +12,8 @@
#include "src/assert-scope.h"
#include "src/char-predicates-inl.h"
#include "src/dtoa.h"
-#include "src/factory.h"
#include "src/handles.h"
+#include "src/heap/factory.h"
#include "src/objects-inl.h"
#include "src/objects/bigint.h"
#include "src/strtod.h"
diff --git a/deps/v8/src/counters.h b/deps/v8/src/counters.h
index e06cb8b66d..3c674b0ae0 100644
--- a/deps/v8/src/counters.h
+++ b/deps/v8/src/counters.h
@@ -694,6 +694,8 @@ class RuntimeCallTimer final {
V(Array_New) \
V(BigInt64Array_New) \
V(BigUint64Array_New) \
+ V(BigIntObject_New) \
+ V(BigIntObject_BigIntValue) \
V(BooleanObject_BooleanValue) \
V(BooleanObject_New) \
V(Context_New) \
@@ -767,6 +769,7 @@ class RuntimeCallTimer final {
V(ObjectTemplate_New) \
V(ObjectTemplate_NewInstance) \
V(Object_ToArrayIndex) \
+ V(Object_ToBigInt) \
V(Object_ToDetailString) \
V(Object_ToInt32) \
V(Object_ToInteger) \
@@ -930,6 +933,7 @@ class RuntimeCallTimer final {
V(KeyedStoreIC_SlowStub) \
V(KeyedStoreIC_StoreFastElementStub) \
V(KeyedStoreIC_StoreElementStub) \
+ V(StoreInArrayLiteralIC_SlowStub) \
V(LoadGlobalIC_LoadScriptContextField) \
V(LoadGlobalIC_SlowStub) \
V(LoadIC_FunctionPrototypeStub) \
@@ -1144,10 +1148,20 @@ class RuntimeCallTimerScope {
#define HISTOGRAM_TIMER_LIST(HT) \
/* Garbage collection timers. */ \
HT(gc_compactor, V8.GCCompactor, 10000, MILLISECOND) \
+ HT(gc_compactor_background, V8.GCCompactorBackground, 10000, MILLISECOND) \
+ HT(gc_compactor_foreground, V8.GCCompactorForeground, 10000, MILLISECOND) \
HT(gc_finalize, V8.GCFinalizeMC, 10000, MILLISECOND) \
+ HT(gc_finalize_background, V8.GCFinalizeMCBackground, 10000, MILLISECOND) \
+ HT(gc_finalize_foreground, V8.GCFinalizeMCForeground, 10000, MILLISECOND) \
HT(gc_finalize_reduce_memory, V8.GCFinalizeMCReduceMemory, 10000, \
MILLISECOND) \
+ HT(gc_finalize_reduce_memory_background, \
+ V8.GCFinalizeMCReduceMemoryBackground, 10000, MILLISECOND) \
+ HT(gc_finalize_reduce_memory_foreground, \
+ V8.GCFinalizeMCReduceMemoryForeground, 10000, MILLISECOND) \
HT(gc_scavenger, V8.GCScavenger, 10000, MILLISECOND) \
+ HT(gc_scavenger_background, V8.GCScavengerBackground, 10000, MILLISECOND) \
+ HT(gc_scavenger_foreground, V8.GCScavengerForeground, 10000, MILLISECOND) \
HT(gc_context, V8.GCContext, 10000, \
MILLISECOND) /* GC context cleanup time */ \
HT(gc_idle_notification, V8.GCIdleNotification, 10000, MILLISECOND) \
diff --git a/deps/v8/src/d8-console.cc b/deps/v8/src/d8-console.cc
index 7f057e2867..cb7334af78 100644
--- a/deps/v8/src/d8-console.cc
+++ b/deps/v8/src/d8-console.cc
@@ -26,7 +26,7 @@ void WriteToFile(const char* prefix, FILE* file, Isolate* isolate,
int n = static_cast<int>(fwrite(*str, sizeof(**str), str.length(), file));
if (n != str.length()) {
printf("Error in fwrite\n");
- Shell::Exit(1);
+ base::OS::ExitProcess(1);
}
}
fprintf(file, "\n");
diff --git a/deps/v8/src/d8-posix.cc b/deps/v8/src/d8-posix.cc
index c16b963776..1172bf8536 100644
--- a/deps/v8/src/d8-posix.cc
+++ b/deps/v8/src/d8-posix.cc
@@ -874,12 +874,4 @@ void Shell::AddOSMethods(Isolate* isolate, Local<ObjectTemplate> os_templ) {
FunctionTemplate::New(isolate, RemoveDirectory));
}
-void Shell::Exit(int exit_code) {
- // Use _exit instead of exit to avoid races between isolate
- // threads and static destructors.
- fflush(stdout);
- fflush(stderr);
- _exit(exit_code);
-}
-
} // namespace v8
diff --git a/deps/v8/src/d8-windows.cc b/deps/v8/src/d8-windows.cc
index 499292330f..4dee20f440 100644
--- a/deps/v8/src/d8-windows.cc
+++ b/deps/v8/src/d8-windows.cc
@@ -15,12 +15,4 @@ char* Shell::ReadCharsFromTcpPort(const char* name, int* size_out) {
return nullptr;
}
-void Shell::Exit(int exit_code) {
- // Use TerminateProcess avoid races between isolate threads and
- // static destructors.
- fflush(stdout);
- fflush(stderr);
- TerminateProcess(GetCurrentProcess(), exit_code);
-}
-
} // namespace v8
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index 7399af9995..2f6d833b40 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -40,14 +40,12 @@
#include "src/trap-handler/trap-handler.h"
#include "src/utils.h"
#include "src/v8.h"
+#include "src/wasm/wasm-engine.h"
#if !defined(_WIN32) && !defined(_WIN64)
#include <unistd.h> // NOLINT
#else
#include <windows.h> // NOLINT
-#if defined(_MSC_VER)
-#include <crtdbg.h> // NOLINT
-#endif // defined(_MSC_VER)
#endif // !defined(_WIN32) && !defined(_WIN64)
#ifndef DCHECK
@@ -183,19 +181,17 @@ class PredictablePlatform : public Platform {
return platform_->GetForegroundTaskRunner(isolate);
}
- std::shared_ptr<TaskRunner> GetBackgroundTaskRunner(
+ std::shared_ptr<TaskRunner> GetWorkerThreadsTaskRunner(
v8::Isolate* isolate) override {
// Return the foreground task runner here, so that all tasks get executed
// sequentially in a predictable order.
return platform_->GetForegroundTaskRunner(isolate);
}
- void CallOnBackgroundThread(Task* task,
- ExpectedRuntime expected_runtime) override {
+ void CallOnWorkerThread(std::unique_ptr<Task> task) override {
// It's not defined when background tasks are being executed, so we can just
// execute them right away.
task->Run();
- delete task;
}
void CallOnForegroundThread(v8::Isolate* isolate, Task* task) override {
@@ -573,8 +569,9 @@ void Shell::StoreInCodeCache(Isolate* isolate, Local<Value> source,
// Executes a string within the current v8 context.
bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
- Local<Value> name, bool print_result,
- bool report_exceptions) {
+ Local<Value> name, PrintResult print_result,
+ ReportExceptions report_exceptions,
+ ProcessMessageQueue process_message_queue) {
HandleScope handle_scope(isolate);
TryCatch try_catch(isolate);
try_catch.SetVerbose(true);
@@ -652,7 +649,7 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
StoreInCodeCache(isolate, source, cached_data);
delete cached_data;
}
- if (!EmptyMessageQueues(isolate)) success = false;
+ if (process_message_queue && !EmptyMessageQueues(isolate)) success = false;
data->realm_current_ = data->realm_switch_;
}
Local<Value> result;
@@ -1278,7 +1275,7 @@ void WriteToFile(FILE* file, const v8::FunctionCallbackInfo<v8::Value>& args) {
int n = static_cast<int>(fwrite(*str, sizeof(**str), str.length(), file));
if (n != str.length()) {
printf("Error in fwrite\n");
- Shell::Exit(1);
+ base::OS::ExitProcess(1);
}
}
}
@@ -1374,11 +1371,14 @@ void Shell::Load(const v8::FunctionCallbackInfo<v8::Value>& args) {
Throw(args.GetIsolate(), "Error loading file");
return;
}
- if (!ExecuteString(args.GetIsolate(), source,
- String::NewFromUtf8(args.GetIsolate(), *file,
- NewStringType::kNormal)
- .ToLocalChecked(),
- false, !options.quiet_load)) {
+ if (!ExecuteString(
+ args.GetIsolate(), source,
+ String::NewFromUtf8(args.GetIsolate(), *file,
+ NewStringType::kNormal)
+ .ToLocalChecked(),
+ kNoPrintResult,
+ options.quiet_load ? kNoReportExceptions : kReportExceptions,
+ kNoProcessMessageQueue)) {
Throw(args.GetIsolate(), "Error executing file");
return;
}
@@ -1497,7 +1497,7 @@ void Shell::QuitOnce(v8::FunctionCallbackInfo<v8::Value>* args) {
CleanupWorkers();
args->GetIsolate()->Exit();
OnExit(args->GetIsolate());
- Exit(exit_code);
+ base::OS::ExitProcess(exit_code);
}
@@ -1621,7 +1621,7 @@ void Shell::MapCounters(v8::Isolate* isolate, const char* name) {
(counters_file_ == nullptr) ? nullptr : counters_file_->memory();
if (memory == nullptr) {
printf("Could not map counters file %s\n", name);
- Exit(1);
+ base::OS::ExitProcess(1);
}
counters_ = static_cast<CounterCollection*>(memory);
isolate->SetCounterFunction(LookupCounter);
@@ -2270,7 +2270,8 @@ void Shell::RunShell(Isolate* isolate) {
printf("d8> ");
Local<String> input = Shell::ReadFromStdin(isolate);
if (input.IsEmpty()) break;
- ExecuteString(isolate, input, name, true, true);
+ ExecuteString(isolate, input, name, kPrintResult, kReportExceptions,
+ kProcessMessageQueue);
}
printf("\n");
// We need to explicitly clean up the module embedder data for
@@ -2432,7 +2433,9 @@ void SourceGroup::Execute(Isolate* isolate) {
String::NewFromUtf8(isolate, argv_[i + 1], NewStringType::kNormal)
.ToLocalChecked();
Shell::options.script_executed = true;
- if (!Shell::ExecuteString(isolate, source, file_name, false, true)) {
+ if (!Shell::ExecuteString(isolate, source, file_name,
+ Shell::kNoPrintResult, Shell::kReportExceptions,
+ Shell::kNoProcessMessageQueue)) {
exception_was_thrown = true;
break;
}
@@ -2460,16 +2463,18 @@ void SourceGroup::Execute(Isolate* isolate) {
Local<String> source = ReadFile(isolate, arg);
if (source.IsEmpty()) {
printf("Error reading '%s'\n", arg);
- Shell::Exit(1);
+ base::OS::ExitProcess(1);
}
Shell::options.script_executed = true;
- if (!Shell::ExecuteString(isolate, source, file_name, false, true)) {
+ if (!Shell::ExecuteString(isolate, source, file_name, Shell::kNoPrintResult,
+ Shell::kReportExceptions,
+ Shell::kProcessMessageQueue)) {
exception_was_thrown = true;
break;
}
}
if (exception_was_thrown != Shell::options.expected_to_throw) {
- Shell::Exit(1);
+ base::OS::ExitProcess(1);
}
}
@@ -2488,8 +2493,7 @@ void SourceGroup::ExecuteInThread() {
Shell::HostImportModuleDynamically);
isolate->SetHostInitializeImportMetaObjectCallback(
Shell::HostInitializeImportMetaObject);
-
- Shell::EnsureEventLoopInitialized(isolate);
+ Shell::SetWaitUntilDone(isolate, false);
D8Console console(isolate);
debug::SetConsoleDelegate(isolate, &console);
for (int i = 0; i < Shell::options.stress_runs; ++i) {
@@ -2672,7 +2676,9 @@ void Worker::ExecuteInThread() {
Local<String> source =
String::NewFromUtf8(isolate, script_, NewStringType::kNormal)
.ToLocalChecked();
- if (Shell::ExecuteString(isolate, source, file_name, false, true)) {
+ if (Shell::ExecuteString(
+ isolate, source, file_name, Shell::kNoPrintResult,
+ Shell::kReportExceptions, Shell::kProcessMessageQueue)) {
// Get the message handler
Local<Value> onmessage =
global->Get(context, String::NewFromUtf8(isolate, "onmessage",
@@ -2794,6 +2800,11 @@ bool Shell::SetOptions(int argc, char* argv[]) {
} else if (strcmp(argv[i], "--omit-quit") == 0) {
options.omit_quit = true;
argv[i] = nullptr;
+ } else if (strcmp(argv[i], "--no-wait-for-wasm") == 0) {
+ // TODO(herhut) Remove this flag once wasm compilation is fully
+ // isolate-independent.
+ options.wait_for_wasm = false;
+ argv[i] = nullptr;
} else if (strcmp(argv[i], "-f") == 0) {
// Ignore any -f flags for compatibility with other stand-alone
// JavaScript engines.
@@ -2905,13 +2916,12 @@ bool Shell::SetOptions(int argc, char* argv[]) {
return true;
}
-
int Shell::RunMain(Isolate* isolate, int argc, char* argv[], bool last_run) {
for (int i = 1; i < options.num_isolates; ++i) {
options.isolate_sources[i].StartExecuteInThread();
}
{
- EnsureEventLoopInitialized(isolate);
+ SetWaitUntilDone(isolate, false);
if (options.lcov_file) {
debug::Coverage::SelectMode(isolate, debug::Coverage::kBlockCount);
}
@@ -2962,11 +2972,6 @@ void Shell::CollectGarbage(Isolate* isolate) {
}
}
-void Shell::EnsureEventLoopInitialized(Isolate* isolate) {
- v8::platform::EnsureEventLoopInitialized(GetDefaultPlatform(), isolate);
- SetWaitUntilDone(isolate, false);
-}
-
void Shell::SetWaitUntilDone(Isolate* isolate, bool value) {
base::LockGuard<base::Mutex> guard(isolate_status_lock_.Pointer());
if (isolate_status_.count(isolate) == 0) {
@@ -3007,13 +3012,19 @@ bool ProcessMessages(Isolate* isolate,
} // anonymous namespace
void Shell::CompleteMessageLoop(Isolate* isolate) {
- ProcessMessages(isolate, [isolate]() {
+ auto get_waiting_behaviour = [isolate]() {
base::LockGuard<base::Mutex> guard(isolate_status_lock_.Pointer());
DCHECK_GT(isolate_status_.count(isolate), 0);
- return isolate_status_[isolate]
- ? platform::MessageLoopBehavior::kWaitForWork
- : platform::MessageLoopBehavior::kDoNotWait;
- });
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::wasm::CompilationManager* wasm_compilation_manager =
+ i_isolate->wasm_engine()->compilation_manager();
+ bool should_wait = (options.wait_for_wasm &&
+ wasm_compilation_manager->HasRunningCompileJob()) ||
+ isolate_status_[isolate];
+ return should_wait ? platform::MessageLoopBehavior::kWaitForWork
+ : platform::MessageLoopBehavior::kDoNotWait;
+ };
+ ProcessMessages(isolate, get_waiting_behaviour);
}
bool Shell::EmptyMessageQueues(Isolate* isolate) {
@@ -3261,21 +3272,7 @@ void Shell::CleanupWorkers() {
int Shell::Main(int argc, char* argv[]) {
std::ofstream trace_file;
-#if (defined(_WIN32) || defined(_WIN64))
- UINT new_flags =
- SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX;
- UINT existing_flags = SetErrorMode(new_flags);
- SetErrorMode(existing_flags | new_flags);
-#if defined(_MSC_VER)
- _CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_DEBUG | _CRTDBG_MODE_FILE);
- _CrtSetReportFile(_CRT_WARN, _CRTDBG_FILE_STDERR);
- _CrtSetReportMode(_CRT_ASSERT, _CRTDBG_MODE_DEBUG | _CRTDBG_MODE_FILE);
- _CrtSetReportFile(_CRT_ASSERT, _CRTDBG_FILE_STDERR);
- _CrtSetReportMode(_CRT_ERROR, _CRTDBG_MODE_DEBUG | _CRTDBG_MODE_FILE);
- _CrtSetReportFile(_CRT_ERROR, _CRTDBG_FILE_STDERR);
- _set_error_mode(_OUT_TO_STDERR);
-#endif // defined(_MSC_VER)
-#endif // defined(_WIN32) || defined(_WIN64)
+ v8::base::EnsureConsoleOutput();
if (!SetOptions(argc, argv)) return 1;
v8::V8::InitializeICUDefaultLocation(argv[0], options.icu_data_file);
@@ -3338,8 +3335,9 @@ int Shell::Main(int argc, char* argv[]) {
create_params.add_histogram_sample_callback = AddHistogramSample;
}
- if (i::trap_handler::IsTrapHandlerEnabled()) {
- if (!v8::V8::RegisterDefaultSignalHandler()) {
+ if (V8_TRAP_HANDLER_SUPPORTED && i::FLAG_wasm_trap_handler) {
+ constexpr bool use_default_signal_handler = true;
+ if (!v8::V8::EnableWebAssemblyTrapHandler(use_default_signal_handler)) {
fprintf(stderr, "Could not register signal handler");
exit(1);
}
@@ -3399,15 +3397,6 @@ int Shell::Main(int argc, char* argv[]) {
ShellOptions::CodeCacheOptions::kNoProduceCache) {
printf("============ Run: Produce code cache ============\n");
// First run to produce the cache
- result = RunMain(isolate, argc, argv, false);
-
- // Change the options to consume cache
- DCHECK(options.compile_options == v8::ScriptCompiler::kEagerCompile ||
- options.compile_options == v8::ScriptCompiler::kNoCompileOptions);
- options.compile_options = v8::ScriptCompiler::kConsumeCodeCache;
-
- printf("============ Run: Consume code cache ============\n");
- // Second run to consume the cache in new isolate
Isolate::CreateParams create_params;
create_params.array_buffer_allocator = Shell::array_buffer_allocator;
i::FLAG_hash_seed ^= 1337; // Use a different hash seed.
@@ -3423,9 +3412,19 @@ int Shell::Main(int argc, char* argv[]) {
PerIsolateData data(isolate2);
Isolate::Scope isolate_scope(isolate2);
- result = RunMain(isolate2, argc, argv, true);
+ result = RunMain(isolate2, argc, argv, false);
}
isolate2->Dispose();
+
+ // Change the options to consume cache
+ DCHECK(options.compile_options == v8::ScriptCompiler::kEagerCompile ||
+ options.compile_options == v8::ScriptCompiler::kNoCompileOptions);
+ options.compile_options = v8::ScriptCompiler::kConsumeCodeCache;
+
+ printf("============ Run: Consume code cache ============\n");
+ // Second run to consume the cache in current isolate
+ result = RunMain(isolate, argc, argv, true);
+ options.compile_options = v8::ScriptCompiler::kNoCompileOptions;
} else {
bool last_run = true;
result = RunMain(isolate, argc, argv, last_run);
diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h
index bf4793ef04..731b080880 100644
--- a/deps/v8/src/d8.h
+++ b/deps/v8/src/d8.h
@@ -292,6 +292,7 @@ class ShellOptions {
send_idle_notification(false),
invoke_weak_callbacks(false),
omit_quit(false),
+ wait_for_wasm(true),
stress_opt(false),
stress_deopt(false),
stress_runs(1),
@@ -327,6 +328,7 @@ class ShellOptions {
bool send_idle_notification;
bool invoke_weak_callbacks;
bool omit_quit;
+ bool wait_for_wasm;
bool stress_opt;
bool stress_deopt;
int stress_runs;
@@ -356,9 +358,20 @@ class ShellOptions {
class Shell : public i::AllStatic {
public:
+ enum PrintResult : bool { kPrintResult = true, kNoPrintResult = false };
+ enum ReportExceptions : bool {
+ kReportExceptions = true,
+ kNoReportExceptions = false
+ };
+ enum ProcessMessageQueue : bool {
+ kProcessMessageQueue = true,
+ kNoProcessMessageQueue = false
+ };
+
static bool ExecuteString(Isolate* isolate, Local<String> source,
- Local<Value> name, bool print_result,
- bool report_exceptions);
+ Local<Value> name, PrintResult print_result,
+ ReportExceptions report_exceptions,
+ ProcessMessageQueue process_message_queue);
static bool ExecuteModule(Isolate* isolate, const char* file_name);
static void ReportException(Isolate* isolate, TryCatch* try_catch);
static Local<String> ReadFile(Isolate* isolate, const char* name);
@@ -369,7 +382,6 @@ class Shell : public i::AllStatic {
static void OnExit(Isolate* isolate);
static void CollectGarbage(Isolate* isolate);
static bool EmptyMessageQueues(Isolate* isolate);
- static void EnsureEventLoopInitialized(Isolate* isolate);
static void CompleteMessageLoop(Isolate* isolate);
static std::unique_ptr<SerializationData> SerializeValue(
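The ExecuteString signature change above replaces three adjacent bool parameters with bool-backed enums (PrintResult, ReportExceptions, ProcessMessageQueue), so call sites read as Shell::kNoPrintResult, Shell::kReportExceptions, ... instead of an opaque (false, true, ...), and the distinct enum types keep the flags from being swapped. A minimal sketch of the idiom outside of d8 (all names here are invented):

#include <cstdio>
#include <string>

// Bool-backed enums: same storage as bool, but call sites name their intent.
enum PrintResult : bool { kPrintResult = true, kNoPrintResult = false };
enum ReportErrors : bool { kReportErrors = true, kNoReportErrors = false };

bool Run(const std::string& source, PrintResult print, ReportErrors report) {
  bool ok = !source.empty();  // stand-in for real execution
  if (ok && print) std::printf("%s\n", source.c_str());
  if (!ok && report) std::fprintf(stderr, "error: empty source\n");
  return ok;
}

int main() {
  // Reads better than Run(src, true, false), and passing kReportErrors where a
  // PrintResult is expected is a compile error rather than a silent bug.
  return Run("40 + 2", kPrintResult, kNoReportErrors) ? 0 : 1;
}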
diff --git a/deps/v8/src/d8.isolate b/deps/v8/src/d8.isolate
deleted file mode 100644
index 1c9bd9e35c..0000000000
--- a/deps/v8/src/d8.isolate
+++ /dev/null
@@ -1,16 +0,0 @@
-# Copyright 2015 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-{
- 'variables': {
- 'command': [
- '<(PRODUCT_DIR)/d8<(EXECUTABLE_SUFFIX)',
- ],
- 'files': [
- '<(PRODUCT_DIR)/d8<(EXECUTABLE_SUFFIX)',
- ],
- },
- 'includes': [
- 'base.isolate',
- ],
-} \ No newline at end of file
diff --git a/deps/v8/src/date.cc b/deps/v8/src/date.cc
index a402706ec5..c317810dbb 100644
--- a/deps/v8/src/date.cc
+++ b/deps/v8/src/date.cc
@@ -52,8 +52,14 @@ void DateCache::ResetDateCache() {
dst_usage_counter_ = 0;
before_ = &dst_[0];
after_ = &dst_[1];
- local_offset_ms_ = kInvalidLocalOffsetInMs;
ymd_valid_ = false;
+#ifdef V8_INTL_SUPPORT
+ if (!FLAG_icu_timezone_data) {
+#endif
+ local_offset_ms_ = kInvalidLocalOffsetInMs;
+#ifdef V8_INTL_SUPPORT
+ }
+#endif
tz_cache_->Clear();
tz_name_ = nullptr;
dst_tz_name_ = nullptr;
@@ -206,6 +212,70 @@ void DateCache::BreakDownTime(int64_t time_ms, int* year, int* month, int* day,
*ms = time_in_day_ms % 1000;
}
+// Implements LocalTimeZoneAdjustment(t, isUTC)
+// ECMA 262 - ES#sec-local-time-zone-adjustment
+int DateCache::GetLocalOffsetFromOS(int64_t time_ms, bool is_utc) {
+ double offset;
+#ifdef V8_INTL_SUPPORT
+ if (FLAG_icu_timezone_data) {
+ offset = tz_cache_->LocalTimeOffset(static_cast<double>(time_ms), is_utc);
+ } else {
+#endif
+ // When ICU timezone data is not used, we need to compute the timezone
+ // offset for a given local time.
+ //
+ // The following shows that using DST for (t - LocalTZA - hour) produces
+ // correct conversion where LocalTZA is the timezone offset in winter (no
+ // DST) and the timezone offset is assumed to have no historical change.
+ // Note that it does not work for the past and the future if LocalTZA (no
+ // DST) is different from the current LocalTZA (no DST). For instance,
+ // this will break for Europe/Moscow in 2012 ~ 2013 because LocalTZA was
+ // 4h instead of the current 3h (as of 2018).
+ //
+ // Consider transition to DST at local time L1.
+ // Let L0 = L1 - hour, L2 = L1 + hour,
+ // U1 = UTC time that corresponds to L1,
+ // U0 = U1 - hour.
+ // Transitioning to DST moves local clock one hour forward L1 => L2, so
+ // U0 = UTC time that corresponds to L0 = L0 - LocalTZA,
+ // U1 = UTC time that corresponds to L1 = L1 - LocalTZA,
+ // U1 = UTC time that corresponds to L2 = L2 - LocalTZA - hour.
+ // Note that DST(U0 - hour) = 0, DST(U0) = 0, DST(U1) = 1.
+ // U0 = L0 - LocalTZA - DST(L0 - LocalTZA - hour),
+ // U1 = L1 - LocalTZA - DST(L1 - LocalTZA - hour),
+ // U1 = L2 - LocalTZA - DST(L2 - LocalTZA - hour).
+ //
+ // Consider transition from DST at local time L1.
+ // Let L0 = L1 - hour,
+ // U1 = UTC time that corresponds to L1,
+ // U0 = U1 - hour, U2 = U1 + hour.
+ // Transitioning from DST moves local clock one hour back L1 => L0, so
+ // U0 = UTC time that corresponds to L0 (before transition)
+ // = L0 - LocalTZA - hour.
+ // U1 = UTC time that corresponds to L0 (after transition)
+ // = L0 - LocalTZA = L1 - LocalTZA - hour
+ // U2 = UTC time that corresponds to L1 = L1 - LocalTZA.
+ // Note that DST(U0) = 1, DST(U1) = 0, DST(U2) = 0.
+ // U0 = L0 - LocalTZA - DST(L0 - LocalTZA - hour) = L0 - LocalTZA - DST(U0).
+ // U2 = L1 - LocalTZA - DST(L1 - LocalTZA - hour) = L1 - LocalTZA - DST(U1).
+ // It is impossible to get U1 from local time.
+ if (local_offset_ms_ == kInvalidLocalOffsetInMs) {
+ // This gets the constant LocalTZA (arguments are ignored).
+ local_offset_ms_ =
+ tz_cache_->LocalTimeOffset(static_cast<double>(time_ms), is_utc);
+ }
+ offset = local_offset_ms_;
+ if (!is_utc) {
+ const int kMsPerHour = 3600 * 1000;
+ time_ms -= (offset + kMsPerHour);
+ }
+ offset += DaylightSavingsOffsetInMs(time_ms);
+#ifdef V8_INTL_SUPPORT
+ }
+#endif
+ DCHECK_LT(offset, kInvalidLocalOffsetInMs);
+ return static_cast<int>(offset);
+}
void DateCache::ExtendTheAfterSegment(int time_sec, int offset_ms) {
if (after_->offset_ms == offset_ms &&
diff --git a/deps/v8/src/date.h b/deps/v8/src/date.h
index d9fa13dae5..d25b6080a1 100644
--- a/deps/v8/src/date.h
+++ b/deps/v8/src/date.h
@@ -75,13 +75,9 @@ class DateCache {
return year % 4 == 0 && (year % 100 != 0 || year % 400 == 0);
}
-
- // ECMA 262 - 15.9.1.7.
- int LocalOffsetInMs() {
- if (local_offset_ms_ == kInvalidLocalOffsetInMs) {
- local_offset_ms_ = GetLocalOffsetFromOS();
- }
- return local_offset_ms_;
+ // ECMA 262 - ES#sec-local-time-zone-adjustment
+ int LocalOffsetInMs(int64_t time, bool is_utc) {
+ return GetLocalOffsetFromOS(time, is_utc);
}
@@ -103,53 +99,16 @@ class DateCache {
return static_cast<int>((time_ms - local_ms) / kMsPerMin);
}
- // ECMA 262 - 15.9.1.9
- // LocalTime(t) = t + LocalTZA + DaylightSavingTA(t)
+ // ECMA 262 - ES#sec-localtime-t
+ // LocalTime(t) = t + LocalTZA(t, true)
int64_t ToLocal(int64_t time_ms) {
- return time_ms + LocalOffsetInMs() + DaylightSavingsOffsetInMs(time_ms);
+ return time_ms + LocalOffsetInMs(time_ms, true);
}
- // ECMA 262 - 15.9.1.9
- // UTC(t) = t - LocalTZA - DaylightSavingTA(t - LocalTZA)
+ // ECMA 262 - ES#sec-utc-t
+ // UTC(t) = t - LocalTZA(t, false)
int64_t ToUTC(int64_t time_ms) {
- // We need to compute UTC time that corresponds to the given local time.
- // Literally following spec here leads to incorrect time computation at
- // the points were we transition to and from DST.
- //
- // The following shows that using DST for (t - LocalTZA - hour) produces
- // correct conversion.
- //
- // Consider transition to DST at local time L1.
- // Let L0 = L1 - hour, L2 = L1 + hour,
- // U1 = UTC time that corresponds to L1,
- // U0 = U1 - hour.
- // Transitioning to DST moves local clock one hour forward L1 => L2, so
- // U0 = UTC time that corresponds to L0 = L0 - LocalTZA,
- // U1 = UTC time that corresponds to L1 = L1 - LocalTZA,
- // U1 = UTC time that corresponds to L2 = L2 - LocalTZA - hour.
- // Note that DST(U0 - hour) = 0, DST(U0) = 0, DST(U1) = 1.
- // U0 = L0 - LocalTZA - DST(L0 - LocalTZA - hour),
- // U1 = L1 - LocalTZA - DST(L1 - LocalTZA - hour),
- // U1 = L2 - LocalTZA - DST(L2 - LocalTZA - hour).
- //
- // Consider transition from DST at local time L1.
- // Let L0 = L1 - hour,
- // U1 = UTC time that corresponds to L1,
- // U0 = U1 - hour, U2 = U1 + hour.
- // Transitioning from DST moves local clock one hour back L1 => L0, so
- // U0 = UTC time that corresponds to L0 (before transition)
- // = L0 - LocalTZA - hour.
- // U1 = UTC time that corresponds to L0 (after transition)
- // = L0 - LocalTZA = L1 - LocalTZA - hour
- // U2 = UTC time that corresponds to L1 = L1 - LocalTZA.
- // Note that DST(U0) = 1, DST(U1) = 0, DST(U2) = 0.
- // U0 = L0 - LocalTZA - DST(L0 - LocalTZA - hour) = L0 - LocalTZA - DST(U0).
- // U2 = L1 - LocalTZA - DST(L1 - LocalTZA - hour) = L1 - LocalTZA - DST(U1).
- // It is impossible to get U1 from local time.
-
- const int kMsPerHour = 3600 * 1000;
- time_ms -= LocalOffsetInMs();
- return time_ms - DaylightSavingsOffsetInMs(time_ms - kMsPerHour);
+ return time_ms - LocalOffsetInMs(time_ms, false);
}
@@ -208,11 +167,7 @@ class DateCache {
return static_cast<int>(tz_cache_->DaylightSavingsOffset(time_ms));
}
- virtual int GetLocalOffsetFromOS() {
- double offset = tz_cache_->LocalTimeOffset();
- DCHECK_LT(offset, kInvalidLocalOffsetInMs);
- return static_cast<int>(offset);
- }
+ virtual int GetLocalOffsetFromOS(int64_t time_ms, bool is_utc);
private:
// The implementation relies on the fact that no time zones have
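The date.cc and date.h hunks above fold LocalTZA and DST into one LocalOffsetInMs(time, is_utc) query; in the non-ICU branch, a local time is probed for DST at (t - LocalTZA - hour), which is what the long comment in GetLocalOffsetFromOS justifies. A standalone arithmetic sketch of that branch, with an invented constant LocalTZA and a toy DST table standing in for the timezone cache:

    // Standalone sketch of the non-ICU branch of GetLocalOffsetFromOS above.
    // kLocalTzaMs and the DST table are invented for illustration; they stand
    // in for the values V8 obtains from its timezone cache.
    #include <cstdint>
    #include <cstdio>

    constexpr int64_t kMsPerHour = 3600 * 1000;
    constexpr int64_t kLocalTzaMs = -5 * kMsPerHour;  // assumed winter offset

    // Pretend DST is in effect for the second half of a toy 365-day year.
    int64_t DaylightSavingsOffsetInMs(int64_t utc_ms) {
      return (utc_ms % (365LL * 24 * kMsPerHour)) > (182LL * 24 * kMsPerHour)
                 ? kMsPerHour
                 : 0;
    }

    int64_t LocalOffsetInMs(int64_t time_ms, bool is_utc) {
      int64_t offset = kLocalTzaMs;
      if (!is_utc) {
        // For a local time, probe DST one hour before the candidate UTC
        // instant; this is the (t - LocalTZA - hour) trick described above.
        time_ms -= (offset + kMsPerHour);
      }
      return offset + DaylightSavingsOffsetInMs(time_ms);
    }

    int main() {
      int64_t local = 200LL * 24 * kMsPerHour;              // a local timestamp
      int64_t utc = local - LocalOffsetInMs(local, false);  // UTC(t)
      std::printf("UTC(t) = %lld, back to local = %lld\n",
                  static_cast<long long>(utc),
                  static_cast<long long>(utc + LocalOffsetInMs(utc, true)));
    }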
diff --git a/deps/v8/src/debug/debug-coverage.cc b/deps/v8/src/debug/debug-coverage.cc
index 3eae96aa11..f27e22cfbb 100644
--- a/deps/v8/src/debug/debug-coverage.cc
+++ b/deps/v8/src/debug/debug-coverage.cc
@@ -49,14 +49,14 @@ class SharedToCounterMap
namespace {
int StartPosition(SharedFunctionInfo* info) {
int start = info->function_token_position();
- if (start == kNoSourcePosition) start = info->start_position();
+ if (start == kNoSourcePosition) start = info->StartPosition();
return start;
}
bool CompareSharedFunctionInfo(SharedFunctionInfo* a, SharedFunctionInfo* b) {
int a_start = StartPosition(a);
int b_start = StartPosition(b);
- if (a_start == b_start) return a->end_position() > b->end_position();
+ if (a_start == b_start) return a->EndPosition() > b->EndPosition();
return a_start < b_start;
}
@@ -67,6 +67,11 @@ bool CompareCoverageBlock(const CoverageBlock& a, const CoverageBlock& b) {
return a.start < b.start;
}
+void SortBlockData(std::vector<CoverageBlock>& v) {
+ // Sort according to the block nesting structure.
+ std::sort(v.begin(), v.end(), CompareCoverageBlock);
+}
+
std::vector<CoverageBlock> GetSortedBlockData(Isolate* isolate,
SharedFunctionInfo* shared) {
DCHECK(shared->HasCoverageInfo());
@@ -86,8 +91,7 @@ std::vector<CoverageBlock> GetSortedBlockData(Isolate* isolate,
result.emplace_back(start_pos, until_pos, count);
}
- // Sort according to the block nesting structure.
- std::sort(result.begin(), result.end(), CompareCoverageBlock);
+ SortBlockData(result);
return result;
}
@@ -243,6 +247,21 @@ void MergeDuplicateSingletons(CoverageFunction* function) {
}
}
+void MergeDuplicateRanges(CoverageFunction* function) {
+ CoverageBlockIterator iter(function);
+
+ while (iter.Next() && iter.HasNext()) {
+ CoverageBlock& block = iter.GetBlock();
+ CoverageBlock& next_block = iter.GetNextBlock();
+
+ if (!HaveSameSourceRange(block, next_block)) continue;
+
+ DCHECK_NE(kNoSourcePosition, block.end); // Non-singleton range.
+ next_block.count = std::max(block.count, next_block.count);
+ iter.DeleteBlock();
+ }
+}
+
// Rewrite position singletons (produced by unconditional control flow
// like return statements, and by continuation counters) into source
// ranges that end at the next sibling range or the end of the parent
@@ -274,16 +293,13 @@ void RewritePositionSingletonsToRanges(CoverageFunction* function) {
}
}
-void MergeNestedAndConsecutiveRanges(CoverageFunction* function) {
+void MergeConsecutiveRanges(CoverageFunction* function) {
CoverageBlockIterator iter(function);
while (iter.Next()) {
CoverageBlock& block = iter.GetBlock();
- CoverageBlock& parent = iter.GetParent();
- if (parent.count == block.count) {
- iter.DeleteBlock();
- } else if (iter.HasSiblingOrChild()) {
+ if (iter.HasSiblingOrChild()) {
CoverageBlock& sibling = iter.GetSiblingOrChild();
if (sibling.start == block.end && sibling.count == block.count) {
// Best-effort: this pass may miss mergeable siblings in the presence of
@@ -295,6 +311,21 @@ void MergeNestedAndConsecutiveRanges(CoverageFunction* function) {
}
}
+void MergeNestedRanges(CoverageFunction* function) {
+ CoverageBlockIterator iter(function);
+
+ while (iter.Next()) {
+ CoverageBlock& block = iter.GetBlock();
+ CoverageBlock& parent = iter.GetParent();
+
+ if (parent.count == block.count) {
+ // Transformation may not be valid if sibling blocks exist with a
+ // differing count.
+ iter.DeleteBlock();
+ }
+ }
+}
+
void FilterUncoveredRanges(CoverageFunction* function) {
CoverageBlockIterator iter(function);
@@ -373,7 +404,15 @@ void CollectBlockCoverage(Isolate* isolate, CoverageFunction* function,
RewritePositionSingletonsToRanges(function);
// Merge nested and consecutive ranges with identical counts.
- MergeNestedAndConsecutiveRanges(function);
+  // Note that it's necessary to merge duplicate ranges prior to merging
+  // nested ranges in order to avoid invalid transformations. See crbug.com/827530.
+ MergeConsecutiveRanges(function);
+
+ SortBlockData(function->blocks);
+ MergeDuplicateRanges(function);
+ MergeNestedRanges(function);
+
+ MergeConsecutiveRanges(function);
// Filter out ranges with count == 0 unless the immediate parent range has
// a count != 0.
@@ -382,7 +421,6 @@ void CollectBlockCoverage(Isolate* isolate, CoverageFunction* function,
// Filter out ranges of zero length.
FilterEmptyRanges(function);
-
// Reset all counters on the DebugInfo to zero.
ResetAllBlockCounts(info);
}
@@ -480,7 +518,7 @@ std::unique_ptr<Coverage> Coverage::Collect(
// Use sorted list to reconstruct function nesting.
for (SharedFunctionInfo* info : sorted) {
int start = StartPosition(info);
- int end = info->end_position();
+ int end = info->EndPosition();
uint32_t count = counter_map.Get(info);
// Find the correct outer function based on start position.
while (!nesting.empty() && functions->at(nesting.back()).end <= start) {
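CollectBlockCoverage above now splits the old combined pass into an ordered pipeline: merge consecutive ranges, re-sort the block data, merge duplicate ranges, merge nested ranges, then merge consecutive ranges once more (see the crbug.com/827530 note in the hunk). A toy sketch of the sort-then-deduplicate step, using an invented Block type rather than V8's CoverageBlock:

    // Toy sketch of the sort + duplicate-merge steps in the pipeline above;
    // Block is an invented stand-in for V8's CoverageBlock.
    #include <algorithm>
    #include <cstdio>
    #include <vector>

    struct Block { int start; int end; unsigned count; };

    void SortBlocks(std::vector<Block>& v) {
      std::sort(v.begin(), v.end(), [](const Block& a, const Block& b) {
        if (a.start == b.start) return a.end > b.end;  // parents before children
        return a.start < b.start;
      });
    }

    // Collapse blocks covering the same source range, keeping the larger
    // count, mirroring MergeDuplicateRanges above.
    void MergeDuplicateRanges(std::vector<Block>& v) {
      std::vector<Block> out;
      for (const Block& b : v) {
        if (!out.empty() && out.back().start == b.start && out.back().end == b.end) {
          out.back().count = std::max(out.back().count, b.count);
        } else {
          out.push_back(b);
        }
      }
      v = out;
    }

    int main() {
      std::vector<Block> blocks = {{0, 10, 3}, {2, 6, 3}, {2, 6, 5}};
      SortBlocks(blocks);
      MergeDuplicateRanges(blocks);
      for (const Block& b : blocks)
        std::printf("[%d,%d) count=%u\n", b.start, b.end, b.count);
    }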
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index 6052149b81..be45667829 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -22,7 +22,11 @@ namespace v8 {
namespace internal {
MaybeHandle<Object> DebugEvaluate::Global(Isolate* isolate,
- Handle<String> source) {
+ Handle<String> source,
+ bool throw_on_side_effect) {
+ // Disable breaks in side-effect free mode.
+ DisableBreak disable_break_scope(isolate->debug(), throw_on_side_effect);
+
Handle<Context> context = isolate->native_context();
ScriptOriginOptions origin_options(false, true);
MaybeHandle<SharedFunctionInfo> maybe_function_info =
@@ -37,8 +41,11 @@ MaybeHandle<Object> DebugEvaluate::Global(Isolate* isolate,
Handle<JSFunction> fun =
isolate->factory()->NewFunctionFromSharedFunctionInfo(shared_info,
context);
- return Execution::Call(isolate, fun,
- Handle<JSObject>(context->global_proxy()), 0, nullptr);
+ if (throw_on_side_effect) isolate->debug()->StartSideEffectCheckMode();
+ MaybeHandle<Object> result = Execution::Call(
+ isolate, fun, Handle<JSObject>(context->global_proxy()), 0, nullptr);
+ if (throw_on_side_effect) isolate->debug()->StopSideEffectCheckMode();
+ return result;
}
MaybeHandle<Object> DebugEvaluate::Local(Isolate* isolate,
@@ -72,6 +79,48 @@ MaybeHandle<Object> DebugEvaluate::Local(Isolate* isolate,
return maybe_result;
}
+MaybeHandle<Object> DebugEvaluate::WithTopmostArguments(Isolate* isolate,
+ Handle<String> source) {
+ // Handle the processing of break.
+ DisableBreak disable_break_scope(isolate->debug());
+ Factory* factory = isolate->factory();
+ JavaScriptFrameIterator it(isolate);
+
+ // Get context and receiver.
+ Handle<Context> native_context(
+ Context::cast(it.frame()->context())->native_context(), isolate);
+
+ // Materialize arguments as property on an extension object.
+ Handle<JSObject> materialized = factory->NewJSObjectWithNullProto();
+ Handle<String> arguments_str = factory->arguments_string();
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ materialized, arguments_str,
+ Accessors::FunctionGetArguments(it.frame(), 0), NONE)
+ .Check();
+
+ // Materialize receiver.
+ Handle<String> this_str = factory->this_string();
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ materialized, this_str, Handle<Object>(it.frame()->receiver(), isolate),
+ NONE)
+ .Check();
+
+ // Use extension object in a debug-evaluate scope.
+ Handle<ScopeInfo> scope_info =
+ ScopeInfo::CreateForWithScope(isolate, Handle<ScopeInfo>::null());
+ scope_info->SetIsDebugEvaluateScope();
+ Handle<Context> evaluation_context =
+ factory->NewDebugEvaluateContext(native_context, scope_info, materialized,
+ Handle<Context>(), Handle<StringSet>());
+ Handle<SharedFunctionInfo> outer_info(native_context->closure()->shared(),
+ isolate);
+ Handle<JSObject> receiver(native_context->global_proxy());
+ const bool throw_on_side_effect = false;
+ MaybeHandle<Object> maybe_result =
+ Evaluate(isolate, outer_info, evaluation_context, receiver, source,
+ throw_on_side_effect);
+ return maybe_result;
+}
// Compile and evaluate source for the given context.
MaybeHandle<Object> DebugEvaluate::Evaluate(
@@ -88,11 +137,14 @@ MaybeHandle<Object> DebugEvaluate::Evaluate(
Object);
Handle<Object> result;
- {
- NoSideEffectScope no_side_effect(isolate, throw_on_side_effect);
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result,
- Execution::Call(isolate, eval_fun, receiver, 0, nullptr), Object);
+  bool success = false;
+  if (throw_on_side_effect) isolate->debug()->StartSideEffectCheckMode();
+  success = Execution::Call(isolate, eval_fun, receiver, 0, nullptr)
+                .ToHandle(&result);
+  if (throw_on_side_effect) isolate->debug()->StopSideEffectCheckMode();
+  if (!success) {
+ DCHECK(isolate->has_pending_exception());
+ return MaybeHandle<Object>();
}
// Skip the global proxy as it has no properties and always delegates to the
@@ -268,53 +320,53 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
// Use macro to include both inlined and non-inlined version of an intrinsic.
#define INTRINSIC_WHITELIST(V) \
/* Conversions */ \
+ V(NumberToStringSkipCache) \
+ V(ToBigInt) \
V(ToInteger) \
- V(ToObject) \
- V(ToString) \
V(ToLength) \
V(ToNumber) \
- V(ToBigInt) \
- V(NumberToStringSkipCache) \
+ V(ToObject) \
+ V(ToString) \
/* Type checks */ \
- V(IsJSReceiver) \
- V(IsSmi) \
V(IsArray) \
- V(IsFunction) \
V(IsDate) \
- V(IsJSProxy) \
+ V(IsFunction) \
V(IsJSMap) \
+ V(IsJSProxy) \
+ V(IsJSReceiver) \
V(IsJSSet) \
V(IsJSWeakMap) \
V(IsJSWeakSet) \
V(IsRegExp) \
+ V(IsSmi) \
V(IsTypedArray) \
/* Loads */ \
V(LoadLookupSlotForCall) \
/* Arrays */ \
V(ArraySpeciesConstructor) \
- V(NormalizeElements) \
+ V(EstimateNumberOfElements) \
V(GetArrayKeys) \
- V(TrySliceSimpleNonFastElements) \
V(HasComplexElements) \
- V(EstimateNumberOfElements) \
V(NewArray) \
+ V(NormalizeElements) \
+ V(TrySliceSimpleNonFastElements) \
V(TypedArrayGetBuffer) \
/* Errors */ \
+ V(NewTypeError) \
V(ReThrow) \
+ V(ThrowCalledNonCallable) \
+ V(ThrowInvalidStringLength) \
+ V(ThrowIteratorResultNotAnObject) \
V(ThrowReferenceError) \
V(ThrowSymbolIteratorInvalid) \
- V(ThrowIteratorResultNotAnObject) \
- V(NewTypeError) \
- V(ThrowInvalidStringLength) \
- V(ThrowCalledNonCallable) \
/* Strings */ \
- V(StringIndexOf) \
+ V(RegExpInternalReplace) \
V(StringIncludes) \
+ V(StringIndexOf) \
V(StringReplaceOneCharWithString) \
+ V(StringSubstring) \
V(StringToNumber) \
V(StringTrim) \
- V(StringSubstring) \
- V(RegExpInternalReplace) \
/* BigInts */ \
V(BigIntEqualToBigInt) \
V(BigIntToBoolean) \
@@ -324,48 +376,49 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(CreateObjectLiteral) \
V(CreateRegExpLiteral) \
/* Called from builtins */ \
- V(ClassOf) \
- V(StringAdd) \
- V(StringParseFloat) \
- V(StringParseInt) \
- V(StringCharCodeAt) \
- V(StringIndexOfUnchecked) \
- V(StringEqual) \
- V(RegExpInitializeAndCompile) \
- V(SymbolDescriptiveString) \
- V(GenerateRandomNumbers) \
- V(GlobalPrint) \
V(AllocateInNewSpace) \
V(AllocateInTargetSpace) \
V(AllocateSeqOneByteString) \
V(AllocateSeqTwoByteString) \
+ V(ArrayIncludes_Slow) \
+ V(ArrayIndexOf) \
+ V(ArrayIsArray) \
+ V(ClassOf) \
+ V(GenerateRandomNumbers) \
+ V(GetFunctionName) \
+ V(GetOwnPropertyDescriptor) \
+ V(GlobalPrint) \
+ V(HasProperty) \
V(ObjectCreate) \
V(ObjectEntries) \
V(ObjectEntriesSkipFastPath) \
V(ObjectHasOwnProperty) \
V(ObjectValues) \
V(ObjectValuesSkipFastPath) \
- V(ArrayIndexOf) \
- V(ArrayIncludes_Slow) \
- V(ArrayIsArray) \
- V(ThrowTypeError) \
+ V(RegExpInitializeAndCompile) \
+ V(StackGuard) \
+ V(StringAdd) \
+ V(StringCharCodeAt) \
+ V(StringEqual) \
+ V(StringIndexOfUnchecked) \
+ V(StringParseFloat) \
+ V(StringParseInt) \
+ V(SymbolDescriptiveString) \
V(ThrowRangeError) \
+ V(ThrowTypeError) \
V(ToName) \
- V(GetOwnPropertyDescriptor) \
- V(HasProperty) \
- V(StackGuard) \
/* Misc. */ \
V(Call) \
- V(MaxSmi) \
- V(NewObject) \
V(CompleteInobjectSlackTrackingForMap) \
V(HasInPrototypeChain) \
+ V(MaxSmi) \
+ V(NewObject) \
V(StringMaxLength) \
/* Test */ \
- V(OptimizeOsr) \
+ V(GetOptimizationStatus) \
V(OptimizeFunctionOnNextCall) \
- V(UnblockConcurrentRecompilation) \
- V(GetOptimizationStatus)
+ V(OptimizeOsr) \
+ V(UnblockConcurrentRecompilation)
#define CASE(Name) \
case Runtime::k##Name: \
@@ -399,6 +452,8 @@ bool BuiltinToIntrinsicHasNoSideEffect(Builtins::Name builtin_id,
V(Builtins::kArrayMap, W(CreateDataProperty)) \
V(Builtins::kArrayPrototypeSlice, W(CreateDataProperty) W(SetProperty)) \
/* TypedArrays */ \
+ V(Builtins::kTypedArrayConstructor, \
+ W(TypedArrayCopyElements) W(ThrowInvalidTypedArrayAlignment)) \
V(Builtins::kTypedArrayPrototypeFilter, W(TypedArrayCopyElements)) \
V(Builtins::kTypedArrayPrototypeMap, W(SetProperty))
@@ -502,6 +557,7 @@ bool BytecodeHasNoSideEffect(interpreter::Bytecode bytecode) {
case Bytecode::kToObject:
case Bytecode::kToNumber:
case Bytecode::kToName:
+ case Bytecode::kToString:
// Misc.
case Bytecode::kForInEnumerate:
case Bytecode::kForInPrepare:
@@ -520,10 +576,6 @@ bool BytecodeHasNoSideEffect(interpreter::Bytecode bytecode) {
case Bytecode::kSetPendingMessage:
return true;
default:
- if (FLAG_trace_side_effect_free_debug_evaluate) {
- PrintF("[debug-evaluate] bytecode %s may cause side effect.\n",
- Bytecodes::ToString(bytecode));
- }
return false;
}
}
@@ -565,7 +617,6 @@ bool BuiltinHasNoSideEffect(Builtins::Name id) {
case Builtins::kArrayEvery:
case Builtins::kArraySome:
case Builtins::kArrayConcat:
- case Builtins::kArraySlice:
case Builtins::kArrayFilter:
case Builtins::kArrayMap:
case Builtins::kArrayReduce:
@@ -711,6 +762,12 @@ bool BuiltinHasNoSideEffect(Builtins::Name id) {
case Builtins::kNumberPrototypeToPrecision:
case Builtins::kNumberPrototypeToString:
case Builtins::kNumberPrototypeValueOf:
+ // BigInt builtins.
+ case Builtins::kBigIntConstructor:
+ case Builtins::kBigIntAsIntN:
+ case Builtins::kBigIntAsUintN:
+ case Builtins::kBigIntPrototypeToString:
+ case Builtins::kBigIntPrototypeValueOf:
// Set builtins.
case Builtins::kSetConstructor:
case Builtins::kSetPrototypeEntries:
@@ -782,7 +839,14 @@ bool BuiltinHasNoSideEffect(Builtins::Name id) {
case Builtins::kGlobalUnescape:
case Builtins::kGlobalIsFinite:
case Builtins::kGlobalIsNaN:
+ // Function builtins.
+ case Builtins::kFunctionPrototypeToString:
+ case Builtins::kFunctionPrototypeBind:
+ case Builtins::kFastFunctionPrototypeBind:
+ case Builtins::kFunctionPrototypeCall:
+ case Builtins::kFunctionPrototypeApply:
// Error builtins.
+ case Builtins::kErrorConstructor:
case Builtins::kMakeError:
case Builtins::kMakeTypeError:
case Builtins::kMakeSyntaxError:
@@ -798,31 +862,37 @@ bool BuiltinHasNoSideEffect(Builtins::Name id) {
}
}
-static const Address accessors_with_no_side_effect[] = {
- // Whitelist for accessors.
- FUNCTION_ADDR(Accessors::StringLengthGetter),
- FUNCTION_ADDR(Accessors::ArrayLengthGetter),
- FUNCTION_ADDR(Accessors::FunctionLengthGetter),
- FUNCTION_ADDR(Accessors::FunctionNameGetter),
- FUNCTION_ADDR(Accessors::BoundFunctionLengthGetter),
- FUNCTION_ADDR(Accessors::BoundFunctionNameGetter),
-};
+bool BytecodeRequiresRuntimeCheck(interpreter::Bytecode bytecode) {
+ typedef interpreter::Bytecode Bytecode;
+ switch (bytecode) {
+ case Bytecode::kStaNamedProperty:
+ case Bytecode::kStaNamedOwnProperty:
+ case Bytecode::kStaKeyedProperty:
+ case Bytecode::kStaInArrayLiteral:
+ case Bytecode::kStaDataPropertyInLiteral:
+ case Bytecode::kStaCurrentContextSlot:
+ return true;
+ default:
+ return false;
+ }
+}
} // anonymous namespace
// static
-bool DebugEvaluate::FunctionHasNoSideEffect(Handle<SharedFunctionInfo> info) {
+DebugEvaluate::SideEffectState DebugEvaluate::FunctionGetSideEffectState(
+ Handle<SharedFunctionInfo> info) {
if (FLAG_trace_side_effect_free_debug_evaluate) {
PrintF("[debug-evaluate] Checking function %s for side effect.\n",
info->DebugName()->ToCString().get());
}
DCHECK(info->is_compiled());
-
if (info->HasBytecodeArray()) {
// Check bytecodes against whitelist.
- Handle<BytecodeArray> bytecode_array(info->bytecode_array());
+ Handle<BytecodeArray> bytecode_array(info->GetBytecodeArray());
if (FLAG_trace_side_effect_free_debug_evaluate) bytecode_array->Print();
+ bool requires_runtime_checks = false;
for (interpreter::BytecodeArrayIterator it(bytecode_array); !it.done();
it.Advance()) {
interpreter::Bytecode bytecode = it.current_bytecode();
@@ -833,20 +903,34 @@ bool DebugEvaluate::FunctionHasNoSideEffect(Handle<SharedFunctionInfo> info) {
? it.GetIntrinsicIdOperand(0)
: it.GetRuntimeIdOperand(0);
if (IntrinsicHasNoSideEffect(id)) continue;
- return false;
+ return kHasSideEffects;
}
if (BytecodeHasNoSideEffect(bytecode)) continue;
+ if (BytecodeRequiresRuntimeCheck(bytecode)) {
+ requires_runtime_checks = true;
+ continue;
+ }
+
+ if (FLAG_trace_side_effect_free_debug_evaluate) {
+ PrintF("[debug-evaluate] bytecode %s may cause side effect.\n",
+ interpreter::Bytecodes::ToString(bytecode));
+ }
// Did not match whitelist.
- return false;
+ return kHasSideEffects;
+ }
+ return requires_runtime_checks ? kRequiresRuntimeChecks : kHasNoSideEffect;
+ } else if (info->IsApiFunction()) {
+ if (info->GetCode()->is_builtin()) {
+ return info->GetCode()->builtin_index() == Builtins::kHandleApiCall
+ ? kHasNoSideEffect
+ : kHasSideEffects;
}
- return true;
} else {
// Check built-ins against whitelist.
- int builtin_index = info->HasLazyDeserializationBuiltinId()
- ? info->lazy_deserialization_builtin_id()
- : info->code()->builtin_index();
+ int builtin_index =
+ info->HasBuiltinId() ? info->builtin_id() : Builtins::kNoBuiltinId;
DCHECK_NE(Builtins::kDeserializeLazy, builtin_index);
if (Builtins::IsBuiltinId(builtin_index) &&
BuiltinHasNoSideEffect(static_cast<Builtins::Name>(builtin_index))) {
@@ -880,24 +964,54 @@ bool DebugEvaluate::FunctionHasNoSideEffect(Handle<SharedFunctionInfo> info) {
DCHECK(!failed);
}
#endif // DEBUG
- return true;
+ return kHasNoSideEffect;
}
}
- return false;
+ return kHasSideEffects;
}
// static
-bool DebugEvaluate::CallbackHasNoSideEffect(Address function_addr) {
- for (size_t i = 0; i < arraysize(accessors_with_no_side_effect); i++) {
- if (function_addr == accessors_with_no_side_effect[i]) return true;
+bool DebugEvaluate::CallbackHasNoSideEffect(Object* callback_info) {
+ DisallowHeapAllocation no_gc;
+ if (callback_info->IsAccessorInfo()) {
+ // List of whitelisted internal accessors can be found in accessors.h.
+ AccessorInfo* info = AccessorInfo::cast(callback_info);
+ if (info->has_no_side_effect()) return true;
+ if (FLAG_trace_side_effect_free_debug_evaluate) {
+ PrintF("[debug-evaluate] API Callback '");
+ info->name()->ShortPrint();
+ PrintF("' may cause side effect.\n");
+ }
+ } else if (callback_info->IsInterceptorInfo()) {
+ InterceptorInfo* info = InterceptorInfo::cast(callback_info);
+ if (info->has_no_side_effect()) return true;
+ if (FLAG_trace_side_effect_free_debug_evaluate) {
+ PrintF("[debug-evaluate] API Interceptor may cause side effect.\n");
+ }
+ } else if (callback_info->IsCallHandlerInfo()) {
+ CallHandlerInfo* info = CallHandlerInfo::cast(callback_info);
+ if (info->IsSideEffectFreeCallHandlerInfo()) return true;
+ if (FLAG_trace_side_effect_free_debug_evaluate) {
+ PrintF("[debug-evaluate] API CallHandlerInfo may cause side effect.\n");
+ }
}
+ return false;
+}
- if (FLAG_trace_side_effect_free_debug_evaluate) {
- PrintF("[debug-evaluate] API Callback at %p may cause side effect.\n",
- reinterpret_cast<void*>(function_addr));
+// static
+void DebugEvaluate::ApplySideEffectChecks(
+ Handle<BytecodeArray> bytecode_array) {
+ for (interpreter::BytecodeArrayIterator it(bytecode_array); !it.done();
+ it.Advance()) {
+ interpreter::Bytecode bytecode = it.current_bytecode();
+ if (BytecodeRequiresRuntimeCheck(bytecode)) {
+ interpreter::Bytecode debugbreak =
+ interpreter::Bytecodes::GetDebugBreak(bytecode);
+ bytecode_array->set(it.current_offset(),
+ interpreter::Bytecodes::ToByte(debugbreak));
+ }
}
- return false;
}
} // namespace internal
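FunctionGetSideEffectState above scans a function's bytecode: whitelisted bytecodes and intrinsics pass, store-like bytecodes only flip a requires-runtime-checks flag, and anything else fails immediately. A standalone sketch of that tri-state scan, with invented opcode sets standing in for V8's whitelist tables:

    // Standalone sketch of the tri-state classification performed by
    // FunctionGetSideEffectState above; the opcode names and sets are invented
    // stand-ins for V8's whitelists.
    #include <cstdio>
    #include <set>
    #include <string>
    #include <vector>

    enum SideEffectState { kHasSideEffects, kRequiresRuntimeChecks, kHasNoSideEffect };

    SideEffectState Classify(const std::vector<std::string>& bytecodes) {
      static const std::set<std::string> kNoSideEffect = {"Ldar", "Add", "Return"};
      static const std::set<std::string> kRuntimeChecked = {"StaNamedProperty",
                                                            "StaKeyedProperty"};
      bool requires_runtime_checks = false;
      for (const std::string& b : bytecodes) {
        if (kNoSideEffect.count(b)) continue;
        if (kRuntimeChecked.count(b)) {    // stores are allowed, but only onto
          requires_runtime_checks = true;  // objects created during the eval
          continue;
        }
        return kHasSideEffects;            // anything off the whitelist fails
      }
      return requires_runtime_checks ? kRequiresRuntimeChecks : kHasNoSideEffect;
    }

    int main() {
      // Prints 1 (kRequiresRuntimeChecks) for a store surrounded by safe ops.
      std::printf("%d\n", Classify({"Ldar", "StaNamedProperty", "Return"}));
    }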
diff --git a/deps/v8/src/debug/debug-evaluate.h b/deps/v8/src/debug/debug-evaluate.h
index fbe747d024..6cfa5a2e1d 100644
--- a/deps/v8/src/debug/debug-evaluate.h
+++ b/deps/v8/src/debug/debug-evaluate.h
@@ -18,7 +18,8 @@ class FrameInspector;
class DebugEvaluate : public AllStatic {
public:
- static MaybeHandle<Object> Global(Isolate* isolate, Handle<String> source);
+ static MaybeHandle<Object> Global(Isolate* isolate, Handle<String> source,
+ bool throw_on_side_effect);
// Evaluate a piece of JavaScript in the context of a stack frame for
// debugging. Things that need special attention are:
@@ -30,8 +31,21 @@ class DebugEvaluate : public AllStatic {
Handle<String> source,
bool throw_on_side_effect);
- static bool FunctionHasNoSideEffect(Handle<SharedFunctionInfo> info);
- static bool CallbackHasNoSideEffect(Address function_addr);
+ // This is used for break-at-entry for builtins and API functions.
+ // Evaluate a piece of JavaScript in the native context, but with the
+ // materialized arguments object and receiver of the current call.
+ static MaybeHandle<Object> WithTopmostArguments(Isolate* isolate,
+ Handle<String> source);
+
+ enum SideEffectState {
+ kHasSideEffects,
+ kRequiresRuntimeChecks,
+ kHasNoSideEffect
+ };
+ static SideEffectState FunctionGetSideEffectState(
+ Handle<SharedFunctionInfo> info);
+ static bool CallbackHasNoSideEffect(Object* callback_info);
+ static void ApplySideEffectChecks(Handle<BytecodeArray> bytecode_array);
private:
// This class builds a context chain for evaluation of expressions
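A caller of the SideEffectState enum declared above is expected to refuse kHasSideEffects, patch the debug copy of the bytecode with runtime checks for kRequiresRuntimeChecks (via ApplySideEffectChecks), and run kHasNoSideEffect code unchanged. A minimal dispatch sketch under that assumption, with placeholder handlers rather than V8 APIs:

    // Minimal dispatch sketch for SideEffectState; the three handlers are
    // placeholders for what Debug/DebugEvaluate do, not real V8 calls.
    #include <cstdio>

    enum SideEffectState { kHasSideEffects, kRequiresRuntimeChecks, kHasNoSideEffect };

    void Reject() { std::printf("throw: function may have side effects\n"); }
    void InstallRuntimeChecks() { std::printf("patch debug bytecode with checks\n"); }
    void RunUnchanged() { std::printf("run as-is\n"); }

    void Dispatch(SideEffectState state) {
      switch (state) {
        case kHasSideEffects:        Reject(); break;
        case kRequiresRuntimeChecks: InstallRuntimeChecks(); RunUnchanged(); break;
        case kHasNoSideEffect:       RunUnchanged(); break;
      }
    }

    int main() { Dispatch(kRequiresRuntimeChecks); }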
diff --git a/deps/v8/src/debug/debug-interface.h b/deps/v8/src/debug/debug-interface.h
index 6d50be6e0e..b3f3ad917c 100644
--- a/deps/v8/src/debug/debug-interface.h
+++ b/deps/v8/src/debug/debug-interface.h
@@ -5,6 +5,7 @@
#ifndef V8_DEBUG_DEBUG_INTERFACE_H_
#define V8_DEBUG_DEBUG_INTERFACE_H_
+#include "include/v8-inspector.h"
#include "include/v8-util.h"
#include "include/v8.h"
@@ -29,6 +30,9 @@ namespace debug {
void SetContextId(Local<Context> context, int id);
int GetContextId(Local<Context> context);
+void SetInspector(Isolate* isolate, v8_inspector::V8Inspector*);
+v8_inspector::V8Inspector* GetInspector(Isolate* isolate);
+
/**
* Debugger is running in its own context which is entered while debugger
* messages are being dispatched. This is an explicit getter for this
@@ -119,8 +123,11 @@ bool AllFramesOnStackAreBlackboxed(Isolate* isolate);
* \param data the parameter provided during callback installation.
*/
typedef void (*OutOfMemoryCallback)(void* data);
-void SetOutOfMemoryCallback(Isolate* isolate, OutOfMemoryCallback callback,
- void* data);
+
+V8_DEPRECATED("Use v8::Isolate::AddNearHeapLimitCallback",
+ void SetOutOfMemoryCallback(Isolate* isolate,
+ OutOfMemoryCallback callback,
+ void* data));
/**
* Native wrapper around v8::internal::Script object.
@@ -166,6 +173,7 @@ class WasmScript : public Script {
std::pair<int, int> GetFunctionRange(int function_index) const;
debug::WasmDisassembly DisassembleFunction(int function_index) const;
+ uint32_t GetFunctionHash(int function_index);
};
void GetLoadedScripts(Isolate* isolate, PersistentValueVector<Script>& scripts);
@@ -204,6 +212,10 @@ void ResetBlackboxedStateCache(Isolate* isolate,
int EstimatedValueSize(Isolate* isolate, v8::Local<v8::Value> value);
+v8::MaybeLocal<v8::Array> EntriesPreview(Isolate* isolate,
+ v8::Local<v8::Value> value,
+ bool* is_key_value);
+
enum Builtin {
kObjectKeys,
kObjectGetPrototypeOf,
@@ -494,7 +506,13 @@ int GetNativeAccessorDescriptor(v8::Local<v8::Context> context,
int64_t GetNextRandomInt64(v8::Isolate* isolate);
v8::MaybeLocal<v8::Value> EvaluateGlobal(v8::Isolate* isolate,
- v8::Local<v8::String> source);
+ v8::Local<v8::String> source,
+ bool throw_on_side_effect);
+
+int GetDebuggingId(v8::Local<v8::Function> function);
+
+bool SetFunctionBreakpoint(v8::Local<v8::Function> function,
+ v8::Local<v8::String> condition, BreakpointId* id);
} // namespace debug
} // namespace v8
diff --git a/deps/v8/src/debug/debug-scopes.cc b/deps/v8/src/debug/debug-scopes.cc
index fda85bd88d..fa93341e91 100644
--- a/deps/v8/src/debug/debug-scopes.cc
+++ b/deps/v8/src/debug/debug-scopes.cc
@@ -82,9 +82,8 @@ void ScopeIterator::TryParseAndRetrieveScopes(ScopeIterator::Option option) {
}
}
if (scope_info->scope_type() == FUNCTION_SCOPE) {
- nested_scope_chain_.emplace_back(scope_info,
- shared_info->start_position(),
- shared_info->end_position());
+ nested_scope_chain_.emplace_back(scope_info, shared_info->StartPosition(),
+ shared_info->EndPosition());
}
if (!collect_non_locals) return;
}
@@ -177,8 +176,8 @@ void ScopeIterator::UnwrapEvaluationContext() {
}
}
-
-MUST_USE_RESULT MaybeHandle<JSObject> ScopeIterator::MaterializeScopeDetails() {
+V8_WARN_UNUSED_RESULT MaybeHandle<JSObject>
+ScopeIterator::MaterializeScopeDetails() {
// Calculate the size of the result.
Handle<FixedArray> details =
isolate_->factory()->NewFixedArray(kScopeDetailsSize);
@@ -219,7 +218,7 @@ int ScopeIterator::start_position() {
}
if (!HasContext()) return 0;
Handle<JSFunction> js_function = handle(CurrentContext()->closure());
- return js_function.is_null() ? 0 : js_function->shared()->start_position();
+ return js_function.is_null() ? 0 : js_function->shared()->StartPosition();
}
int ScopeIterator::end_position() {
@@ -228,7 +227,7 @@ int ScopeIterator::end_position() {
}
if (!HasContext()) return 0;
Handle<JSFunction> js_function = handle(CurrentContext()->closure());
- return js_function.is_null() ? 0 : js_function->shared()->end_position();
+ return js_function.is_null() ? 0 : js_function->shared()->EndPosition();
}
void ScopeIterator::Next() {
@@ -957,8 +956,8 @@ void ScopeIterator::GetNestedScopeChain(Isolate* isolate, Scope* scope,
// Do not collect scopes of nested inner functions inside the current one.
// Nested arrow functions could have the same end positions.
Handle<JSFunction> function = GetFunction();
- if (scope->start_position() > function->shared()->start_position() &&
- scope->end_position() <= function->shared()->end_position()) {
+ if (scope->start_position() > function->shared()->StartPosition() &&
+ scope->end_position() <= function->shared()->EndPosition()) {
return;
}
}
diff --git a/deps/v8/src/debug/debug-scopes.h b/deps/v8/src/debug/debug-scopes.h
index 9321b8f995..ba1274f62c 100644
--- a/deps/v8/src/debug/debug-scopes.h
+++ b/deps/v8/src/debug/debug-scopes.h
@@ -49,7 +49,7 @@ class ScopeIterator {
ScopeIterator(Isolate* isolate, Handle<JSFunction> function);
ScopeIterator(Isolate* isolate, Handle<JSGeneratorObject> generator);
- MUST_USE_RESULT MaybeHandle<JSObject> MaterializeScopeDetails();
+ V8_WARN_UNUSED_RESULT MaybeHandle<JSObject> MaterializeScopeDetails();
// More scopes?
bool Done() { return context_.is_null(); }
@@ -126,9 +126,9 @@ class ScopeIterator {
void UnwrapEvaluationContext();
- MUST_USE_RESULT MaybeHandle<JSObject> MaterializeScriptScope();
- MUST_USE_RESULT MaybeHandle<JSObject> MaterializeLocalScope();
- MUST_USE_RESULT MaybeHandle<JSObject> MaterializeModuleScope();
+ V8_WARN_UNUSED_RESULT MaybeHandle<JSObject> MaterializeScriptScope();
+ V8_WARN_UNUSED_RESULT MaybeHandle<JSObject> MaterializeLocalScope();
+ V8_WARN_UNUSED_RESULT MaybeHandle<JSObject> MaterializeModuleScope();
Handle<JSObject> MaterializeClosure();
Handle<JSObject> MaterializeCatchScope();
Handle<JSObject> MaterializeInnerScope();
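The debug-scopes changes above swap MUST_USE_RESULT for V8_WARN_UNUSED_RESULT on the MaybeHandle-returning materializers, so on supporting compilers silently discarding a possibly-failed result becomes a warning. A sketch of what that annotation buys, using [[nodiscard]] and std::optional as stand-ins for the V8 macro and MaybeHandle:

    // Sketch of a warn-unused-result annotation on a maybe-failing return;
    // [[nodiscard]] and std::optional stand in for V8_WARN_UNUSED_RESULT and
    // MaybeHandle here.
    #include <cstdio>
    #include <optional>

    [[nodiscard]] std::optional<int> MaterializeScopeDetails(bool ok) {
      return ok ? std::optional<int>(42) : std::nullopt;
    }

    int main() {
      // MaterializeScopeDetails(true);  // discarding the result would warn
      if (auto details = MaterializeScopeDetails(true)) {
        std::printf("details: %d\n", *details);  // caller must handle failure
      }
    }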
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index 69eaeb6cad..ae275538d7 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -5,11 +5,14 @@
#include "src/debug/debug.h"
#include <memory>
+#include <unordered_set>
#include "src/api.h"
#include "src/arguments.h"
#include "src/assembler-inl.h"
+#include "src/base/platform/mutex.h"
#include "src/bootstrapper.h"
+#include "src/builtins/builtins.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/compiler.h"
@@ -21,17 +24,51 @@
#include "src/global-handles.h"
#include "src/globals.h"
#include "src/interpreter/bytecode-array-accessor.h"
+#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
#include "src/log.h"
#include "src/messages.h"
#include "src/objects/debug-objects-inl.h"
#include "src/snapshot/natives.h"
+#include "src/snapshot/snapshot.h"
#include "src/wasm/wasm-objects-inl.h"
namespace v8 {
namespace internal {
+class Debug::TemporaryObjectsTracker : public HeapObjectAllocationTracker {
+ public:
+ TemporaryObjectsTracker() = default;
+ ~TemporaryObjectsTracker() = default;
+
+ void AllocationEvent(Address addr, int) override { objects_.insert(addr); }
+
+ void MoveEvent(Address from, Address to, int) override {
+ if (from == to) return;
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ auto it = objects_.find(from);
+ if (it == objects_.end()) {
+      // If the temporary object was collected, we can get a MoveEvent that
+      // moves an existing non-temporary object to the address where the
+      // temporary object used to be, so we should mark the new address as
+      // non-temporary.
+ objects_.erase(to);
+ return;
+ }
+ objects_.erase(it);
+ objects_.insert(to);
+ }
+
+ bool HasObject(Address addr) const {
+ return objects_.find(addr) != objects_.end();
+ }
+
+ private:
+ std::unordered_set<Address> objects_;
+ base::Mutex mutex_;
+ DISALLOW_COPY_AND_ASSIGN(TemporaryObjectsTracker);
+};
+
Debug::Debug(Isolate* isolate)
: debug_context_(Handle<Context>()),
is_active_(false),
@@ -49,6 +86,8 @@ Debug::Debug(Isolate* isolate)
ThreadInit();
}
+Debug::~Debug() { DCHECK_NULL(debug_delegate_); }
+
BreakLocation BreakLocation::FromFrame(Handle<DebugInfo> debug_info,
JavaScriptFrame* frame) {
if (debug_info->CanBreakAtEntry()) {
@@ -152,7 +191,7 @@ BreakIterator::BreakIterator(Handle<DebugInfo> debug_info)
break_index_(-1),
source_position_iterator_(
debug_info->DebugBytecodeArray()->SourcePositionTable()) {
- position_ = debug_info->shared()->start_position();
+ position_ = debug_info->shared()->StartPosition();
statement_position_ = position_;
// There is at least one break location.
DCHECK(!Done());
@@ -374,6 +413,9 @@ bool Debug::Load() {
if (context.is_null()) return false;
debug_context_ = isolate_->global_handles()->Create(*context);
+ GlobalHandles::AnnotateStrongRetainer(
+ Handle<Object>::cast(debug_context_).location(),
+ "v8::internal::Debug::debug_context_");
feature_tracker()->Track(DebugFeatureTracker::kActive);
@@ -413,6 +455,8 @@ void Debug::Break(JavaScriptFrame* frame, Handle<JSFunction> break_target) {
// Return if we fail to retrieve debug info.
Handle<SharedFunctionInfo> shared(break_target->shared());
if (!EnsureBreakInfo(shared)) return;
+ PrepareFunctionForDebugExecution(shared);
+
Handle<DebugInfo> debug_info(shared->GetDebugInfo(), isolate_);
// Find the break location where execution has stopped.
@@ -510,9 +554,7 @@ MaybeHandle<FixedArray> Debug::CheckBreakPoints(Handle<DebugInfo> debug_info,
if (has_break_points) *has_break_points = has_break_points_to_check;
if (!has_break_points_to_check) return {};
- Handle<Object> break_points =
- debug_info->GetBreakPoints(location->position());
- return Debug::GetHitBreakPoints(break_points);
+ return Debug::GetHitBreakPoints(debug_info, location->position());
}
@@ -567,17 +609,28 @@ MaybeHandle<Object> Debug::CallFunction(const char* name, int argc,
// Check whether a single break point object is triggered.
-bool Debug::CheckBreakPoint(Handle<BreakPoint> break_point) {
+bool Debug::CheckBreakPoint(Handle<BreakPoint> break_point,
+ bool is_break_at_entry) {
HandleScope scope(isolate_);
if (!break_point->condition()->length()) return true;
Handle<String> condition(break_point->condition());
+ MaybeHandle<Object> maybe_result;
Handle<Object> result;
- // Since we call CheckBreakpoint only for deoptimized frame on top of stack,
- // we can use 0 as index of inlined frame.
- if (!DebugEvaluate::Local(isolate_, break_frame_id(),
- /* inlined_jsframe_index */ 0, condition, false)
- .ToHandle(&result)) {
+
+ if (is_break_at_entry) {
+ maybe_result = DebugEvaluate::WithTopmostArguments(isolate_, condition);
+ } else {
+    // Since we call CheckBreakpoint only for the deoptimized frame on top of
+    // the stack, we can use 0 as the index of the inlined frame.
+ const int inlined_jsframe_index = 0;
+ const bool throw_on_side_effect = false;
+ maybe_result =
+ DebugEvaluate::Local(isolate_, break_frame_id(), inlined_jsframe_index,
+ condition, throw_on_side_effect);
+ }
+
+ if (!maybe_result.ToHandle(&result)) {
if (isolate_->has_pending_exception()) {
isolate_->clear_pending_exception();
}
@@ -593,8 +646,9 @@ bool Debug::SetBreakPoint(Handle<JSFunction> function,
// Make sure the function is compiled and has set up the debug info.
Handle<SharedFunctionInfo> shared(function->shared());
- if (!EnsureBreakInfo(shared)) return true;
- PrepareFunctionForBreakPoints(shared);
+ if (!EnsureBreakInfo(shared)) return false;
+ PrepareFunctionForDebugExecution(shared);
+
Handle<DebugInfo> debug_info(shared->GetDebugInfo());
// Source positions starts with zero.
DCHECK_LE(0, *source_position);
@@ -613,8 +667,11 @@ bool Debug::SetBreakPoint(Handle<JSFunction> function,
}
bool Debug::SetBreakPointForScript(Handle<Script> script,
- Handle<BreakPoint> break_point,
- int* source_position) {
+ Handle<String> condition,
+ int* source_position, int* id) {
+ *id = ++thread_local_.last_breakpoint_id_;
+ Handle<BreakPoint> break_point =
+ isolate_->factory()->NewBreakPoint(*id, condition);
if (script->type() == Script::TYPE_WASM) {
Handle<WasmCompiledModule> compiled_module(
WasmCompiledModule::cast(script->wasm_compiled_module()), isolate_);
@@ -632,12 +689,12 @@ bool Debug::SetBreakPointForScript(Handle<Script> script,
// Make sure the function has set up the debug info.
Handle<SharedFunctionInfo> shared = Handle<SharedFunctionInfo>::cast(result);
if (!EnsureBreakInfo(shared)) return false;
- PrepareFunctionForBreakPoints(shared);
+ PrepareFunctionForDebugExecution(shared);
// Find position within function. The script position might be before the
// source position of the first function.
- if (shared->start_position() > *source_position) {
- *source_position = shared->start_position();
+ if (shared->StartPosition() > *source_position) {
+ *source_position = shared->StartPosition();
}
Handle<DebugInfo> debug_info(shared->GetDebugInfo());
@@ -672,6 +729,19 @@ int Debug::FindBreakablePosition(Handle<DebugInfo> debug_info,
}
}
+void Debug::ApplyInstrumentation(Handle<SharedFunctionInfo> shared) {
+ DCHECK(shared->HasBytecodeArray());
+ Handle<DebugInfo> debug_info(GetOrCreateDebugInfo(shared));
+ DCHECK_NE(debug_info->DebugExecutionMode(), isolate_->debug_execution_mode());
+ if (isolate_->debug_execution_mode() == DebugInfo::kBreakpoints) {
+ ClearSideEffectChecks(debug_info);
+ ApplyBreakPoints(debug_info);
+ } else {
+ ClearBreakPoints(debug_info);
+ ApplySideEffectChecks(debug_info);
+ }
+}
+
void Debug::ApplyBreakPoints(Handle<DebugInfo> debug_info) {
DisallowHeapAllocation no_gc;
if (debug_info->CanBreakAtEntry()) {
@@ -689,6 +759,7 @@ void Debug::ApplyBreakPoints(Handle<DebugInfo> debug_info) {
it.SetDebugBreak();
}
}
+ debug_info->SetDebugExecutionMode(DebugInfo::kBreakpoints);
}
void Debug::ClearBreakPoints(Handle<DebugInfo> debug_info) {
@@ -697,7 +768,9 @@ void Debug::ClearBreakPoints(Handle<DebugInfo> debug_info) {
} else {
// If we attempt to clear breakpoints but none exist, simply return. This
// can happen e.g. CoverageInfos exist but no breakpoints are set.
- if (!debug_info->HasDebugBytecodeArray()) return;
+ if (!debug_info->HasDebugBytecodeArray() || !debug_info->HasBreakInfo()) {
+ return;
+ }
DisallowHeapAllocation no_gc;
for (BreakIterator it(debug_info); !it.Done(); it.Next()) {
@@ -711,6 +784,7 @@ void Debug::ClearBreakPoint(Handle<BreakPoint> break_point) {
for (DebugInfoListNode* node = debug_info_list_; node != nullptr;
node = node->next()) {
+ if (!node->debug_info()->HasBreakInfo()) continue;
Handle<Object> result =
DebugInfo::FindBreakPointInfo(node->debug_info(), break_point);
if (result->IsUndefined(isolate_)) continue;
@@ -727,12 +801,13 @@ void Debug::ClearBreakPoint(Handle<BreakPoint> break_point) {
}
}
-bool Debug::SetBreakpoint(Handle<Script> script, Handle<String> condition,
- int* offset, int* id) {
+bool Debug::SetBreakpointForFunction(Handle<JSFunction> function,
+ Handle<String> condition, int* id) {
*id = ++thread_local_.last_breakpoint_id_;
Handle<BreakPoint> breakpoint =
isolate_->factory()->NewBreakPoint(*id, condition);
- return SetBreakPointForScript(script, breakpoint, offset);
+ int source_position = 0;
+ return SetBreakPoint(function, breakpoint, &source_position);
}
void Debug::RemoveBreakpoint(int id) {
@@ -754,7 +829,8 @@ void Debug::FloodWithOneShot(Handle<SharedFunctionInfo> shared,
if (IsBlackboxed(shared)) return;
// Make sure the function is compiled and has set up the debug info.
if (!EnsureBreakInfo(shared)) return;
- PrepareFunctionForBreakPoints(shared);
+ PrepareFunctionForDebugExecution(shared);
+
Handle<DebugInfo> debug_info(shared->GetDebugInfo());
// Flood the function with break points.
DCHECK(debug_info->HasDebugBytecodeArray());
@@ -781,10 +857,16 @@ bool Debug::IsBreakOnException(ExceptionBreakType type) {
}
}
-MaybeHandle<FixedArray> Debug::GetHitBreakPoints(Handle<Object> break_points) {
+MaybeHandle<FixedArray> Debug::GetHitBreakPoints(Handle<DebugInfo> debug_info,
+ int position) {
+ Handle<Object> break_points = debug_info->GetBreakPoints(position);
+ bool is_break_at_entry = debug_info->BreakAtEntry();
DCHECK(!break_points->IsUndefined(isolate_));
if (!break_points->IsFixedArray()) {
- if (!CheckBreakPoint(Handle<BreakPoint>::cast(break_points))) return {};
+ if (!CheckBreakPoint(Handle<BreakPoint>::cast(break_points),
+ is_break_at_entry)) {
+ return {};
+ }
Handle<FixedArray> break_points_hit = isolate_->factory()->NewFixedArray(1);
break_points_hit->set(0, *break_points);
return break_points_hit;
@@ -797,7 +879,8 @@ MaybeHandle<FixedArray> Debug::GetHitBreakPoints(Handle<Object> break_points) {
int break_points_hit_count = 0;
for (int i = 0; i < num_objects; ++i) {
Handle<Object> break_point(array->get(i), isolate_);
- if (CheckBreakPoint(Handle<BreakPoint>::cast(break_point))) {
+ if (CheckBreakPoint(Handle<BreakPoint>::cast(break_point),
+ is_break_at_entry)) {
break_points_hit->set(break_points_hit_count++, *break_point);
}
}
@@ -941,6 +1024,8 @@ void Debug::PrepareStep(StepAction step_action) {
Handle<JSFunction> function(summary.function());
Handle<SharedFunctionInfo> shared(function->shared());
if (!EnsureBreakInfo(shared)) return;
+ PrepareFunctionForDebugExecution(shared);
+
Handle<DebugInfo> debug_info(shared->GetDebugInfo());
BreakLocation location = BreakLocation::FromFrame(debug_info, js_frame);
@@ -1133,19 +1218,30 @@ void Debug::DeoptimizeFunction(Handle<SharedFunctionInfo> shared) {
}
}
-void Debug::PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared) {
+void Debug::PrepareFunctionForDebugExecution(
+ Handle<SharedFunctionInfo> shared) {
// To prepare bytecode for debugging, we already need to have the debug
// info (containing the debug copy) upfront, but since we do not recompile,
// preparing for break points cannot fail.
DCHECK(shared->is_compiled());
DCHECK(shared->HasDebugInfo());
- DCHECK(shared->HasBreakInfo());
Handle<DebugInfo> debug_info = GetOrCreateDebugInfo(shared);
- if (debug_info->IsPreparedForBreakpoints()) return;
+ if (debug_info->flags() & DebugInfo::kPreparedForDebugExecution) return;
+
+ // Make a copy of the bytecode array if available.
+ Handle<Object> maybe_debug_bytecode_array =
+ isolate_->factory()->undefined_value();
+ if (shared->HasBytecodeArray()) {
+ Handle<BytecodeArray> original(shared->GetBytecodeArray());
+ maybe_debug_bytecode_array =
+ isolate_->factory()->CopyBytecodeArray(original);
+ }
+ debug_info->set_debug_bytecode_array(*maybe_debug_bytecode_array);
if (debug_info->CanBreakAtEntry()) {
// Deopt everything in case the function is inlined anywhere.
Deoptimizer::DeoptimizeAll(isolate_);
+ InstallDebugBreakTrampoline();
} else {
DeoptimizeFunction(shared);
// Update PCs on the stack to point to recompiled code.
@@ -1153,9 +1249,59 @@ void Debug::PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared) {
redirect_visitor.VisitThread(isolate_, isolate_->thread_local_top());
isolate_->thread_manager()->IterateArchivedThreads(&redirect_visitor);
}
-
debug_info->set_flags(debug_info->flags() |
- DebugInfo::kPreparedForBreakpoints);
+ DebugInfo::kPreparedForDebugExecution);
+}
+
+void Debug::InstallDebugBreakTrampoline() {
+ // Check the list of debug infos whether the debug break trampoline needs to
+ // be installed. If that's the case, iterate the heap for functions to rewire
+ // to the trampoline.
+ HandleScope scope(isolate_);
+  // If there is a breakpoint at function entry, we need to install the trampoline.
+  bool needs_to_use_trampoline = false;
+  // If we break at entry to an API callback, we need to clear ICs.
+ bool needs_to_clear_ic = false;
+ for (DebugInfoListNode* current = debug_info_list_; current != nullptr;
+ current = current->next()) {
+ if (current->debug_info()->CanBreakAtEntry()) {
+ needs_to_use_trampoline = true;
+ if (current->debug_info()->shared()->IsApiFunction()) {
+ needs_to_clear_ic = true;
+ break;
+ }
+ }
+ }
+
+ if (!needs_to_use_trampoline) return;
+
+ Handle<Code> trampoline = BUILTIN_CODE(isolate_, DebugBreakTrampoline);
+ std::vector<Handle<JSFunction>> needs_compile;
+ {
+ HeapIterator iterator(isolate_->heap());
+ while (HeapObject* obj = iterator.next()) {
+ if (needs_to_clear_ic && obj->IsFeedbackVector()) {
+ FeedbackVector::cast(obj)->ClearSlots(isolate_);
+ continue;
+ } else if (obj->IsJSFunction()) {
+ JSFunction* fun = JSFunction::cast(obj);
+ SharedFunctionInfo* shared = fun->shared();
+ if (!shared->HasDebugInfo()) continue;
+ if (!shared->GetDebugInfo()->CanBreakAtEntry()) continue;
+ if (!fun->is_compiled()) {
+ needs_compile.push_back(handle(fun));
+ } else {
+ fun->set_code(*trampoline);
+ }
+ }
+ }
+ }
+ // By overwriting the function code with DebugBreakTrampoline, which tailcalls
+ // to shared code, we bypass CompileLazy. Perform CompileLazy here instead.
+ for (Handle<JSFunction> fun : needs_compile) {
+ Compiler::Compile(fun, Compiler::CLEAR_EXCEPTION);
+ fun->set_code(*trampoline);
+ }
}
namespace {
@@ -1191,6 +1337,7 @@ bool Debug::GetPossibleBreakpoints(Handle<Script> script, int start_position,
Handle<SharedFunctionInfo> shared =
Handle<SharedFunctionInfo>::cast(result);
if (!EnsureBreakInfo(shared)) return false;
+ PrepareFunctionForDebugExecution(shared);
Handle<DebugInfo> debug_info(shared->GetDebugInfo());
FindBreakablePositions(debug_info, start_position, end_position, locations);
@@ -1203,8 +1350,8 @@ bool Debug::GetPossibleBreakpoints(Handle<Script> script, int start_position,
SharedFunctionInfo::ScriptIterator iterator(script);
for (SharedFunctionInfo* info = iterator.Next(); info != nullptr;
info = iterator.Next()) {
- if (info->end_position() < start_position ||
- info->start_position() >= end_position) {
+ if (info->EndPosition() < start_position ||
+ info->StartPosition() >= end_position) {
continue;
}
if (!info->IsSubjectToDebugging()) continue;
@@ -1224,6 +1371,7 @@ bool Debug::GetPossibleBreakpoints(Handle<Script> script, int start_position,
}
}
if (!EnsureBreakInfo(candidate)) return false;
+ PrepareFunctionForDebugExecution(candidate);
}
if (was_compiled) continue;
@@ -1250,15 +1398,15 @@ class SharedFunctionInfoFinder {
if (!shared->IsSubjectToDebugging()) return;
int start_position = shared->function_token_position();
if (start_position == kNoSourcePosition) {
- start_position = shared->start_position();
+ start_position = shared->StartPosition();
}
if (start_position > target_position_) return;
- if (target_position_ > shared->end_position()) return;
+ if (target_position_ > shared->EndPosition()) return;
if (current_candidate_ != nullptr) {
if (current_start_position_ == start_position &&
- shared->end_position() == current_candidate_->end_position()) {
+ shared->EndPosition() == current_candidate_->EndPosition()) {
// If we already have a matching closure, do not throw it away.
if (current_candidate_closure_ != nullptr && closure == nullptr) return;
// If a top-level function contains only one function
@@ -1266,7 +1414,7 @@ class SharedFunctionInfoFinder {
// is the same. In that case prefer the non top-level function.
if (!current_candidate_->is_toplevel() && shared->is_toplevel()) return;
} else if (start_position < current_start_position_ ||
- current_candidate_->end_position() < shared->end_position()) {
+ current_candidate_->EndPosition() < shared->EndPosition()) {
return;
}
}
@@ -1319,7 +1467,7 @@ Handle<Object> Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
// If the iteration count is larger than 1, we had to compile the outer
// function in order to create this shared function info. So there can
// be no JSFunction referencing it. We can anticipate creating a debug
- // info while bypassing PrepareFunctionForBreakpoints.
+ // info while bypassing PrepareFunctionForDebugExecution.
if (iteration > 1) {
AllowHeapAllocation allow_before_return;
CreateBreakInfo(shared_handle);
@@ -1348,6 +1496,10 @@ bool Debug::EnsureBreakInfo(Handle<SharedFunctionInfo> shared) {
!Compiler::Compile(shared, Compiler::CLEAR_EXCEPTION)) {
return false;
}
+ if (shared->GetCode() ==
+ isolate_->builtins()->builtin(Builtins::kDeserializeLazy)) {
+ Snapshot::EnsureBuiltinIsDeserialized(isolate_, shared);
+ }
CreateBreakInfo(shared);
return true;
}
@@ -1364,18 +1516,10 @@ void Debug::CreateBreakInfo(Handle<SharedFunctionInfo> shared) {
Handle<FixedArray> break_points(
factory->NewFixedArray(DebugInfo::kEstimatedNofBreakPointsInFunction));
- // Make a copy of the bytecode array if available.
- Handle<Object> maybe_debug_bytecode_array = factory->undefined_value();
- if (shared->HasBytecodeArray()) {
- Handle<BytecodeArray> original(shared->bytecode_array());
- maybe_debug_bytecode_array = factory->CopyBytecodeArray(original);
- }
-
int flags = debug_info->flags();
flags |= DebugInfo::kHasBreakInfo;
if (CanBreakAtEntry(shared)) flags |= DebugInfo::kCanBreakAtEntry;
debug_info->set_flags(flags);
- debug_info->set_debug_bytecode_array(*maybe_debug_bytecode_array);
debug_info->set_break_points(*break_points);
}
@@ -1514,11 +1658,11 @@ Handle<FixedArray> Debug::GetLoadedScripts() {
isolate_->heap()->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask,
GarbageCollectionReason::kDebugger);
Factory* factory = isolate_->factory();
- if (!factory->script_list()->IsWeakFixedArray()) {
+ if (!factory->script_list()->IsFixedArrayOfWeakCells()) {
return factory->empty_fixed_array();
}
- Handle<WeakFixedArray> array =
- Handle<WeakFixedArray>::cast(factory->script_list());
+ Handle<FixedArrayOfWeakCells> array =
+ Handle<FixedArrayOfWeakCells>::cast(factory->script_list());
Handle<FixedArray> results = factory->NewFixedArray(array->Length());
int length = 0;
{
@@ -1802,19 +1946,20 @@ void Debug::RunPromiseHook(PromiseHookType hook_type, Handle<JSPromise> promise,
return;
}
last_frame_was_promise_builtin = false;
- Handle<Code> code(info->code());
- if (*code == *BUILTIN_CODE(isolate_, AsyncFunctionPromiseCreate)) {
- type = debug::kDebugAsyncFunctionPromiseCreated;
- last_frame_was_promise_builtin = true;
- } else if (*code == *BUILTIN_CODE(isolate_, PromisePrototypeThen)) {
- type = debug::kDebugPromiseThen;
- last_frame_was_promise_builtin = true;
- } else if (*code == *BUILTIN_CODE(isolate_, PromisePrototypeCatch)) {
- type = debug::kDebugPromiseCatch;
- last_frame_was_promise_builtin = true;
- } else if (*code == *BUILTIN_CODE(isolate_, PromisePrototypeFinally)) {
- type = debug::kDebugPromiseFinally;
- last_frame_was_promise_builtin = true;
+ if (info->HasBuiltinId()) {
+ if (info->builtin_id() == Builtins::kAsyncFunctionPromiseCreate) {
+ type = debug::kDebugAsyncFunctionPromiseCreated;
+ last_frame_was_promise_builtin = true;
+ } else if (info->builtin_id() == Builtins::kPromisePrototypeThen) {
+ type = debug::kDebugPromiseThen;
+ last_frame_was_promise_builtin = true;
+ } else if (info->builtin_id() == Builtins::kPromisePrototypeCatch) {
+ type = debug::kDebugPromiseCatch;
+ last_frame_was_promise_builtin = true;
+ } else if (info->builtin_id() == Builtins::kPromisePrototypeFinally) {
+ type = debug::kDebugPromiseFinally;
+ last_frame_was_promise_builtin = true;
+ }
}
}
it.Advance();
@@ -1864,9 +2009,8 @@ bool Debug::IsBlackboxed(Handle<SharedFunctionInfo> shared) {
DCHECK(shared->script()->IsScript());
Handle<Script> script(Script::cast(shared->script()));
DCHECK(script->IsUserJavaScript());
- debug::Location start =
- GetDebugLocation(script, shared->start_position());
- debug::Location end = GetDebugLocation(script, shared->end_position());
+ debug::Location start = GetDebugLocation(script, shared->StartPosition());
+ debug::Location end = GetDebugLocation(script, shared->EndPosition());
is_blackboxed = debug_delegate_->IsFunctionBlackboxed(
ToApiHandle<debug::Script>(script), start, end);
}
@@ -1886,7 +2030,7 @@ bool Debug::AllFramesOnStackAreBlackboxed() {
bool Debug::CanBreakAtEntry(Handle<SharedFunctionInfo> shared) {
// Allow break at entry for builtin functions.
- if (shared->native()) {
+ if (shared->native() || shared->IsApiFunction()) {
// Functions that are subject to debugging can have regular breakpoints.
DCHECK(!shared->IsSubjectToDebugging());
return true;
@@ -2003,9 +2147,9 @@ void Debug::UpdateState() {
void Debug::UpdateHookOnFunctionCall() {
STATIC_ASSERT(LastStepAction == StepIn);
- hook_on_function_call_ = thread_local_.last_step_action_ == StepIn ||
- isolate_->needs_side_effect_check();
- DCHECK_IMPLIES(hook_on_function_call_, is_active_);
+ hook_on_function_call_ =
+ thread_local_.last_step_action_ == StepIn ||
+ isolate_->debug_execution_mode() == DebugInfo::kSideEffects;
}
MaybeHandle<Object> Debug::Call(Handle<Object> fun, Handle<Object> data) {
@@ -2174,8 +2318,57 @@ ReturnValueScope::~ReturnValueScope() {
debug_->set_return_value(*return_value_);
}
+void Debug::StartSideEffectCheckMode() {
+ DCHECK(isolate_->debug_execution_mode() != DebugInfo::kSideEffects);
+ isolate_->set_debug_execution_mode(DebugInfo::kSideEffects);
+ UpdateHookOnFunctionCall();
+ side_effect_check_failed_ = false;
+
+ DCHECK(!temporary_objects_);
+ temporary_objects_.reset(new TemporaryObjectsTracker());
+ isolate_->heap()->AddHeapObjectAllocationTracker(temporary_objects_.get());
+}
+
+void Debug::StopSideEffectCheckMode() {
+ DCHECK(isolate_->debug_execution_mode() == DebugInfo::kSideEffects);
+ if (side_effect_check_failed_) {
+ DCHECK(isolate_->has_pending_exception());
+ DCHECK_EQ(isolate_->heap()->termination_exception(),
+ isolate_->pending_exception());
+ // Convert the termination exception into a regular exception.
+ isolate_->CancelTerminateExecution();
+ isolate_->Throw(*isolate_->factory()->NewEvalError(
+ MessageTemplate::kNoSideEffectDebugEvaluate));
+ }
+ isolate_->set_debug_execution_mode(DebugInfo::kBreakpoints);
+ UpdateHookOnFunctionCall();
+ side_effect_check_failed_ = false;
+
+ DCHECK(temporary_objects_);
+ isolate_->heap()->RemoveHeapObjectAllocationTracker(temporary_objects_.get());
+ temporary_objects_.reset();
+}
+
+void Debug::ApplySideEffectChecks(Handle<DebugInfo> debug_info) {
+ DCHECK(debug_info->HasDebugBytecodeArray());
+ Handle<BytecodeArray> debug_bytecode(debug_info->DebugBytecodeArray());
+ DebugEvaluate::ApplySideEffectChecks(debug_bytecode);
+ debug_info->SetDebugExecutionMode(DebugInfo::kSideEffects);
+}
+
+void Debug::ClearSideEffectChecks(Handle<DebugInfo> debug_info) {
+ DCHECK(debug_info->HasDebugBytecodeArray());
+ Handle<BytecodeArray> debug_bytecode(debug_info->DebugBytecodeArray());
+ Handle<BytecodeArray> original(debug_info->OriginalBytecodeArray());
+ for (interpreter::BytecodeArrayIterator it(debug_bytecode); !it.done();
+ it.Advance()) {
+ debug_bytecode->set(it.current_offset(),
+ original->get(it.current_offset()));
+ }
+}
+
bool Debug::PerformSideEffectCheck(Handle<JSFunction> function) {
- DCHECK(isolate_->needs_side_effect_check());
+ DCHECK_EQ(isolate_->debug_execution_mode(), DebugInfo::kSideEffects);
DisallowJavascriptExecution no_js(isolate_);
if (!function->is_compiled() &&
!Compiler::Compile(function, Compiler::KEEP_EXCEPTION)) {
@@ -2191,12 +2384,28 @@ bool Debug::PerformSideEffectCheck(Handle<JSFunction> function) {
isolate_->TerminateExecution();
return false;
}
+  // If the function has a bytecode array, prepare the function for debug execution
+ // to perform runtime side effect checks.
+ if (function->shared()->requires_runtime_side_effect_checks()) {
+ Handle<SharedFunctionInfo> shared(function->shared());
+ DCHECK(shared->is_compiled());
+ if (shared->GetCode() ==
+ isolate_->builtins()->builtin(Builtins::kDeserializeLazy)) {
+ Snapshot::EnsureBuiltinIsDeserialized(isolate_, shared);
+ }
+ GetOrCreateDebugInfo(shared);
+ PrepareFunctionForDebugExecution(shared);
+ }
return true;
}
-bool Debug::PerformSideEffectCheckForCallback(Address function) {
- DCHECK(isolate_->needs_side_effect_check());
- if (DebugEvaluate::CallbackHasNoSideEffect(function)) return true;
+bool Debug::PerformSideEffectCheckForCallback(Handle<Object> callback_info) {
+ DCHECK_EQ(isolate_->debug_execution_mode(), DebugInfo::kSideEffects);
+ // TODO(7515): always pass a valid callback info object.
+ if (!callback_info.is_null() &&
+ DebugEvaluate::CallbackHasNoSideEffect(*callback_info)) {
+ return true;
+ }
side_effect_check_failed_ = true;
// Throw an uncatchable termination exception.
isolate_->TerminateExecution();
@@ -2204,6 +2413,44 @@ bool Debug::PerformSideEffectCheckForCallback(Address function) {
return false;
}
+bool Debug::PerformSideEffectCheckAtBytecode(InterpretedFrame* frame) {
+ using interpreter::Bytecode;
+
+ DCHECK_EQ(isolate_->debug_execution_mode(), DebugInfo::kSideEffects);
+ SharedFunctionInfo* shared = frame->function()->shared();
+ BytecodeArray* bytecode_array = shared->GetBytecodeArray();
+ int offset = frame->GetBytecodeOffset();
+ interpreter::BytecodeArrayAccessor bytecode_accessor(handle(bytecode_array),
+ offset);
+
+ Bytecode bytecode = bytecode_accessor.current_bytecode();
+ interpreter::Register reg;
+ switch (bytecode) {
+ case Bytecode::kStaCurrentContextSlot:
+ reg = interpreter::Register::current_context();
+ break;
+ default:
+ reg = bytecode_accessor.GetRegisterOperand(0);
+ break;
+ }
+ Handle<Object> object =
+ handle(frame->ReadInterpreterRegister(reg.index()), isolate_);
+ if (object->IsHeapObject()) {
+ Address address = Handle<HeapObject>::cast(object)->address();
+ if (temporary_objects_->HasObject(address)) {
+ return true;
+ }
+ }
+ if (FLAG_trace_side_effect_free_debug_evaluate) {
+ PrintF("[debug-evaluate] %s failed runtime side effect check.\n",
+ interpreter::Bytecodes::ToString(bytecode));
+ }
+ side_effect_check_failed_ = true;
+ // Throw an uncatchable termination exception.
+ isolate_->TerminateExecution();
+ return false;
+}
+
void LegacyDebugDelegate::PromiseEventOccurred(
v8::debug::PromiseDebugActionType type, int id, bool is_blackboxed) {
DebugScope debug_scope(isolate_->debug());
@@ -2314,21 +2561,5 @@ void NativeDebugDelegate::ProcessDebugEvent(v8::DebugEvent event,
CHECK(!isolate->has_scheduled_exception());
}
-NoSideEffectScope::~NoSideEffectScope() {
- if (isolate_->needs_side_effect_check() &&
- isolate_->debug()->side_effect_check_failed_) {
- DCHECK(isolate_->has_pending_exception());
- DCHECK_EQ(isolate_->heap()->termination_exception(),
- isolate_->pending_exception());
- // Convert the termination exception into a regular exception.
- isolate_->CancelTerminateExecution();
- isolate_->Throw(*isolate_->factory()->NewEvalError(
- MessageTemplate::kNoSideEffectDebugEvaluate));
- }
- isolate_->set_needs_side_effect_check(old_needs_side_effect_check_);
- isolate_->debug()->UpdateHookOnFunctionCall();
- isolate_->debug()->side_effect_check_failed_ = false;
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/debug/debug.h b/deps/v8/src/debug/debug.h
index 4ea9c2b872..035a480919 100644
--- a/deps/v8/src/debug/debug.h
+++ b/deps/v8/src/debug/debug.h
@@ -15,10 +15,10 @@
#include "src/debug/debug-interface.h"
#include "src/debug/interface-types.h"
#include "src/execution.h"
-#include "src/factory.h"
#include "src/flags.h"
#include "src/frames.h"
#include "src/globals.h"
+#include "src/heap/factory.h"
#include "src/objects/debug-objects.h"
#include "src/runtime/runtime.h"
#include "src/source-position-table.h"
@@ -225,8 +225,8 @@ class Debug {
void OnCompileError(Handle<Script> script);
void OnAfterCompile(Handle<Script> script);
- MUST_USE_RESULT MaybeHandle<Object> Call(Handle<Object> fun,
- Handle<Object> data);
+ V8_WARN_UNUSED_RESULT MaybeHandle<Object> Call(Handle<Object> fun,
+ Handle<Object> data);
Handle<Context> GetDebugContext();
void HandleDebugBreak(IgnoreBreakMode ignore_break_mode);
@@ -242,22 +242,21 @@ class Debug {
// Break point handling.
bool SetBreakPoint(Handle<JSFunction> function,
Handle<BreakPoint> break_point, int* source_position);
- bool SetBreakPointForScript(Handle<Script> script,
- Handle<BreakPoint> break_point,
- int* source_position);
void ClearBreakPoint(Handle<BreakPoint> break_point);
void ChangeBreakOnException(ExceptionBreakType type, bool enable);
bool IsBreakOnException(ExceptionBreakType type);
- bool SetBreakpoint(Handle<Script> script, Handle<String> condition,
- int* offset, int* id);
+ bool SetBreakPointForScript(Handle<Script> script, Handle<String> condition,
+ int* source_position, int* id);
+ bool SetBreakpointForFunction(Handle<JSFunction> function,
+ Handle<String> condition, int* id);
void RemoveBreakpoint(int id);
- // The parameter is either a BreakPoint object, or a FixedArray of
- // BreakPoint objects.
- // Returns an empty handle if no breakpoint is hit, or a FixedArray with all
+ // Find breakpoints from the debug info and the break location and check
+ // whether they are hit. Return an empty handle if not, or a FixedArray with
// hit BreakPoint objects.
- MaybeHandle<FixedArray> GetHitBreakPoints(Handle<Object> break_points);
+ MaybeHandle<FixedArray> GetHitBreakPoints(Handle<DebugInfo> debug_info,
+ int position);
// Stepping handling.
void PrepareStep(StepAction step_action);
@@ -268,7 +267,8 @@ class Debug {
void ClearStepOut();
void DeoptimizeFunction(Handle<SharedFunctionInfo> shared);
- void PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared);
+ void PrepareFunctionForDebugExecution(Handle<SharedFunctionInfo> shared);
+ void InstallDebugBreakTrampoline();
bool GetPossibleBreakpoints(Handle<Script> script, int start_position,
int end_position, bool restrict_to_function,
std::vector<BreakLocation>* locations);
@@ -336,8 +336,18 @@ class Debug {
return is_active() && !debug_context().is_null() && break_id() != 0;
}
+  // Apply the proper instrumentation depending on debug_execution_mode.
+ void ApplyInstrumentation(Handle<SharedFunctionInfo> shared);
+
+ void StartSideEffectCheckMode();
+ void StopSideEffectCheckMode();
+
+ void ApplySideEffectChecks(Handle<DebugInfo> debug_info);
+ void ClearSideEffectChecks(Handle<DebugInfo> debug_info);
+
bool PerformSideEffectCheck(Handle<JSFunction> function);
- bool PerformSideEffectCheckForCallback(Address function);
+ bool PerformSideEffectCheckForCallback(Handle<Object> callback_info);
+ bool PerformSideEffectCheckAtBytecode(InterpretedFrame* frame);
// Flags and states.
DebugScope* debugger_entry() {
@@ -403,7 +413,7 @@ class Debug {
private:
explicit Debug(Isolate* isolate);
- ~Debug() { DCHECK_NULL(debug_delegate_); }
+ ~Debug();
void UpdateState();
void UpdateHookOnFunctionCall();
@@ -417,7 +427,8 @@ class Debug {
int CurrentFrameCount();
inline bool ignore_events() const {
- return is_suppressed_ || !is_active_ || isolate_->needs_side_effect_check();
+ return is_suppressed_ || !is_active_ ||
+ isolate_->debug_execution_mode() == DebugInfo::kSideEffects;
}
inline bool break_disabled() const { return break_disabled_; }
@@ -434,14 +445,12 @@ class Debug {
void OnException(Handle<Object> exception, Handle<Object> promise);
// Constructors for debug event objects.
- MUST_USE_RESULT MaybeHandle<Object> MakeExecutionState();
- MUST_USE_RESULT MaybeHandle<Object> MakeExceptionEvent(
- Handle<Object> exception,
- bool uncaught,
- Handle<Object> promise);
- MUST_USE_RESULT MaybeHandle<Object> MakeCompileEvent(
+ V8_WARN_UNUSED_RESULT MaybeHandle<Object> MakeExecutionState();
+ V8_WARN_UNUSED_RESULT MaybeHandle<Object> MakeExceptionEvent(
+ Handle<Object> exception, bool uncaught, Handle<Object> promise);
+ V8_WARN_UNUSED_RESULT MaybeHandle<Object> MakeCompileEvent(
Handle<Script> script, v8::DebugEvent type);
- MUST_USE_RESULT MaybeHandle<Object> MakeAsyncTaskEvent(
+ V8_WARN_UNUSED_RESULT MaybeHandle<Object> MakeAsyncTaskEvent(
v8::debug::PromiseDebugActionType type, int id);
void ProcessCompileEvent(v8::DebugEvent event, Handle<Script> script);
@@ -468,7 +477,9 @@ class Debug {
BreakLocation* location,
bool* has_break_points = nullptr);
bool IsMutedAtCurrentLocation(JavaScriptFrame* frame);
- bool CheckBreakPoint(Handle<BreakPoint> break_point);
+ // Check whether a BreakPoint object is hit. Evaluate condition depending
+ // on whether this is a regular break location or a break at function entry.
+ bool CheckBreakPoint(Handle<BreakPoint> break_point, bool is_break_at_entry);
MaybeHandle<Object> CallFunction(const char* name, int argc,
Handle<Object> args[],
bool catch_exceptions = true);
@@ -520,6 +531,10 @@ class Debug {
// List of active debug info objects.
DebugInfoListNode* debug_info_list_;
+  // Used by the side effect check to mark temporary objects.
+ class TemporaryObjectsTracker;
+ std::unique_ptr<TemporaryObjectsTracker> temporary_objects_;
+
// Used to collect histogram data on debugger feature usage.
DebugFeatureTracker feature_tracker_;
@@ -698,9 +713,9 @@ class ReturnValueScope {
// Stack allocated class for disabling break.
class DisableBreak BASE_EMBEDDED {
public:
- explicit DisableBreak(Debug* debug)
+ explicit DisableBreak(Debug* debug, bool disable = true)
: debug_(debug), previous_break_disabled_(debug->break_disabled_) {
- debug_->break_disabled_ = true;
+ debug_->break_disabled_ = disable;
}
~DisableBreak() {
debug_->break_disabled_ = previous_break_disabled_;
@@ -727,24 +742,6 @@ class SuppressDebug BASE_EMBEDDED {
DISALLOW_COPY_AND_ASSIGN(SuppressDebug);
};
-class NoSideEffectScope {
- public:
- NoSideEffectScope(Isolate* isolate, bool disallow_side_effects)
- : isolate_(isolate),
- old_needs_side_effect_check_(isolate->needs_side_effect_check()) {
- isolate->set_needs_side_effect_check(old_needs_side_effect_check_ ||
- disallow_side_effects);
- isolate->debug()->UpdateHookOnFunctionCall();
- isolate->debug()->side_effect_check_failed_ = false;
- }
- ~NoSideEffectScope();
-
- private:
- Isolate* isolate_;
- bool old_needs_side_effect_check_;
- DISALLOW_COPY_AND_ASSIGN(NoSideEffectScope);
-};
-
// Code generator routines.
class DebugCodegen : public AllStatic {
public:
@@ -759,6 +756,9 @@ class DebugCodegen : public AllStatic {
// Builtin to atomically (wrt deopts) handle debugger statement and
// drop frames to restart function if necessary.
static void GenerateHandleDebuggerStatement(MacroAssembler* masm);
+
+ // Builtin to trigger a debug break before entering the function.
+ static void GenerateDebugBreakTrampoline(MacroAssembler* masm);
};
diff --git a/deps/v8/src/debug/liveedit.cc b/deps/v8/src/debug/liveedit.cc
index 4f53f8554f..68be1a39dd 100644
--- a/deps/v8/src/debug/liveedit.cc
+++ b/deps/v8/src/debug/liveedit.cc
@@ -824,15 +824,14 @@ void LiveEdit::ReplaceFunctionCode(
compile_info_wrapper.GetSharedFunctionInfo();
if (shared_info->is_compiled()) {
- // Take whatever code we can get from the new shared function info. We
- // expect activations of neither the old bytecode nor old FCG code, since
- // the lowest activation is going to be restarted.
- Handle<Code> old_code(shared_info->code());
- Handle<Code> new_code(new_shared_info->code());
// Clear old bytecode. This will trigger self-healing if we do not install
// new bytecode.
- shared_info->ClearBytecodeArray();
- shared_info->set_bytecode_array(new_shared_info->bytecode_array());
+ shared_info->FlushCompiled();
+ if (new_shared_info->HasInterpreterData()) {
+ shared_info->set_interpreter_data(new_shared_info->interpreter_data());
+ } else {
+ shared_info->set_bytecode_array(new_shared_info->GetBytecodeArray());
+ }
if (shared_info->HasBreakInfo()) {
// Existing break points will be re-applied. Reset the debug info here.
@@ -840,21 +839,22 @@ void LiveEdit::ReplaceFunctionCode(
handle(shared_info->GetDebugInfo()));
}
shared_info->set_scope_info(new_shared_info->scope_info());
- shared_info->set_outer_scope_info(new_shared_info->outer_scope_info());
+ shared_info->set_feedback_metadata(new_shared_info->feedback_metadata());
shared_info->DisableOptimization(BailoutReason::kLiveEdit);
- // Update the type feedback vector, if needed.
- Handle<FeedbackMetadata> new_feedback_metadata(
- new_shared_info->feedback_metadata());
- shared_info->set_feedback_metadata(*new_feedback_metadata);
} else {
- shared_info->set_feedback_metadata(
- FeedbackMetadata::cast(isolate->heap()->empty_fixed_array()));
+ // There should not be any feedback metadata. Keep the outer scope info the
+ // same.
+ DCHECK(!shared_info->HasFeedbackMetadata());
}
int start_position = compile_info_wrapper.GetStartPosition();
int end_position = compile_info_wrapper.GetEndPosition();
- shared_info->set_start_position(start_position);
- shared_info->set_end_position(end_position);
+ // TODO(cbruni): only store position information on the SFI.
+ shared_info->set_raw_start_position(start_position);
+ shared_info->set_raw_end_position(end_position);
+ if (shared_info->scope_info()->HasPositionInfo()) {
+ shared_info->scope_info()->SetPositionInfo(start_position, end_position);
+ }
FeedbackVectorFixer::PatchFeedbackVector(&compile_info_wrapper, shared_info,
isolate);
@@ -873,9 +873,9 @@ void LiveEdit::FunctionSourceUpdated(Handle<JSArray> shared_info_array,
void LiveEdit::FixupScript(Handle<Script> script, int max_function_literal_id) {
Isolate* isolate = script->GetIsolate();
- Handle<FixedArray> old_infos(script->shared_function_infos(), isolate);
- Handle<FixedArray> new_infos(
- isolate->factory()->NewFixedArray(max_function_literal_id + 1));
+ Handle<WeakFixedArray> old_infos(script->shared_function_infos(), isolate);
+ Handle<WeakFixedArray> new_infos(
+ isolate->factory()->NewWeakFixedArray(max_function_literal_id + 1));
script->set_shared_function_infos(*new_infos);
SharedFunctionInfo::ScriptIterator iterator(isolate, old_infos);
while (SharedFunctionInfo* shared = iterator.Next()) {
@@ -883,7 +883,7 @@ void LiveEdit::FixupScript(Handle<Script> script, int max_function_literal_id) {
// as we severed the link from the Script to the SharedFunctionInfo above.
Handle<SharedFunctionInfo> info(shared, isolate);
info->set_script(isolate->heap()->undefined_value());
- Handle<Object> new_noscript_list = WeakFixedArray::Add(
+ Handle<Object> new_noscript_list = FixedArrayOfWeakCells::Add(
isolate->factory()->noscript_shared_function_infos(), info);
isolate->heap()->SetRootNoScriptSharedFunctionInfos(*new_noscript_list);
@@ -975,20 +975,25 @@ void LiveEdit::PatchFunctionPositions(Handle<JSArray> shared_info_array,
SharedInfoWrapper shared_info_wrapper(shared_info_array);
Handle<SharedFunctionInfo> info = shared_info_wrapper.GetInfo();
- int old_function_start = info->start_position();
+ int old_function_start = info->StartPosition();
int new_function_start = TranslatePosition(old_function_start,
position_change_array);
- int new_function_end = TranslatePosition(info->end_position(),
- position_change_array);
+ int new_function_end =
+ TranslatePosition(info->EndPosition(), position_change_array);
int new_function_token_pos =
TranslatePosition(info->function_token_position(), position_change_array);
- info->set_start_position(new_function_start);
- info->set_end_position(new_function_end);
+ info->set_raw_start_position(new_function_start);
+ info->set_raw_end_position(new_function_end);
+ // TODO(cbruni): Allocate helper ScopeInfo once the position fields are gone
+ // on the SFI.
+ if (info->scope_info()->HasPositionInfo()) {
+ info->scope_info()->SetPositionInfo(new_function_start, new_function_end);
+ }
info->set_function_token_position(new_function_token_pos);
if (info->HasBytecodeArray()) {
- TranslateSourcePositionTable(handle(info->bytecode_array()),
+ TranslateSourcePositionTable(handle(info->GetBytecodeArray()),
position_change_array);
}
if (info->HasBreakInfo()) {
@@ -1014,7 +1019,7 @@ static Handle<Script> CreateScriptCopy(Handle<Script> original) {
original->eval_from_shared_or_wrapped_arguments());
copy->set_eval_from_position(original->eval_from_position());
- Handle<FixedArray> infos(isolate->factory()->NewFixedArray(
+ Handle<WeakFixedArray> infos(isolate->factory()->NewWeakFixedArray(
original->shared_function_infos()->length()));
copy->set_shared_function_infos(*infos);
@@ -1061,7 +1066,7 @@ void LiveEdit::ReplaceRefToNestedFunction(
Handle<SharedFunctionInfo> subst_shared =
UnwrapSharedFunctionInfoFromJSValue(subst_function_wrapper);
- for (RelocIterator it(parent_shared->code()); !it.done(); it.next()) {
+ for (RelocIterator it(parent_shared->GetCode()); !it.done(); it.next()) {
if (it.rinfo()->rmode() == RelocInfo::EMBEDDED_OBJECT) {
if (it.rinfo()->target_object() == *orig_shared) {
it.rinfo()->set_target_object(*subst_shared);
diff --git a/deps/v8/src/debug/liveedit.h b/deps/v8/src/debug/liveedit.h
index db599b77e6..120b9c87eb 100644
--- a/deps/v8/src/debug/liveedit.h
+++ b/deps/v8/src/debug/liveedit.h
@@ -76,9 +76,8 @@ class LiveEdit : AllStatic {
public:
static void InitializeThreadLocal(Debug* debug);
- MUST_USE_RESULT static MaybeHandle<JSArray> GatherCompileInfo(
- Handle<Script> script,
- Handle<String> source);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSArray> GatherCompileInfo(
+ Handle<Script> script, Handle<String> source);
static void ReplaceFunctionCode(Handle<JSArray> new_compile_info_array,
Handle<JSArray> shared_info_array);
diff --git a/deps/v8/src/debug/liveedit.js b/deps/v8/src/debug/liveedit.js
index 6c68e52a23..db0aaf8177 100644
--- a/deps/v8/src/debug/liveedit.js
+++ b/deps/v8/src/debug/liveedit.js
@@ -919,7 +919,7 @@
function CopyErrorPositionToDetails(e, details) {
function createPositionStruct(script, position) {
if (position == -1) return;
- var location = script.locationFromPosition(position, true);
+ var location = %ScriptPositionInfo(script, position, true);
if (location == null) return;
return {
line: location.line + 1,
diff --git a/deps/v8/src/debug/mirrors.js b/deps/v8/src/debug/mirrors.js
index 85052b3cae..99b6414f3a 100644
--- a/deps/v8/src/debug/mirrors.js
+++ b/deps/v8/src/debug/mirrors.js
@@ -2287,7 +2287,7 @@ ScriptMirror.prototype.value = function() {
ScriptMirror.prototype.name = function() {
- return this.script_.name || this.script_.nameOrSourceURL();
+ return this.script_.name || this.script_.source_url;
};
@@ -2339,7 +2339,7 @@ ScriptMirror.prototype.lineCount = function() {
ScriptMirror.prototype.locationFromPosition = function(
position, include_resource_offset) {
- return this.script_.locationFromPosition(position, include_resource_offset);
+ return %ScriptPositionInfo(this.script_, position, !!include_resource_offset);
};
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index 644bd29796..853223489a 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -100,7 +100,9 @@ DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
for (auto it = translated_values.begin(); it != translated_values.end();
it++) {
if (it->kind() == TranslatedFrame::kInterpretedFunction ||
- it->kind() == TranslatedFrame::kJavaScriptBuiltinContinuation) {
+ it->kind() == TranslatedFrame::kJavaScriptBuiltinContinuation ||
+ it->kind() ==
+ TranslatedFrame::kJavaScriptBuiltinContinuationWithCatch) {
if (counter == 0) {
frame_it = it;
break;
@@ -154,7 +156,7 @@ class ActivationsFinder : public ThreadVisitor {
int trampoline_pc = safepoint.trampoline_pc();
DCHECK_IMPLIES(code == topmost_, safe_to_deopt_);
// Replace the current pc on the stack with the trampoline.
- it.frame()->set_pc(code->instruction_start() + trampoline_pc);
+ it.frame()->set_pc(code->raw_instruction_start() + trampoline_pc);
}
}
}
@@ -498,7 +500,7 @@ Address Deoptimizer::GetDeoptimizationEntry(Isolate* isolate, int id,
CHECK_LE(type, kLastBailoutType);
CHECK_NOT_NULL(data->deopt_entry_code_[type]);
Code* code = data->deopt_entry_code_[type];
- return code->instruction_start() + (id * table_entry_size_);
+ return code->raw_instruction_start() + (id * table_entry_size_);
}
@@ -509,7 +511,7 @@ int Deoptimizer::GetDeoptimizationId(Isolate* isolate,
CHECK_LE(type, kLastBailoutType);
Code* code = data->deopt_entry_code_[type];
if (code == nullptr) return kNotDeoptimizationEntry;
- Address start = code->instruction_start();
+ Address start = code->raw_instruction_start();
if (addr < start ||
addr >= start + (kMaxNumberOfEntries * table_entry_size_)) {
return kNotDeoptimizationEntry;
@@ -546,9 +548,13 @@ int LookupCatchHandler(TranslatedFrame* translated_frame, int* data_out) {
switch (translated_frame->kind()) {
case TranslatedFrame::kInterpretedFunction: {
int bytecode_offset = translated_frame->node_id().ToInt();
- HandlerTable table(translated_frame->raw_shared_info()->bytecode_array());
+ HandlerTable table(
+ translated_frame->raw_shared_info()->GetBytecodeArray());
return table.LookupRange(bytecode_offset, data_out, nullptr);
}
+ case TranslatedFrame::kJavaScriptBuiltinContinuationWithCatch: {
+ return 0;
+ }
default:
break;
}
@@ -653,10 +659,11 @@ void Deoptimizer::DoComputeOutputFrames() {
for (size_t i = 0; i < count; ++i, ++frame_index) {
// Read the ast node id, function, and frame height for this output frame.
TranslatedFrame* translated_frame = &(translated_state_.frames()[i]);
+ bool handle_exception = deoptimizing_throw_ && i == count - 1;
switch (translated_frame->kind()) {
case TranslatedFrame::kInterpretedFunction:
DoComputeInterpretedFrame(translated_frame, frame_index,
- deoptimizing_throw_ && i == count - 1);
+ handle_exception);
jsframe_count_++;
break;
case TranslatedFrame::kArgumentsAdaptor:
@@ -666,10 +673,19 @@ void Deoptimizer::DoComputeOutputFrames() {
DoComputeConstructStubFrame(translated_frame, frame_index);
break;
case TranslatedFrame::kBuiltinContinuation:
- DoComputeBuiltinContinuation(translated_frame, frame_index, false);
+ DoComputeBuiltinContinuation(translated_frame, frame_index,
+ BuiltinContinuationMode::STUB);
break;
case TranslatedFrame::kJavaScriptBuiltinContinuation:
- DoComputeBuiltinContinuation(translated_frame, frame_index, true);
+ DoComputeBuiltinContinuation(translated_frame, frame_index,
+ BuiltinContinuationMode::JAVASCRIPT);
+ break;
+ case TranslatedFrame::kJavaScriptBuiltinContinuationWithCatch:
+ DoComputeBuiltinContinuation(
+ translated_frame, frame_index,
+ handle_exception
+ ? BuiltinContinuationMode::JAVASCRIPT_HANDLE_EXCEPTION
+ : BuiltinContinuationMode::JAVASCRIPT_WITH_CATCH);
break;
case TranslatedFrame::kInvalid:
FATAL("invalid frame");
@@ -876,7 +892,7 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
output_offset -= kPointerSize;
Object* bytecode_array = shared->HasBreakInfo()
? shared->GetDebugInfo()->DebugBytecodeArray()
- : shared->bytecode_array();
+ : shared->GetBytecodeArray();
WriteValueToOutput(bytecode_array, 0, frame_index, output_offset,
"bytecode array ");
@@ -1342,6 +1358,63 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
}
}
+bool Deoptimizer::BuiltinContinuationModeIsJavaScript(
+ BuiltinContinuationMode mode) {
+ switch (mode) {
+ case BuiltinContinuationMode::STUB:
+ return false;
+ case BuiltinContinuationMode::JAVASCRIPT:
+ case BuiltinContinuationMode::JAVASCRIPT_WITH_CATCH:
+ case BuiltinContinuationMode::JAVASCRIPT_HANDLE_EXCEPTION:
+ return true;
+ }
+ UNREACHABLE();
+}
+
+bool Deoptimizer::BuiltinContinuationModeIsWithCatch(
+ BuiltinContinuationMode mode) {
+ switch (mode) {
+ case BuiltinContinuationMode::STUB:
+ case BuiltinContinuationMode::JAVASCRIPT:
+ return false;
+ case BuiltinContinuationMode::JAVASCRIPT_WITH_CATCH:
+ case BuiltinContinuationMode::JAVASCRIPT_HANDLE_EXCEPTION:
+ return true;
+ }
+ UNREACHABLE();
+}
+
+StackFrame::Type Deoptimizer::BuiltinContinuationModeToFrameType(
+ BuiltinContinuationMode mode) {
+ switch (mode) {
+ case BuiltinContinuationMode::STUB:
+ return StackFrame::BUILTIN_CONTINUATION;
+ case BuiltinContinuationMode::JAVASCRIPT:
+ return StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION;
+ case BuiltinContinuationMode::JAVASCRIPT_WITH_CATCH:
+ return StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH;
+ case BuiltinContinuationMode::JAVASCRIPT_HANDLE_EXCEPTION:
+ return StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH;
+ }
+ UNREACHABLE();
+}
+
+Builtins::Name Deoptimizer::TrampolineForBuiltinContinuation(
+ BuiltinContinuationMode mode, bool must_handle_result) {
+ switch (mode) {
+ case BuiltinContinuationMode::STUB:
+ return must_handle_result ? Builtins::kContinueToCodeStubBuiltinWithResult
+ : Builtins::kContinueToCodeStubBuiltin;
+ case BuiltinContinuationMode::JAVASCRIPT:
+ case BuiltinContinuationMode::JAVASCRIPT_WITH_CATCH:
+ case BuiltinContinuationMode::JAVASCRIPT_HANDLE_EXCEPTION:
+ return must_handle_result
+ ? Builtins::kContinueToJavaScriptBuiltinWithResult
+ : Builtins::kContinueToJavaScriptBuiltin;
+ }
+ UNREACHABLE();
+}
+
// BuiltinContinuationFrames capture the machine state that is expected as input
// to a builtin, including both input register values and stack parameters. When
// the frame is reactivated (i.e. the frame below it returns), a
@@ -1363,77 +1436,104 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
// TO
// | .... |
// +-------------------------+
+// | arg padding (arch dept) |<- at most 1*kPointerSize
+// +-------------------------+
// | builtin param 0 |<- FrameState input value n becomes
// +-------------------------+
// | ... |
// +-------------------------+
// | builtin param m |<- FrameState input value n+m-1, or in
-// +-------------------------+ the LAZY case, return LAZY result value
+// +-----needs-alignment-----+ the LAZY case, return LAZY result value
// | ContinueToBuiltin entry |
// +-------------------------+
// | | saved frame (FP) |
-// | +=========================+<- fpreg
+// | +=====needs=alignment=====+<- fpreg
// | |constant pool (if ool_cp)|
// v +-------------------------+
// |BUILTIN_CONTINUATION mark|
// +-------------------------+
-// | JS Builtin code object |
+// | JSFunction (or zero) |<- only if JavaScript builtin
+// +-------------------------+
+// | frame height above FP |
+// +-------------------------+
+// | context |<- this non-standard context slot contains
+// +-------------------------+ the context, even for non-JS builtins.
+// | builtin address |
// +-------------------------+
// | builtin input GPR reg0 |<- populated from deopt FrameState using
// +-------------------------+ the builtin's CallInterfaceDescriptor
// | ... | to map a FrameState's 0..n-1 inputs to
// +-------------------------+ the builtin's n input register params.
// | builtin input GPR regn |
-// |-------------------------|<- spreg
+// +-------------------------+
+// | reg padding (arch dept) |
+// +-----needs--alignment----+
+// | res padding (arch dept) |<- only if {is_topmost}; result is pop'd by
+// +-------------------------+<- kNotifyDeopt ASM stub and moved to acc
+// | result value |<- reg, as ContinueToBuiltin stub expects.
+// +-----needs-alignment-----+<- spreg
//
void Deoptimizer::DoComputeBuiltinContinuation(
TranslatedFrame* translated_frame, int frame_index,
- bool java_script_builtin) {
+ BuiltinContinuationMode mode) {
TranslatedFrame::iterator value_iterator = translated_frame->begin();
int input_index = 0;
// The output frame must have room for all of the parameters that need to be
// passed to the builtin continuation.
- int height_in_words = translated_frame->height();
+ const int height_in_words = translated_frame->height();
BailoutId bailout_id = translated_frame->node_id();
Builtins::Name builtin_name = Builtins::GetBuiltinFromBailoutId(bailout_id);
- DCHECK(!Builtins::IsLazy(builtin_name));
+ CHECK(!Builtins::IsLazy(builtin_name));
Code* builtin = isolate()->builtins()->builtin(builtin_name);
Callable continuation_callable =
Builtins::CallableFor(isolate(), builtin_name);
CallInterfaceDescriptor continuation_descriptor =
continuation_callable.descriptor();
- bool is_bottommost = (0 == frame_index);
- bool is_topmost = (output_count_ - 1 == frame_index);
- bool must_handle_result = !is_topmost || bailout_type_ == LAZY;
+ const bool is_bottommost = (0 == frame_index);
+ const bool is_topmost = (output_count_ - 1 == frame_index);
+ const bool must_handle_result = !is_topmost || bailout_type_ == LAZY;
const RegisterConfiguration* config(RegisterConfiguration::Default());
- int allocatable_register_count = config->num_allocatable_general_registers();
- int padding_slot_count = BuiltinContinuationFrameConstants::PaddingSlotCount(
- allocatable_register_count);
+ const int allocatable_register_count =
+ config->num_allocatable_general_registers();
+ const int padding_slot_count =
+ BuiltinContinuationFrameConstants::PaddingSlotCount(
+ allocatable_register_count);
- int register_parameter_count =
+ const int register_parameter_count =
continuation_descriptor.GetRegisterParameterCount();
// Make sure to account for the context by removing it from the register
// parameter count.
- int stack_param_count = height_in_words - register_parameter_count - 1;
- if (must_handle_result) stack_param_count++;
- unsigned output_frame_size =
- kPointerSize * (stack_param_count + allocatable_register_count +
- padding_slot_count) +
- BuiltinContinuationFrameConstants::kFixedFrameSize;
+ const int translated_stack_parameters =
+ height_in_words - register_parameter_count - 1;
+ const int stack_param_count =
+ translated_stack_parameters + (must_handle_result ? 1 : 0) +
+ (BuiltinContinuationModeIsWithCatch(mode) ? 1 : 0);
+ const int stack_param_pad_count =
+ ShouldPadArguments(stack_param_count) ? 1 : 0;
// If the builtins frame appears to be topmost we should ensure that the
// value of result register is preserved during continuation execution.
// We do this here by "pushing" the result of callback function to the
// top of the reconstructed stack and popping it in
// {Builtins::kNotifyDeoptimized}.
- if (is_topmost) {
- output_frame_size += kPointerSize;
- if (PadTopOfStackRegister()) output_frame_size += kPointerSize;
- }
+ const int push_result_count =
+ is_topmost ? (PadTopOfStackRegister() ? 2 : 1) : 0;
+
+ const unsigned output_frame_size =
+ kPointerSize * (stack_param_count + stack_param_pad_count +
+ allocatable_register_count + padding_slot_count +
+ push_result_count) +
+ BuiltinContinuationFrameConstants::kFixedFrameSize;
+
+ const unsigned output_frame_size_above_fp =
+ kPointerSize * (allocatable_register_count + padding_slot_count +
+ push_result_count) +
+ (BuiltinContinuationFrameConstants::kFixedFrameSize -
+ BuiltinContinuationFrameConstants::kFixedFrameSizeAboveFp);
// Validate types of parameters. They must all be tagged except for argc for
// JS builtins.
@@ -1451,7 +1551,7 @@ void Deoptimizer::DoComputeBuiltinContinuation(
CHECK(IsAnyTagged(type.representation()));
}
}
- CHECK_EQ(java_script_builtin, has_argc);
+ CHECK_EQ(BuiltinContinuationModeIsJavaScript(mode), has_argc);
if (trace_scope_ != nullptr) {
PrintF(trace_scope_->file(),
@@ -1462,10 +1562,6 @@ void Deoptimizer::DoComputeBuiltinContinuation(
stack_param_count);
}
- int translated_stack_parameters =
- must_handle_result ? stack_param_count - 1 : stack_param_count;
-
- if (ShouldPadArguments(stack_param_count)) output_frame_size += kPointerSize;
FrameDescription* output_frame = new (output_frame_size)
FrameDescription(output_frame_size, stack_param_count);
output_[frame_index] = output_frame;
@@ -1511,6 +1607,27 @@ void Deoptimizer::DoComputeBuiltinContinuation(
output_frame_offset);
}
+ switch (mode) {
+ case BuiltinContinuationMode::STUB:
+ break;
+ case BuiltinContinuationMode::JAVASCRIPT:
+ break;
+ case BuiltinContinuationMode::JAVASCRIPT_WITH_CATCH: {
+ output_frame_offset -= kPointerSize;
+ WriteValueToOutput(isolate()->heap()->the_hole_value(), input_index,
+ frame_index, output_frame_offset,
+ "placeholder for exception on lazy deopt ");
+ } break;
+ case BuiltinContinuationMode::JAVASCRIPT_HANDLE_EXCEPTION: {
+ output_frame_offset -= kPointerSize;
+ intptr_t accumulator_value =
+ input_->GetRegister(kInterpreterAccumulatorRegister.code());
+ WriteValueToOutput(reinterpret_cast<Object*>(accumulator_value), 0,
+ frame_index, output_frame_offset,
+ "exception (from accumulator)");
+ } break;
+ }
+
if (must_handle_result) {
output_frame_offset -= kPointerSize;
WriteValueToOutput(isolate()->heap()->the_hole_value(), input_index,
@@ -1535,7 +1652,8 @@ void Deoptimizer::DoComputeBuiltinContinuation(
// instruction selector).
Object* context = value_iterator->GetRawValue();
value = reinterpret_cast<intptr_t>(context);
- register_values[kContextRegister.code()] = {context, value_iterator};
+ const RegisterValue context_register_value = {context, value_iterator};
+ register_values[kContextRegister.code()] = context_register_value;
output_frame->SetContext(value);
output_frame->SetRegister(kContextRegister.code(), value);
++input_index;
@@ -1560,11 +1678,13 @@ void Deoptimizer::DoComputeBuiltinContinuation(
value = output_[frame_index - 1]->GetFp();
}
output_frame->SetCallerFp(output_frame_offset, value);
- intptr_t fp_value = top_address + output_frame_offset;
+ const intptr_t fp_value = top_address + output_frame_offset;
output_frame->SetFp(fp_value);
DebugPrintOutputSlot(value, frame_index, output_frame_offset,
"caller's fp\n");
+ DCHECK_EQ(output_frame_size_above_fp, output_frame_offset);
+
if (FLAG_enable_embedded_constant_pool) {
// Read the caller's constant pool from the previous frame.
output_frame_offset -= kPointerSize;
@@ -1581,21 +1701,43 @@ void Deoptimizer::DoComputeBuiltinContinuation(
// A marker value is used in place of the context.
output_frame_offset -= kPointerSize;
intptr_t marker =
- java_script_builtin
- ? StackFrame::TypeToMarker(
- StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION)
- : StackFrame::TypeToMarker(StackFrame::BUILTIN_CONTINUATION);
+ StackFrame::TypeToMarker(BuiltinContinuationModeToFrameType(mode));
+
output_frame->SetFrameSlot(output_frame_offset, marker);
DebugPrintOutputSlot(marker, frame_index, output_frame_offset,
"context (builtin continuation sentinel)\n");
output_frame_offset -= kPointerSize;
- value = java_script_builtin ? maybe_function : 0;
+ value = BuiltinContinuationModeIsJavaScript(mode) ? maybe_function : 0;
+ output_frame->SetFrameSlot(output_frame_offset, value);
+ DebugPrintOutputSlot(
+ value, frame_index, output_frame_offset,
+ BuiltinContinuationModeIsJavaScript(mode) ? "JSFunction\n" : "unused\n");
+
+ // The delta from the SP to the FP; used to reconstruct SP in
+ // Isolate::UnwindAndFindHandler.
+ output_frame_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(Smi::FromInt(output_frame_size_above_fp));
+ output_frame->SetFrameSlot(output_frame_offset, value);
+ DebugPrintOutputSlot(value, frame_index, output_frame_offset,
+ "frame height at deoptimization\n");
+
+  // The context, even if this is a stub continuation frame. We can't use the
+ // usual context slot, because we must store the frame marker there.
+ output_frame_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(context);
output_frame->SetFrameSlot(output_frame_offset, value);
DebugPrintOutputSlot(value, frame_index, output_frame_offset,
- java_script_builtin ? "JSFunction\n" : "unused\n");
+ "builtin JavaScript context\n");
+ if (context == isolate_->heap()->arguments_marker()) {
+ Address output_address =
+ reinterpret_cast<Address>(output_[frame_index]->GetTop()) +
+ output_frame_offset;
+ values_to_materialize_.push_back(
+ {output_address, context_register_value.iterator_});
+ }
- // The builtin to continue to
+ // The builtin to continue to.
output_frame_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(builtin);
output_frame->SetFrameSlot(output_frame_offset, value);
@@ -1610,7 +1752,7 @@ void Deoptimizer::DoComputeBuiltinContinuation(
output_frame->SetFrameSlot(output_frame_offset, value);
if (trace_scope_ != nullptr) {
ScopedVector<char> str(128);
- if (java_script_builtin &&
+ if (BuiltinContinuationModeIsJavaScript(mode) &&
code == kJavaScriptCallArgCountRegister.code()) {
SNPrintF(
str,
@@ -1673,20 +1815,10 @@ void Deoptimizer::DoComputeBuiltinContinuation(
// Ensure the frame pointer register points to the callee's frame. The builtin
// will build its own frame once we continue to it.
Register fp_reg = JavaScriptFrame::fp_register();
- output_frame->SetRegister(fp_reg.code(), output_[frame_index - 1]->GetFp());
-
- Code* continue_to_builtin =
- java_script_builtin
- ? (must_handle_result
- ? isolate()->builtins()->builtin(
- Builtins::kContinueToJavaScriptBuiltinWithResult)
- : isolate()->builtins()->builtin(
- Builtins::kContinueToJavaScriptBuiltin))
- : (must_handle_result
- ? isolate()->builtins()->builtin(
- Builtins::kContinueToCodeStubBuiltinWithResult)
- : isolate()->builtins()->builtin(
- Builtins::kContinueToCodeStubBuiltin));
+ output_frame->SetRegister(fp_reg.code(), fp_value);
+
+ Code* continue_to_builtin = isolate()->builtins()->builtin(
+ TrampolineForBuiltinContinuation(mode, must_handle_result));
output_frame->SetPc(
reinterpret_cast<intptr_t>(continue_to_builtin->InstructionStart()));
@@ -1698,6 +1830,11 @@ void Deoptimizer::DoComputeBuiltinContinuation(
void Deoptimizer::MaterializeHeapObjects() {
translated_state_.Prepare(reinterpret_cast<Address>(stack_fp_));
+ if (FLAG_deopt_every_n_times > 0) {
+ // Doing a GC here will find problems with the deoptimized frames.
+ isolate_->heap()->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask,
+ GarbageCollectionReason::kTesting);
+ }
for (auto& materialization : values_to_materialize_) {
Handle<Object> value = materialization.value_->GetValue();
@@ -1936,6 +2073,14 @@ void Translation::BeginJavaScriptBuiltinContinuationFrame(BailoutId bailout_id,
buffer_->Add(height);
}
+void Translation::BeginJavaScriptBuiltinContinuationWithCatchFrame(
+ BailoutId bailout_id, int literal_id, unsigned height) {
+ buffer_->Add(JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME);
+ buffer_->Add(bailout_id.ToInt());
+ buffer_->Add(literal_id);
+ buffer_->Add(height);
+}
+
void Translation::BeginConstructStubFrame(BailoutId bailout_id, int literal_id,
unsigned height) {
buffer_->Add(CONSTRUCT_STUB_FRAME);
@@ -2094,6 +2239,7 @@ int Translation::NumberOfOperandsFor(Opcode opcode) {
case CONSTRUCT_STUB_FRAME:
case BUILTIN_CONTINUATION_FRAME:
case JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME:
+ case JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME:
return 3;
}
FATAL("Unexpected translation type");
@@ -2317,7 +2463,7 @@ Deoptimizer::DeoptInfo Deoptimizer::GetDeoptInfo(Code* code, Address pc) {
int Deoptimizer::ComputeSourcePositionFromBytecodeArray(
SharedFunctionInfo* shared, BailoutId node_id) {
DCHECK(shared->HasBytecodeArray());
- return AbstractCode::cast(shared->bytecode_array())
+ return AbstractCode::cast(shared->GetBytecodeArray())
->SourcePosition(node_id.ToInt());
}
@@ -2671,6 +2817,14 @@ TranslatedFrame TranslatedFrame::JavaScriptBuiltinContinuationFrame(
return frame;
}
+TranslatedFrame TranslatedFrame::JavaScriptBuiltinContinuationWithCatchFrame(
+ BailoutId bailout_id, SharedFunctionInfo* shared_info, int height) {
+ TranslatedFrame frame(kJavaScriptBuiltinContinuationWithCatch, shared_info,
+ height);
+ frame.node_id_ = bailout_id;
+ return frame;
+}
+
int TranslatedFrame::GetValueCount() {
switch (kind()) {
case kInterpretedFunction: {
@@ -2684,6 +2838,7 @@ int TranslatedFrame::GetValueCount() {
case kConstructStub:
case kBuiltinContinuation:
case kJavaScriptBuiltinContinuation:
+ case kJavaScriptBuiltinContinuationWithCatch:
return 1 + height_;
case kInvalid:
@@ -2792,6 +2947,25 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
return TranslatedFrame::JavaScriptBuiltinContinuationFrame(
bailout_id, shared_info, height_with_context);
}
+ case Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME: {
+ BailoutId bailout_id = BailoutId(iterator->Next());
+ SharedFunctionInfo* shared_info =
+ SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
+ int height = iterator->Next();
+ if (trace_file != nullptr) {
+ std::unique_ptr<char[]> name = shared_info->DebugName()->ToCString();
+ PrintF(trace_file,
+ " reading JavaScript builtin continuation frame with catch %s",
+ name.get());
+ PrintF(trace_file, " => bailout_id=%d, height=%d; inputs:\n",
+ bailout_id.ToInt(), height);
+ }
+ // Add one to the height to account for the context which was implicitly
+ // added to the translation during code generation.
+ int height_with_context = height + 1;
+ return TranslatedFrame::JavaScriptBuiltinContinuationWithCatchFrame(
+ bailout_id, shared_info, height_with_context);
+ }
case Translation::UPDATE_FEEDBACK:
case Translation::BEGIN:
case Translation::DUPLICATED_OBJECT:
@@ -2935,6 +3109,7 @@ int TranslatedState::CreateNextTranslatedValue(
case Translation::ARGUMENTS_ADAPTOR_FRAME:
case Translation::CONSTRUCT_STUB_FRAME:
case Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME:
+ case Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME:
case Translation::BUILTIN_CONTINUATION_FRAME:
case Translation::UPDATE_FEEDBACK:
// Peeled off before getting here.
@@ -3375,6 +3550,16 @@ void TranslatedState::InitializeCapturedObjectAt(
return;
case FIXED_ARRAY_TYPE:
+ case BLOCK_CONTEXT_TYPE:
+ case CATCH_CONTEXT_TYPE:
+ case DEBUG_EVALUATE_CONTEXT_TYPE:
+ case EVAL_CONTEXT_TYPE:
+ case FUNCTION_CONTEXT_TYPE:
+ case MODULE_CONTEXT_TYPE:
+ case NATIVE_CONTEXT_TYPE:
+ case SCRIPT_CONTEXT_TYPE:
+ case WITH_CONTEXT_TYPE:
+ case BOILERPLATE_DESCRIPTION_TYPE:
case HASH_TABLE_TYPE:
case PROPERTY_ARRAY_TYPE:
case CONTEXT_EXTENSION_TYPE:
@@ -3500,6 +3685,15 @@ void TranslatedState::EnsureCapturedObjectAllocatedAt(
return MaterializeMutableHeapNumber(frame, &value_index, slot);
case FIXED_ARRAY_TYPE:
+ case BLOCK_CONTEXT_TYPE:
+ case CATCH_CONTEXT_TYPE:
+ case DEBUG_EVALUATE_CONTEXT_TYPE:
+ case EVAL_CONTEXT_TYPE:
+ case FUNCTION_CONTEXT_TYPE:
+ case MODULE_CONTEXT_TYPE:
+ case NATIVE_CONTEXT_TYPE:
+ case SCRIPT_CONTEXT_TYPE:
+ case WITH_CONTEXT_TYPE:
case HASH_TABLE_TYPE: {
// Check we have the right size.
int array_length =
@@ -3764,7 +3958,9 @@ TranslatedValue* TranslatedState::ResolveCapturedObject(TranslatedValue* slot) {
TranslatedFrame* TranslatedState::GetFrameFromJSFrameIndex(int jsframe_index) {
for (size_t i = 0; i < frames_.size(); i++) {
if (frames_[i].kind() == TranslatedFrame::kInterpretedFunction ||
- frames_[i].kind() == TranslatedFrame::kJavaScriptBuiltinContinuation) {
+ frames_[i].kind() == TranslatedFrame::kJavaScriptBuiltinContinuation ||
+ frames_[i].kind() ==
+ TranslatedFrame::kJavaScriptBuiltinContinuationWithCatch) {
if (jsframe_index > 0) {
jsframe_index--;
} else {
@@ -3779,7 +3975,9 @@ TranslatedFrame* TranslatedState::GetArgumentsInfoFromJSFrameIndex(
int jsframe_index, int* args_count) {
for (size_t i = 0; i < frames_.size(); i++) {
if (frames_[i].kind() == TranslatedFrame::kInterpretedFunction ||
- frames_[i].kind() == TranslatedFrame::kJavaScriptBuiltinContinuation) {
+ frames_[i].kind() == TranslatedFrame::kJavaScriptBuiltinContinuation ||
+ frames_[i].kind() ==
+ TranslatedFrame::kJavaScriptBuiltinContinuationWithCatch) {
if (jsframe_index > 0) {
jsframe_index--;
} else {
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index e72878654d..8f84c89bee 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -12,7 +12,9 @@
#include "src/base/macros.h"
#include "src/boxed-float.h"
#include "src/deoptimize-reason.h"
+#include "src/feedback-vector.h"
#include "src/frame-constants.h"
+#include "src/isolate.h"
#include "src/macro-assembler.h"
#include "src/source-position.h"
#include "src/zone/zone-chunk-list.h"
@@ -150,6 +152,7 @@ class TranslatedFrame {
kConstructStub,
kBuiltinContinuation,
kJavaScriptBuiltinContinuation,
+ kJavaScriptBuiltinContinuationWithCatch,
kInvalid
};
@@ -222,6 +225,8 @@ class TranslatedFrame {
BailoutId bailout_id, SharedFunctionInfo* shared_info, int height);
static TranslatedFrame JavaScriptBuiltinContinuationFrame(
BailoutId bailout_id, SharedFunctionInfo* shared_info, int height);
+ static TranslatedFrame JavaScriptBuiltinContinuationWithCatchFrame(
+ BailoutId bailout_id, SharedFunctionInfo* shared_info, int height);
static TranslatedFrame InvalidFrame() {
return TranslatedFrame(kInvalid, nullptr);
}
@@ -531,8 +536,23 @@ class Deoptimizer : public Malloced {
int frame_index);
void DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
int frame_index);
+
+ enum class BuiltinContinuationMode {
+ STUB,
+ JAVASCRIPT,
+ JAVASCRIPT_WITH_CATCH,
+ JAVASCRIPT_HANDLE_EXCEPTION
+ };
+ static bool BuiltinContinuationModeIsWithCatch(BuiltinContinuationMode mode);
+ static bool BuiltinContinuationModeIsJavaScript(BuiltinContinuationMode mode);
+ static StackFrame::Type BuiltinContinuationModeToFrameType(
+ BuiltinContinuationMode mode);
+ static Builtins::Name TrampolineForBuiltinContinuation(
+ BuiltinContinuationMode mode, bool must_handle_result);
+
void DoComputeBuiltinContinuation(TranslatedFrame* translated_frame,
- int frame_index, bool java_script_frame);
+ int frame_index,
+ BuiltinContinuationMode mode);
void WriteTranslatedValueToOutput(
TranslatedFrame::iterator* iterator, int* input_index, int frame_index,
@@ -866,30 +886,31 @@ class TranslationIterator BASE_EMBEDDED {
int index_;
};
-#define TRANSLATION_OPCODE_LIST(V) \
- V(BEGIN) \
- V(INTERPRETED_FRAME) \
- V(BUILTIN_CONTINUATION_FRAME) \
- V(JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME) \
- V(CONSTRUCT_STUB_FRAME) \
- V(ARGUMENTS_ADAPTOR_FRAME) \
- V(DUPLICATED_OBJECT) \
- V(ARGUMENTS_ELEMENTS) \
- V(ARGUMENTS_LENGTH) \
- V(CAPTURED_OBJECT) \
- V(REGISTER) \
- V(INT32_REGISTER) \
- V(UINT32_REGISTER) \
- V(BOOL_REGISTER) \
- V(FLOAT_REGISTER) \
- V(DOUBLE_REGISTER) \
- V(STACK_SLOT) \
- V(INT32_STACK_SLOT) \
- V(UINT32_STACK_SLOT) \
- V(BOOL_STACK_SLOT) \
- V(FLOAT_STACK_SLOT) \
- V(DOUBLE_STACK_SLOT) \
- V(LITERAL) \
+#define TRANSLATION_OPCODE_LIST(V) \
+ V(BEGIN) \
+ V(INTERPRETED_FRAME) \
+ V(BUILTIN_CONTINUATION_FRAME) \
+ V(JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME) \
+ V(JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME) \
+ V(CONSTRUCT_STUB_FRAME) \
+ V(ARGUMENTS_ADAPTOR_FRAME) \
+ V(DUPLICATED_OBJECT) \
+ V(ARGUMENTS_ELEMENTS) \
+ V(ARGUMENTS_LENGTH) \
+ V(CAPTURED_OBJECT) \
+ V(REGISTER) \
+ V(INT32_REGISTER) \
+ V(UINT32_REGISTER) \
+ V(BOOL_REGISTER) \
+ V(FLOAT_REGISTER) \
+ V(DOUBLE_REGISTER) \
+ V(STACK_SLOT) \
+ V(INT32_STACK_SLOT) \
+ V(UINT32_STACK_SLOT) \
+ V(BOOL_STACK_SLOT) \
+ V(FLOAT_STACK_SLOT) \
+ V(DOUBLE_STACK_SLOT) \
+ V(LITERAL) \
V(UPDATE_FEEDBACK)
class Translation BASE_EMBEDDED {
@@ -922,6 +943,9 @@ class Translation BASE_EMBEDDED {
unsigned height);
void BeginJavaScriptBuiltinContinuationFrame(BailoutId bailout_id,
int literal_id, unsigned height);
+ void BeginJavaScriptBuiltinContinuationWithCatchFrame(BailoutId bailout_id,
+ int literal_id,
+ unsigned height);
void ArgumentsElements(CreateArgumentsType type);
void ArgumentsLength(CreateArgumentsType type);
void BeginCapturedObject(int length);
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index 86cce891ec..263f502a9f 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -48,9 +48,9 @@ const char* V8NameConverter::NameOfAddress(byte* pc) const {
return v8_buffer_.start();
}
- int offs = static_cast<int>(pc - code_->instruction_start());
+ int offs = static_cast<int>(pc - code_->raw_instruction_start());
// print as code offset, if it seems reasonable
- if (0 <= offs && offs < code_->instruction_size()) {
+ if (0 <= offs && offs < code_->raw_instruction_size()) {
SNPrintF(v8_buffer_, "%p <+0x%x>", static_cast<void*>(pc), offs);
return v8_buffer_.start();
}
@@ -135,6 +135,8 @@ static void PrintRelocInfo(StringBuilder* out, Isolate* isolate,
out->AddFormatted(" %s, %s, ", Code::Kind2String(kind),
CodeStub::MajorName(major_key));
out->AddFormatted("minor: %d", minor_key);
+ } else if (code->is_builtin()) {
+ out->AddFormatted(" Builtin::%s", Builtins::name(code->builtin_index()));
} else {
out->AddFormatted(" %s", Code::Kind2String(kind));
}
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index 471798dd79..27abf62410 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -6,8 +6,8 @@
#include "src/arguments.h"
#include "src/conversions.h"
-#include "src/factory.h"
#include "src/frames.h"
+#include "src/heap/factory.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
#include "src/objects-inl.h"
@@ -111,8 +111,7 @@ template<ElementsKind Kind> class ElementsKindTraits {
ELEMENTS_LIST(ELEMENTS_TRAITS)
#undef ELEMENTS_TRAITS
-
-MUST_USE_RESULT
+V8_WARN_UNUSED_RESULT
MaybeHandle<Object> ThrowArrayLengthRangeError(Isolate* isolate) {
THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kInvalidArrayLength),
Object);
diff --git a/deps/v8/src/elements.h b/deps/v8/src/elements.h
index a2b8b49c93..385ae85ad1 100644
--- a/deps/v8/src/elements.h
+++ b/deps/v8/src/elements.h
@@ -137,8 +137,8 @@ class ElementsAccessor {
virtual uint32_t Push(Handle<JSArray> receiver, Arguments* args,
uint32_t push_size) = 0;
- virtual uint32_t Unshift(Handle<JSArray> receiver,
- Arguments* args, uint32_t unshift_size) = 0;
+ virtual uint32_t Unshift(Handle<JSArray> receiver, Arguments* args,
+ uint32_t unshift_size) = 0;
virtual Handle<JSObject> Slice(Handle<JSObject> receiver, uint32_t start,
uint32_t end) = 0;
@@ -239,9 +239,8 @@ class ElementsAccessor {
void CheckArrayAbuse(Handle<JSObject> obj, const char* op, uint32_t index,
bool allow_appending = false);
-MUST_USE_RESULT MaybeHandle<Object> ArrayConstructInitializeElements(
- Handle<JSArray> array,
- Arguments* args);
+V8_WARN_UNUSED_RESULT MaybeHandle<Object> ArrayConstructInitializeElements(
+ Handle<JSArray> array, Arguments* args);
// Called directly from CSA.
void CopyFastNumberJSArrayElementsToTypedArray(Context* context,
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index edd329f5da..50e05a0e6e 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -35,7 +35,7 @@ void StackGuard::reset_limits(const ExecutionAccess& lock) {
static void PrintDeserializedCodeInfo(Handle<JSFunction> function) {
- if (function->code() == function->shared()->code() &&
+ if (function->code() == function->shared()->GetCode() &&
function->shared()->deserialized()) {
PrintF("[Running deserialized script");
Object* script = function->shared()->script();
@@ -52,7 +52,7 @@ static void PrintDeserializedCodeInfo(Handle<JSFunction> function) {
namespace {
-MUST_USE_RESULT MaybeHandle<Object> Invoke(
+V8_WARN_UNUSED_RESULT MaybeHandle<Object> Invoke(
Isolate* isolate, bool is_construct, Handle<Object> target,
Handle<Object> receiver, int argc, Handle<Object> args[],
Handle<Object> new_target, Execution::MessageHandling message_handling,
@@ -74,11 +74,13 @@ MUST_USE_RESULT MaybeHandle<Object> Invoke(
}
#endif
- // api callbacks can be called directly.
+ // api callbacks can be called directly, unless we want to take the detour
+ // through JS to set up a frame for break-at-entry.
if (target->IsJSFunction()) {
Handle<JSFunction> function = Handle<JSFunction>::cast(target);
if ((!is_construct || function->IsConstructor()) &&
- function->shared()->IsApiFunction()) {
+ function->shared()->IsApiFunction() &&
+ !function->shared()->BreakAtEntry()) {
SaveContext save(isolate);
isolate->set_context(function->context());
DCHECK(function->context()->global_object()->IsJSGlobalObject());
diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h
index 5030e261d6..95135031cf 100644
--- a/deps/v8/src/execution.h
+++ b/deps/v8/src/execution.h
@@ -26,21 +26,18 @@ class Execution final : public AllStatic {
// When the function called is not in strict mode, receiver is
// converted to an object.
//
- V8_EXPORT_PRIVATE MUST_USE_RESULT static MaybeHandle<Object> Call(
+ V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static MaybeHandle<Object> Call(
Isolate* isolate, Handle<Object> callable, Handle<Object> receiver,
int argc, Handle<Object> argv[]);
// Construct object from function, the caller supplies an array of
// arguments.
- MUST_USE_RESULT static MaybeHandle<Object> New(Isolate* isolate,
- Handle<Object> constructor,
- int argc,
- Handle<Object> argv[]);
- MUST_USE_RESULT static MaybeHandle<Object> New(Isolate* isolate,
- Handle<Object> constructor,
- Handle<Object> new_target,
- int argc,
- Handle<Object> argv[]);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> New(
+ Isolate* isolate, Handle<Object> constructor, int argc,
+ Handle<Object> argv[]);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> New(
+ Isolate* isolate, Handle<Object> constructor, Handle<Object> new_target,
+ int argc, Handle<Object> argv[]);
  // Call a function, just like Call(), but handle exceptions; don't report them
// externally.
diff --git a/deps/v8/src/external-reference-table.cc b/deps/v8/src/external-reference-table.cc
index c20592dc81..3c3ff92c65 100644
--- a/deps/v8/src/external-reference-table.cc
+++ b/deps/v8/src/external-reference-table.cc
@@ -5,8 +5,8 @@
#include "src/external-reference-table.h"
#include "src/accessors.h"
-#include "src/assembler.h"
#include "src/counters.h"
+#include "src/external-reference.h"
#include "src/ic/stub-cache.h"
#if defined(DEBUG) && defined(V8_OS_LINUX) && !defined(V8_OS_ANDROID)
@@ -24,25 +24,20 @@ namespace internal {
BUILTIN_LIST_C(FORWARD_DECLARE)
#undef FORWARD_DECLARE
-ExternalReferenceTable* ExternalReferenceTable::instance(Isolate* isolate) {
- ExternalReferenceTable* external_reference_table =
- isolate->external_reference_table();
- if (external_reference_table == nullptr) {
- external_reference_table = new ExternalReferenceTable(isolate);
- isolate->set_external_reference_table(external_reference_table);
- }
- return external_reference_table;
-}
+void ExternalReferenceTable::Init(Isolate* isolate) {
+ int index = 0;
-ExternalReferenceTable::ExternalReferenceTable(Isolate* isolate) {
// nullptr is preserved through serialization/deserialization.
- Add(nullptr, "nullptr");
- AddReferences(isolate);
- AddBuiltins(isolate);
- AddRuntimeFunctions(isolate);
- AddIsolateAddresses(isolate);
- AddAccessors(isolate);
- AddStubCache(isolate);
+ Add(nullptr, "nullptr", &index);
+ AddReferences(isolate, &index);
+ AddBuiltins(isolate, &index);
+ AddRuntimeFunctions(isolate, &index);
+ AddIsolateAddresses(isolate, &index);
+ AddAccessors(isolate, &index);
+ AddStubCache(isolate, &index);
+ is_initialized_ = true;
+
+ CHECK_EQ(kSize, index);
}
const char* ExternalReferenceTable::ResolveSymbol(void* address) {
@@ -58,302 +53,25 @@ const char* ExternalReferenceTable::ResolveSymbol(void* address) {
#endif // SYMBOLIZE_FUNCTION
}
-void ExternalReferenceTable::Add(Address address, const char* name) {
- refs_.emplace_back(address, name);
+void ExternalReferenceTable::Add(Address address, const char* name,
+ int* index) {
+ refs_[(*index)++] = {address, name};
}
-void ExternalReferenceTable::AddReferences(Isolate* isolate) {
- // Miscellaneous
- Add(ExternalReference::roots_array_start(isolate).address(),
- "Heap::roots_array_start()");
- Add(ExternalReference::address_of_stack_limit(isolate).address(),
- "StackGuard::address_of_jslimit()");
- Add(ExternalReference::address_of_real_stack_limit(isolate).address(),
- "StackGuard::address_of_real_jslimit()");
- Add(ExternalReference::new_space_allocation_limit_address(isolate).address(),
- "Heap::NewSpaceAllocationLimitAddress()");
- Add(ExternalReference::new_space_allocation_top_address(isolate).address(),
- "Heap::NewSpaceAllocationTopAddress()");
- Add(ExternalReference::mod_two_doubles_operation(isolate).address(),
- "mod_two_doubles");
- Add(ExternalReference::handle_scope_next_address(isolate).address(),
- "HandleScope::next");
- Add(ExternalReference::handle_scope_limit_address(isolate).address(),
- "HandleScope::limit");
- Add(ExternalReference::handle_scope_level_address(isolate).address(),
- "HandleScope::level");
- Add(ExternalReference::new_deoptimizer_function(isolate).address(),
- "Deoptimizer::New()");
- Add(ExternalReference::compute_output_frames_function(isolate).address(),
- "Deoptimizer::ComputeOutputFrames()");
- Add(ExternalReference::address_of_min_int().address(),
- "LDoubleConstant::min_int");
- Add(ExternalReference::address_of_one_half().address(),
- "LDoubleConstant::one_half");
- Add(ExternalReference::isolate_address(isolate).address(), "isolate");
- Add(ExternalReference::builtins_address(isolate).address(), "builtins");
- Add(ExternalReference::handle_scope_implementer_address(isolate).address(),
- "Isolate::handle_scope_implementer_address");
- Add(ExternalReference::pending_microtask_count_address(isolate).address(),
- "Isolate::pending_microtask_count_address()");
- Add(ExternalReference::interpreter_dispatch_table_address(isolate).address(),
- "Interpreter::dispatch_table_address");
- Add(ExternalReference::bytecode_size_table_address(isolate).address(),
- "Bytecodes::bytecode_size_table_address");
- Add(ExternalReference::address_of_negative_infinity().address(),
- "LDoubleConstant::negative_infinity");
- Add(ExternalReference::power_double_double_function(isolate).address(),
- "power_double_double_function");
- Add(ExternalReference::ieee754_acos_function(isolate).address(),
- "base::ieee754::acos");
- Add(ExternalReference::ieee754_acosh_function(isolate).address(),
- "base::ieee754::acosh");
- Add(ExternalReference::ieee754_asin_function(isolate).address(),
- "base::ieee754::asin");
- Add(ExternalReference::ieee754_asinh_function(isolate).address(),
- "base::ieee754::asinh");
- Add(ExternalReference::ieee754_atan_function(isolate).address(),
- "base::ieee754::atan");
- Add(ExternalReference::ieee754_atanh_function(isolate).address(),
- "base::ieee754::atanh");
- Add(ExternalReference::ieee754_atan2_function(isolate).address(),
- "base::ieee754::atan2");
- Add(ExternalReference::ieee754_cbrt_function(isolate).address(),
- "base::ieee754::cbrt");
- Add(ExternalReference::ieee754_cos_function(isolate).address(),
- "base::ieee754::cos");
- Add(ExternalReference::ieee754_cosh_function(isolate).address(),
- "base::ieee754::cosh");
- Add(ExternalReference::ieee754_exp_function(isolate).address(),
- "base::ieee754::exp");
- Add(ExternalReference::ieee754_expm1_function(isolate).address(),
- "base::ieee754::expm1");
- Add(ExternalReference::ieee754_log_function(isolate).address(),
- "base::ieee754::log");
- Add(ExternalReference::ieee754_log1p_function(isolate).address(),
- "base::ieee754::log1p");
- Add(ExternalReference::ieee754_log10_function(isolate).address(),
- "base::ieee754::log10");
- Add(ExternalReference::ieee754_log2_function(isolate).address(),
- "base::ieee754::log2");
- Add(ExternalReference::ieee754_sin_function(isolate).address(),
- "base::ieee754::sin");
- Add(ExternalReference::ieee754_sinh_function(isolate).address(),
- "base::ieee754::sinh");
- Add(ExternalReference::ieee754_tan_function(isolate).address(),
- "base::ieee754::tan");
- Add(ExternalReference::ieee754_tanh_function(isolate).address(),
- "base::ieee754::tanh");
- Add(ExternalReference::store_buffer_top(isolate).address(),
- "store_buffer_top");
- Add(ExternalReference::heap_is_marking_flag_address(isolate).address(),
- "heap_is_marking_flag_address");
- Add(ExternalReference::address_of_the_hole_nan().address(), "the_hole_nan");
- Add(ExternalReference::get_date_field_function(isolate).address(),
- "JSDate::GetField");
- Add(ExternalReference::date_cache_stamp(isolate).address(),
- "date_cache_stamp");
- Add(ExternalReference::address_of_pending_message_obj(isolate).address(),
- "address_of_pending_message_obj");
- Add(ExternalReference::cpu_features().address(), "cpu_features");
- Add(ExternalReference::old_space_allocation_top_address(isolate).address(),
- "Heap::OldSpaceAllocationTopAddress");
- Add(ExternalReference::old_space_allocation_limit_address(isolate).address(),
- "Heap::OldSpaceAllocationLimitAddress");
- Add(ExternalReference::allocation_sites_list_address(isolate).address(),
- "Heap::allocation_sites_list_address()");
- Add(ExternalReference::address_of_uint32_bias().address(), "uint32_bias");
- Add(ExternalReference::is_profiling_address(isolate).address(),
- "Isolate::is_profiling");
- Add(ExternalReference::scheduled_exception_address(isolate).address(),
- "Isolate::scheduled_exception");
- Add(ExternalReference::invoke_function_callback(isolate).address(),
- "InvokeFunctionCallback");
- Add(ExternalReference::invoke_accessor_getter_callback(isolate).address(),
- "InvokeAccessorGetterCallback");
- Add(ExternalReference::wasm_f32_trunc(isolate).address(),
- "wasm::f32_trunc_wrapper");
- Add(ExternalReference::wasm_f32_floor(isolate).address(),
- "wasm::f32_floor_wrapper");
- Add(ExternalReference::wasm_f32_ceil(isolate).address(),
- "wasm::f32_ceil_wrapper");
- Add(ExternalReference::wasm_f32_nearest_int(isolate).address(),
- "wasm::f32_nearest_int_wrapper");
- Add(ExternalReference::wasm_f64_trunc(isolate).address(),
- "wasm::f64_trunc_wrapper");
- Add(ExternalReference::wasm_f64_floor(isolate).address(),
- "wasm::f64_floor_wrapper");
- Add(ExternalReference::wasm_f64_ceil(isolate).address(),
- "wasm::f64_ceil_wrapper");
- Add(ExternalReference::wasm_f64_nearest_int(isolate).address(),
- "wasm::f64_nearest_int_wrapper");
- Add(ExternalReference::wasm_int64_to_float32(isolate).address(),
- "wasm::int64_to_float32_wrapper");
- Add(ExternalReference::wasm_uint64_to_float32(isolate).address(),
- "wasm::uint64_to_float32_wrapper");
- Add(ExternalReference::wasm_int64_to_float64(isolate).address(),
- "wasm::int64_to_float64_wrapper");
- Add(ExternalReference::wasm_uint64_to_float64(isolate).address(),
- "wasm::uint64_to_float64_wrapper");
- Add(ExternalReference::wasm_float32_to_int64(isolate).address(),
- "wasm::float32_to_int64_wrapper");
- Add(ExternalReference::wasm_float32_to_uint64(isolate).address(),
- "wasm::float32_to_uint64_wrapper");
- Add(ExternalReference::wasm_float64_to_int64(isolate).address(),
- "wasm::float64_to_int64_wrapper");
- Add(ExternalReference::wasm_float64_to_uint64(isolate).address(),
- "wasm::float64_to_uint64_wrapper");
- Add(ExternalReference::wasm_float64_pow(isolate).address(),
- "wasm::float64_pow");
- Add(ExternalReference::wasm_int64_div(isolate).address(), "wasm::int64_div");
- Add(ExternalReference::wasm_int64_mod(isolate).address(), "wasm::int64_mod");
- Add(ExternalReference::wasm_uint64_div(isolate).address(),
- "wasm::uint64_div");
- Add(ExternalReference::wasm_uint64_mod(isolate).address(),
- "wasm::uint64_mod");
- Add(ExternalReference::wasm_word32_ctz(isolate).address(),
- "wasm::word32_ctz");
- Add(ExternalReference::wasm_word64_ctz(isolate).address(),
- "wasm::word64_ctz");
- Add(ExternalReference::wasm_word32_popcnt(isolate).address(),
- "wasm::word32_popcnt");
- Add(ExternalReference::wasm_word64_popcnt(isolate).address(),
- "wasm::word64_popcnt");
- // If the trap handler is not supported, the optimizer will remove these
- // runtime functions. In this case, the arm simulator will break if we add
- // them to the external reference table.
-#ifdef V8_TARGET_ARCH_X64
- Add(ExternalReference::wasm_set_thread_in_wasm_flag(isolate).address(),
- "wasm::set_thread_in_wasm_flag");
- Add(ExternalReference::wasm_clear_thread_in_wasm_flag(isolate).address(),
- "wasm::clear_thread_in_wasm_flag");
-#endif
- Add(ExternalReference::f64_acos_wrapper_function(isolate).address(),
- "f64_acos_wrapper");
- Add(ExternalReference::f64_asin_wrapper_function(isolate).address(),
- "f64_asin_wrapper");
- Add(ExternalReference::f64_mod_wrapper_function(isolate).address(),
- "f64_mod_wrapper");
- Add(ExternalReference::wasm_call_trap_callback_for_testing(isolate).address(),
- "wasm::call_trap_callback_for_testing");
- Add(ExternalReference::libc_memchr_function(isolate).address(),
- "libc_memchr");
- Add(ExternalReference::libc_memcpy_function(isolate).address(),
- "libc_memcpy");
- Add(ExternalReference::libc_memmove_function(isolate).address(),
- "libc_memmove");
- Add(ExternalReference::libc_memset_function(isolate).address(),
- "libc_memset");
- Add(ExternalReference::printf_function(isolate).address(), "printf");
- Add(ExternalReference::try_internalize_string_function(isolate).address(),
- "try_internalize_string_function");
- Add(ExternalReference::check_object_type(isolate).address(),
- "check_object_type");
-#ifdef V8_INTL_SUPPORT
- Add(ExternalReference::intl_convert_one_byte_to_lower(isolate).address(),
- "intl_convert_one_byte_to_lower");
- Add(ExternalReference::intl_to_latin1_lower_table(isolate).address(),
- "intl_to_latin1_lower_table");
-#endif // V8_INTL_SUPPORT
- Add(ExternalReference::search_string_raw<const uint8_t, const uint8_t>(
- isolate)
- .address(),
- "search_string_raw<1-byte, 1-byte>");
- Add(ExternalReference::search_string_raw<const uint8_t, const uc16>(isolate)
- .address(),
- "search_string_raw<1-byte, 2-byte>");
- Add(ExternalReference::search_string_raw<const uc16, const uint8_t>(isolate)
- .address(),
- "search_string_raw<2-byte, 1-byte>");
- Add(ExternalReference::search_string_raw<const uc16, const uc16>(isolate)
- .address(),
- "search_string_raw<1-byte, 2-byte>");
- Add(ExternalReference::orderedhashmap_gethash_raw(isolate).address(),
- "orderedhashmap_gethash_raw");
- Add(ExternalReference::get_or_create_hash_raw(isolate).address(),
- "get_or_create_hash_raw");
- Add(ExternalReference::jsreceiver_create_identity_hash(isolate).address(),
- "jsreceiver_create_identity_hash");
- Add(ExternalReference::copy_fast_number_jsarray_elements_to_typed_array(
- isolate)
- .address(),
- "copy_fast_number_jsarray_elements_to_typed_array");
- Add(ExternalReference::copy_typed_array_elements_to_typed_array(isolate)
- .address(),
- "copy_typed_array_elements_to_typed_array");
- Add(ExternalReference::copy_typed_array_elements_slice(isolate).address(),
- "copy_typed_array_elements_slice");
- Add(ExternalReference::log_enter_external_function(isolate).address(),
- "Logger::EnterExternal");
- Add(ExternalReference::log_leave_external_function(isolate).address(),
- "Logger::LeaveExternal");
- Add(ExternalReference::address_of_minus_one_half().address(),
- "double_constants.minus_one_half");
- Add(ExternalReference::stress_deopt_count(isolate).address(),
- "Isolate::stress_deopt_count_address()");
- Add(ExternalReference::force_slow_path(isolate).address(),
- "Isolate::force_slow_path_address()");
- Add(ExternalReference::runtime_function_table_address(isolate).address(),
- "Runtime::runtime_function_table_address()");
- Add(ExternalReference::address_of_float_abs_constant().address(),
- "float_absolute_constant");
- Add(ExternalReference::address_of_float_neg_constant().address(),
- "float_negate_constant");
- Add(ExternalReference::address_of_double_abs_constant().address(),
- "double_absolute_constant");
- Add(ExternalReference::address_of_double_neg_constant().address(),
- "double_negate_constant");
- Add(ExternalReference::promise_hook_or_debug_is_active_address(isolate)
- .address(),
- "Isolate::promise_hook_or_debug_is_active_address()");
-
- // Debug addresses
- Add(ExternalReference::debug_is_active_address(isolate).address(),
- "Debug::is_active_address()");
- Add(ExternalReference::debug_hook_on_function_call_address(isolate).address(),
- "Debug::hook_on_function_call_address()");
- Add(ExternalReference::debug_last_step_action_address(isolate).address(),
- "Debug::step_in_enabled_address()");
- Add(ExternalReference::debug_suspended_generator_address(isolate).address(),
- "Debug::step_suspended_generator_address()");
- Add(ExternalReference::debug_restart_fp_address(isolate).address(),
- "Debug::restart_fp_address()");
-
-#ifndef V8_INTERPRETED_REGEXP
- Add(ExternalReference::re_case_insensitive_compare_uc16(isolate).address(),
- "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()");
- Add(ExternalReference::re_check_stack_guard_state(isolate).address(),
- "RegExpMacroAssembler*::CheckStackGuardState()");
- Add(ExternalReference::re_grow_stack(isolate).address(),
- "NativeRegExpMacroAssembler::GrowStack()");
- Add(ExternalReference::re_word_character_map().address(),
- "NativeRegExpMacroAssembler::word_character_map");
- Add(ExternalReference::address_of_regexp_stack_limit(isolate).address(),
- "RegExpStack::limit_address()");
- Add(ExternalReference::address_of_regexp_stack_memory_address(isolate)
- .address(),
- "RegExpStack::memory_address()");
- Add(ExternalReference::address_of_regexp_stack_memory_size(isolate).address(),
- "RegExpStack::memory_size()");
- Add(ExternalReference::address_of_static_offsets_vector(isolate).address(),
- "OffsetsVector::static_offsets_vector");
-#endif // V8_INTERPRETED_REGEXP
-
- // Runtime entries
- Add(ExternalReference::delete_handle_scope_extensions(isolate).address(),
- "HandleScope::DeleteExtensions");
- Add(ExternalReference::incremental_marking_record_write_function(isolate)
- .address(),
- "IncrementalMarking::RecordWrite");
- Add(ExternalReference::store_buffer_overflow_function(isolate).address(),
- "StoreBuffer::StoreBufferOverflow");
-
- Add(ExternalReference::invalidate_prototype_chains_function(isolate)
- .address(),
- "JSObject::InvalidatePrototypeChains()");
+void ExternalReferenceTable::AddReferences(Isolate* isolate, int* index) {
+ CHECK_EQ(kSpecialReferenceCount, *index);
+
+#define ADD_EXTERNAL_REFERENCE(name, desc) \
+ Add(ExternalReference::name(isolate).address(), desc, index);
+ EXTERNAL_REFERENCE_LIST(ADD_EXTERNAL_REFERENCE)
+#undef ADD_EXTERNAL_REFERENCE
+
+ CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount, *index);
}
-void ExternalReferenceTable::AddBuiltins(Isolate* isolate) {
+void ExternalReferenceTable::AddBuiltins(Isolate* isolate, int* index) {
+ CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount, *index);
+
struct CBuiltinEntry {
Address address;
const char* name;
@@ -365,11 +83,19 @@ void ExternalReferenceTable::AddBuiltins(Isolate* isolate) {
};
for (unsigned i = 0; i < arraysize(c_builtins); ++i) {
Add(ExternalReference(c_builtins[i].address, isolate).address(),
- c_builtins[i].name);
+ c_builtins[i].name, index);
}
+
+ CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount +
+ kBuiltinsReferenceCount,
+ *index);
}
-void ExternalReferenceTable::AddRuntimeFunctions(Isolate* isolate) {
+void ExternalReferenceTable::AddRuntimeFunctions(Isolate* isolate, int* index) {
+ CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount +
+ kBuiltinsReferenceCount,
+ *index);
+
struct RuntimeEntry {
Runtime::FunctionId id;
const char* name;
@@ -383,11 +109,19 @@ void ExternalReferenceTable::AddRuntimeFunctions(Isolate* isolate) {
for (unsigned i = 0; i < arraysize(runtime_functions); ++i) {
ExternalReference ref(runtime_functions[i].id, isolate);
- Add(ref.address(), runtime_functions[i].name);
+ Add(ref.address(), runtime_functions[i].name, index);
}
+
+ CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount +
+ kBuiltinsReferenceCount + kRuntimeReferenceCount,
+ *index);
}
-void ExternalReferenceTable::AddIsolateAddresses(Isolate* isolate) {
+void ExternalReferenceTable::AddIsolateAddresses(Isolate* isolate, int* index) {
+ CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount +
+ kBuiltinsReferenceCount + kRuntimeReferenceCount,
+ *index);
+
// Top addresses
static const char* address_names[] = {
#define BUILD_NAME_LITERAL(Name, name) "Isolate::" #name "_address",
@@ -397,11 +131,21 @@ void ExternalReferenceTable::AddIsolateAddresses(Isolate* isolate) {
for (int i = 0; i < IsolateAddressId::kIsolateAddressCount; ++i) {
Add(isolate->get_address_from_id(static_cast<IsolateAddressId>(i)),
- address_names[i]);
+ address_names[i], index);
}
+
+ CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount +
+ kBuiltinsReferenceCount + kRuntimeReferenceCount +
+ kIsolateAddressReferenceCount,
+ *index);
}
-void ExternalReferenceTable::AddAccessors(Isolate* isolate) {
+void ExternalReferenceTable::AddAccessors(Isolate* isolate, int* index) {
+ CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount +
+ kBuiltinsReferenceCount + kRuntimeReferenceCount +
+ kIsolateAddressReferenceCount,
+ *index);
+
// Accessors
struct AccessorRefTable {
Address address;
@@ -423,46 +167,63 @@ void ExternalReferenceTable::AddAccessors(Isolate* isolate) {
};
for (unsigned i = 0; i < arraysize(getters); ++i) {
- Add(getters[i].address, getters[i].name);
+ Add(getters[i].address, getters[i].name, index);
}
for (unsigned i = 0; i < arraysize(setters); ++i) {
- Add(setters[i].address, setters[i].name);
+ Add(setters[i].address, setters[i].name, index);
}
+
+ CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount +
+ kBuiltinsReferenceCount + kRuntimeReferenceCount +
+ kIsolateAddressReferenceCount + kAccessorReferenceCount,
+ *index);
}
-void ExternalReferenceTable::AddStubCache(Isolate* isolate) {
+void ExternalReferenceTable::AddStubCache(Isolate* isolate, int* index) {
+ CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount +
+ kBuiltinsReferenceCount + kRuntimeReferenceCount +
+ kIsolateAddressReferenceCount + kAccessorReferenceCount,
+ *index);
+
StubCache* load_stub_cache = isolate->load_stub_cache();
// Stub cache tables
Add(load_stub_cache->key_reference(StubCache::kPrimary).address(),
- "Load StubCache::primary_->key");
+ "Load StubCache::primary_->key", index);
Add(load_stub_cache->value_reference(StubCache::kPrimary).address(),
- "Load StubCache::primary_->value");
+ "Load StubCache::primary_->value", index);
Add(load_stub_cache->map_reference(StubCache::kPrimary).address(),
- "Load StubCache::primary_->map");
+ "Load StubCache::primary_->map", index);
Add(load_stub_cache->key_reference(StubCache::kSecondary).address(),
- "Load StubCache::secondary_->key");
+ "Load StubCache::secondary_->key", index);
Add(load_stub_cache->value_reference(StubCache::kSecondary).address(),
- "Load StubCache::secondary_->value");
+ "Load StubCache::secondary_->value", index);
Add(load_stub_cache->map_reference(StubCache::kSecondary).address(),
- "Load StubCache::secondary_->map");
+ "Load StubCache::secondary_->map", index);
StubCache* store_stub_cache = isolate->store_stub_cache();
// Stub cache tables
Add(store_stub_cache->key_reference(StubCache::kPrimary).address(),
- "Store StubCache::primary_->key");
+ "Store StubCache::primary_->key", index);
Add(store_stub_cache->value_reference(StubCache::kPrimary).address(),
- "Store StubCache::primary_->value");
+ "Store StubCache::primary_->value", index);
Add(store_stub_cache->map_reference(StubCache::kPrimary).address(),
- "Store StubCache::primary_->map");
+ "Store StubCache::primary_->map", index);
Add(store_stub_cache->key_reference(StubCache::kSecondary).address(),
- "Store StubCache::secondary_->key");
+ "Store StubCache::secondary_->key", index);
Add(store_stub_cache->value_reference(StubCache::kSecondary).address(),
- "Store StubCache::secondary_->value");
+ "Store StubCache::secondary_->value", index);
Add(store_stub_cache->map_reference(StubCache::kSecondary).address(),
- "Store StubCache::secondary_->map");
+ "Store StubCache::secondary_->map", index);
+
+ CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount +
+ kBuiltinsReferenceCount + kRuntimeReferenceCount +
+ kIsolateAddressReferenceCount + kAccessorReferenceCount +
+ kStubCacheReferenceCount,
+ *index);
+ CHECK_EQ(kSize, *index);
}
} // namespace internal
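
Init() threads a single running index through every Add* helper: each section writes into the fixed refs_ array at (*index)++ and then CHECK_EQs the cursor against the sum of the preceding section counts, so any mismatch between the compile-time counts and the runtime additions fails immediately. A self-contained sketch of the same pattern with illustrative names and sizes (not V8's):

#include <cassert>

struct Entry { const void* address; const char* name; };

constexpr int kSectionACount = 2;
constexpr int kSectionBCount = 1;
constexpr int kTotal = kSectionACount + kSectionBCount;

struct Table {
  Entry refs[kTotal];
  // Write the next slot and advance the shared cursor.
  void Add(const void* address, const char* name, int* index) {
    refs[(*index)++] = {address, name};
  }
  void AddSectionA(int* index) {
    Add(nullptr, "a0", index);
    Add(nullptr, "a1", index);
    assert(*index == kSectionACount);  // mirrors the CHECK_EQ after each section
  }
  void AddSectionB(int* index) {
    Add(nullptr, "b0", index);
    assert(*index == kSectionACount + kSectionBCount);
  }
  void Init() {
    int index = 0;
    AddSectionA(&index);
    AddSectionB(&index);
    assert(index == kTotal);  // every slot filled exactly once
  }
};

int main() { Table t; t.Init(); }
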
diff --git a/deps/v8/src/external-reference-table.h b/deps/v8/src/external-reference-table.h
index b2275049a0..a0b53da2e3 100644
--- a/deps/v8/src/external-reference-table.h
+++ b/deps/v8/src/external-reference-table.h
@@ -7,8 +7,10 @@
#include <vector>
+#include "src/accessors.h"
#include "src/address-map.h"
#include "src/builtins/builtins.h"
+#include "src/external-reference.h"
namespace v8 {
namespace internal {
@@ -20,35 +22,66 @@ class Isolate;
// hashmaps in ExternalReferenceEncoder and ExternalReferenceDecoder.
class ExternalReferenceTable {
public:
- static ExternalReferenceTable* instance(Isolate* isolate);
+ // For the nullptr ref, see the constructor.
+ static constexpr int kSpecialReferenceCount = 1;
+ static constexpr int kExternalReferenceCount =
+ ExternalReference::kExternalReferenceCount;
+ static constexpr int kBuiltinsReferenceCount =
+#define COUNT_C_BUILTIN(...) +1
+ BUILTIN_LIST_C(COUNT_C_BUILTIN);
+#undef COUNT_C_BUILTIN
+ static constexpr int kRuntimeReferenceCount =
+ Runtime::kNumFunctions / 2; // Don't count dupe kInline... functions.
+ static constexpr int kIsolateAddressReferenceCount = kIsolateAddressCount;
+ static constexpr int kAccessorReferenceCount =
+ Accessors::kAccessorInfoCount + Accessors::kAccessorSetterCount;
+ // The number of stub cache external references, see AddStubCache.
+ static constexpr int kStubCacheReferenceCount = 12;
+ static constexpr int kSize =
+ kSpecialReferenceCount + kExternalReferenceCount +
+ kBuiltinsReferenceCount + kRuntimeReferenceCount +
+ kIsolateAddressReferenceCount + kAccessorReferenceCount +
+ kStubCacheReferenceCount;
- uint32_t size() const { return static_cast<uint32_t>(refs_.size()); }
+ uint32_t size() const { return static_cast<uint32_t>(kSize); }
Address address(uint32_t i) { return refs_[i].address; }
const char* name(uint32_t i) { return refs_[i].name; }
+ bool is_initialized() const { return is_initialized_; }
+
static const char* ResolveSymbol(void* address);
+ static uint32_t OffsetOfEntry(uint32_t i) {
+ // Used in CodeAssembler::LookupExternalReference.
+ STATIC_ASSERT(offsetof(ExternalReferenceEntry, address) == 0);
+ return i * sizeof(ExternalReferenceEntry);
+ }
+
+ ExternalReferenceTable() {}
+ void Init(Isolate* isolate);
+
private:
struct ExternalReferenceEntry {
Address address;
const char* name;
+ ExternalReferenceEntry() : address(nullptr), name(nullptr) {}
ExternalReferenceEntry(Address address, const char* name)
: address(address), name(name) {}
};
- explicit ExternalReferenceTable(Isolate* isolate);
+ void Add(Address address, const char* name, int* index);
- void Add(Address address, const char* name);
+ void AddReferences(Isolate* isolate, int* index);
+ void AddBuiltins(Isolate* isolate, int* index);
+ void AddRuntimeFunctions(Isolate* isolate, int* index);
+ void AddIsolateAddresses(Isolate* isolate, int* index);
+ void AddAccessors(Isolate* isolate, int* index);
+ void AddStubCache(Isolate* isolate, int* index);
- void AddReferences(Isolate* isolate);
- void AddBuiltins(Isolate* isolate);
- void AddRuntimeFunctions(Isolate* isolate);
- void AddIsolateAddresses(Isolate* isolate);
- void AddAccessors(Isolate* isolate);
- void AddStubCache(Isolate* isolate);
+ ExternalReferenceEntry refs_[kSize];
+ bool is_initialized_ = false;
- std::vector<ExternalReferenceEntry> refs_;
DISALLOW_COPY_AND_ASSIGN(ExternalReferenceTable);
};
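
The header can size refs_ as a plain fixed array because every section count is now a compile-time constant; kBuiltinsReferenceCount, for example, is produced by expanding each BUILTIN_LIST_C entry to +1. A small standalone sketch of that counting trick with an invented list (the macro names here are illustrative):

// An X-macro list: each entry invokes the macro V once.
#define MY_LIST(V) \
  V(alpha, "alpha") \
  V(beta, "beta") \
  V(gamma, "gamma")

// Expanding every entry to "+1" turns the list into an integer constant.
#define COUNT_ENTRY(...) +1
constexpr int kMyListCount = 0 MY_LIST(COUNT_ENTRY);  // 0 +1 +1 +1 == 3
#undef COUNT_ENTRY

static_assert(kMyListCount == 3, "three entries in MY_LIST");
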
diff --git a/deps/v8/src/external-reference.cc b/deps/v8/src/external-reference.cc
new file mode 100644
index 0000000000..49186ec159
--- /dev/null
+++ b/deps/v8/src/external-reference.cc
@@ -0,0 +1,1028 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/external-reference.h"
+
+#include "src/api.h"
+#include "src/base/ieee754.h"
+#include "src/codegen.h"
+#include "src/compiler/code-assembler.h"
+#include "src/counters.h"
+#include "src/debug/debug.h"
+#include "src/deoptimizer.h"
+#include "src/elements.h"
+#include "src/heap/heap.h"
+#include "src/ic/stub-cache.h"
+#include "src/interpreter/interpreter.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+#include "src/regexp/regexp-stack.h"
+#include "src/string-search.h"
+#include "src/wasm/wasm-external-refs.h"
+
+// Include native regexp-macro-assembler.
+#ifndef V8_INTERPRETED_REGEXP
+#if V8_TARGET_ARCH_IA32
+#include "src/regexp/ia32/regexp-macro-assembler-ia32.h" // NOLINT
+#elif V8_TARGET_ARCH_X64
+#include "src/regexp/x64/regexp-macro-assembler-x64.h" // NOLINT
+#elif V8_TARGET_ARCH_ARM64
+#include "src/regexp/arm64/regexp-macro-assembler-arm64.h" // NOLINT
+#elif V8_TARGET_ARCH_ARM
+#include "src/regexp/arm/regexp-macro-assembler-arm.h" // NOLINT
+#elif V8_TARGET_ARCH_PPC
+#include "src/regexp/ppc/regexp-macro-assembler-ppc.h" // NOLINT
+#elif V8_TARGET_ARCH_MIPS
+#include "src/regexp/mips/regexp-macro-assembler-mips.h" // NOLINT
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/regexp/mips64/regexp-macro-assembler-mips64.h" // NOLINT
+#elif V8_TARGET_ARCH_S390
+#include "src/regexp/s390/regexp-macro-assembler-s390.h" // NOLINT
+#else // Unknown architecture.
+#error "Unknown architecture."
+#endif // Target architecture.
+#endif // V8_INTERPRETED_REGEXP
+
+#ifdef V8_INTL_SUPPORT
+#include "src/intl.h"
+#endif // V8_INTL_SUPPORT
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// Common double constants.
+
+struct DoubleConstant BASE_EMBEDDED {
+ double min_int;
+ double one_half;
+ double minus_one_half;
+ double negative_infinity;
+ uint64_t the_hole_nan;
+ double uint32_bias;
+};
+
+static DoubleConstant double_constants;
+
+static struct V8_ALIGNED(16) {
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+ uint32_t d;
+} float_absolute_constant = {0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF};
+
+static struct V8_ALIGNED(16) {
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+ uint32_t d;
+} float_negate_constant = {0x80000000, 0x80000000, 0x80000000, 0x80000000};
+
+static struct V8_ALIGNED(16) {
+ uint64_t a;
+ uint64_t b;
+} double_absolute_constant = {uint64_t{0x7FFFFFFFFFFFFFFF},
+ uint64_t{0x7FFFFFFFFFFFFFFF}};
+
+static struct V8_ALIGNED(16) {
+ uint64_t a;
+ uint64_t b;
+} double_negate_constant = {uint64_t{0x8000000000000000},
+ uint64_t{0x8000000000000000}};
+
+// Implementation of ExternalReference
+
+static ExternalReference::Type BuiltinCallTypeForResultSize(int result_size) {
+ switch (result_size) {
+ case 1:
+ return ExternalReference::BUILTIN_CALL;
+ case 2:
+ return ExternalReference::BUILTIN_CALL_PAIR;
+ }
+ UNREACHABLE();
+}
+
+void ExternalReference::SetUp() {
+ double_constants.min_int = kMinInt;
+ double_constants.one_half = 0.5;
+ double_constants.minus_one_half = -0.5;
+ double_constants.the_hole_nan = kHoleNanInt64;
+ double_constants.negative_infinity = -V8_INFINITY;
+ double_constants.uint32_bias =
+ static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1;
+}
+
+ExternalReference::ExternalReference(Address address, Isolate* isolate)
+ : address_(Redirect(isolate, address)) {}
+
+ExternalReference::ExternalReference(
+ ApiFunction* fun, Type type = ExternalReference::BUILTIN_CALL,
+ Isolate* isolate = nullptr)
+ : address_(Redirect(isolate, fun->address(), type)) {}
+
+ExternalReference::ExternalReference(Runtime::FunctionId id, Isolate* isolate)
+ : ExternalReference(Runtime::FunctionForId(id), isolate) {}
+
+ExternalReference::ExternalReference(const Runtime::Function* f,
+ Isolate* isolate)
+ : address_(Redirect(isolate, f->entry,
+ BuiltinCallTypeForResultSize(f->result_size))) {}
+
+ExternalReference ExternalReference::isolate_address(Isolate* isolate) {
+ return ExternalReference(isolate);
+}
+
+ExternalReference ExternalReference::builtins_address(Isolate* isolate) {
+ return ExternalReference(isolate->builtins()->builtins_table_address());
+}
+
+ExternalReference ExternalReference::handle_scope_implementer_address(
+ Isolate* isolate) {
+ return ExternalReference(isolate->handle_scope_implementer_address());
+}
+
+ExternalReference ExternalReference::pending_microtask_count_address(
+ Isolate* isolate) {
+ return ExternalReference(isolate->pending_microtask_count_address());
+}
+
+ExternalReference ExternalReference::interpreter_dispatch_table_address(
+ Isolate* isolate) {
+ return ExternalReference(isolate->interpreter()->dispatch_table_address());
+}
+
+ExternalReference ExternalReference::interpreter_dispatch_counters(
+ Isolate* isolate) {
+ return ExternalReference(
+ isolate->interpreter()->bytecode_dispatch_counters_table());
+}
+
+ExternalReference ExternalReference::bytecode_size_table_address(
+ Isolate* isolate) {
+ return ExternalReference(
+ interpreter::Bytecodes::bytecode_size_table_address());
+}
+
+ExternalReference::ExternalReference(StatsCounter* counter)
+ : address_(reinterpret_cast<Address>(counter->GetInternalPointer())) {}
+
+ExternalReference::ExternalReference(IsolateAddressId id, Isolate* isolate)
+ : address_(isolate->get_address_from_id(id)) {}
+
+ExternalReference::ExternalReference(const SCTableReference& table_ref)
+ : address_(table_ref.address()) {}
+
+ExternalReference ExternalReference::incremental_marking_record_write_function(
+ Isolate* isolate) {
+ return ExternalReference(Redirect(
+ isolate, FUNCTION_ADDR(IncrementalMarking::RecordWriteFromCode)));
+}
+
+ExternalReference ExternalReference::store_buffer_overflow_function(
+ Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(StoreBuffer::StoreBufferOverflow)));
+}
+
+ExternalReference ExternalReference::delete_handle_scope_extensions(
+ Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(HandleScope::DeleteExtensions)));
+}
+
+ExternalReference ExternalReference::get_date_field_function(Isolate* isolate) {
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(JSDate::GetField)));
+}
+
+ExternalReference ExternalReference::date_cache_stamp(Isolate* isolate) {
+ return ExternalReference(isolate->date_cache()->stamp_address());
+}
+
+// static
+ExternalReference
+ExternalReference::runtime_function_table_address_for_unittests(
+ Isolate* isolate) {
+ return runtime_function_table_address(isolate);
+}
+
+void ExternalReference::set_redirector(
+ Isolate* isolate, ExternalReferenceRedirector* redirector) {
+ // We can't stack them.
+ DCHECK_NULL(isolate->external_reference_redirector());
+ isolate->set_external_reference_redirector(
+ reinterpret_cast<ExternalReferenceRedirectorPointer*>(redirector));
+}
+
+// static
+void* ExternalReference::Redirect(Isolate* isolate, Address address_arg,
+ Type type) {
+ ExternalReferenceRedirector* redirector =
+ reinterpret_cast<ExternalReferenceRedirector*>(
+ isolate->external_reference_redirector());
+ void* address = reinterpret_cast<void*>(address_arg);
+ void* answer =
+ (redirector == nullptr) ? address : (*redirector)(address, type);
+ return answer;
+}
+
+ExternalReference ExternalReference::stress_deopt_count(Isolate* isolate) {
+ return ExternalReference(isolate->stress_deopt_count_address());
+}
+
+ExternalReference ExternalReference::force_slow_path(Isolate* isolate) {
+ return ExternalReference(isolate->force_slow_path_address());
+}
+
+ExternalReference ExternalReference::new_deoptimizer_function(
+ Isolate* isolate) {
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(Deoptimizer::New)));
+}
+
+ExternalReference ExternalReference::compute_output_frames_function(
+ Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(Deoptimizer::ComputeOutputFrames)));
+}
+
+ExternalReference ExternalReference::wasm_f32_trunc(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::f32_trunc_wrapper)));
+}
+ExternalReference ExternalReference::wasm_f32_floor(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::f32_floor_wrapper)));
+}
+ExternalReference ExternalReference::wasm_f32_ceil(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::f32_ceil_wrapper)));
+}
+ExternalReference ExternalReference::wasm_f32_nearest_int(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::f32_nearest_int_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_f64_trunc(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::f64_trunc_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_f64_floor(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::f64_floor_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_f64_ceil(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::f64_ceil_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_f64_nearest_int(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::f64_nearest_int_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_int64_to_float32(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::int64_to_float32_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_uint64_to_float32(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::uint64_to_float32_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_int64_to_float64(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::int64_to_float64_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_uint64_to_float64(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::uint64_to_float64_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_float32_to_int64(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::float32_to_int64_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_float32_to_uint64(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::float32_to_uint64_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_float64_to_int64(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::float64_to_int64_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_float64_to_uint64(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::float64_to_uint64_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_int64_div(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::int64_div_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_int64_mod(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::int64_mod_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_uint64_div(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::uint64_div_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_uint64_mod(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::uint64_mod_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_word32_ctz(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::word32_ctz_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_word64_ctz(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::word64_ctz_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_word32_popcnt(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::word32_popcnt_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_word64_popcnt(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::word64_popcnt_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_word32_rol(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::word32_rol_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_word32_ror(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::word32_ror_wrapper)));
+}
+
+static void f64_acos_wrapper(double* param) {
+ WriteDoubleValue(param, base::ieee754::acos(ReadDoubleValue(param)));
+}
+
+ExternalReference ExternalReference::f64_acos_wrapper_function(
+ Isolate* isolate) {
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_acos_wrapper)));
+}
+
+static void f64_asin_wrapper(double* param) {
+ WriteDoubleValue(param, base::ieee754::asin(ReadDoubleValue(param)));
+}
+
+ExternalReference ExternalReference::f64_asin_wrapper_function(
+ Isolate* isolate) {
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_asin_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_float64_pow(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::float64_pow_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_set_thread_in_wasm_flag(
+ Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::set_thread_in_wasm_flag)));
+}
+
+ExternalReference ExternalReference::wasm_clear_thread_in_wasm_flag(
+ Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::clear_thread_in_wasm_flag)));
+}
+
+static void f64_mod_wrapper(double* param0, double* param1) {
+ WriteDoubleValue(param0,
+ Modulo(ReadDoubleValue(param0), ReadDoubleValue(param1)));
+}
+
+ExternalReference ExternalReference::f64_mod_wrapper_function(
+ Isolate* isolate) {
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_mod_wrapper)));
+}
+
+ExternalReference ExternalReference::wasm_call_trap_callback_for_testing(
+ Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::call_trap_callback_for_testing)));
+}
+
+ExternalReference ExternalReference::log_enter_external_function(
+ Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(Logger::EnterExternal)));
+}
+
+ExternalReference ExternalReference::log_leave_external_function(
+ Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(Logger::LeaveExternal)));
+}
+
+ExternalReference ExternalReference::roots_array_start(Isolate* isolate) {
+ return ExternalReference(isolate->heap()->roots_array_start());
+}
+
+ExternalReference ExternalReference::allocation_sites_list_address(
+ Isolate* isolate) {
+ return ExternalReference(isolate->heap()->allocation_sites_list_address());
+}
+
+ExternalReference ExternalReference::address_of_stack_limit(Isolate* isolate) {
+ return ExternalReference(isolate->stack_guard()->address_of_jslimit());
+}
+
+ExternalReference ExternalReference::address_of_real_stack_limit(
+ Isolate* isolate) {
+ return ExternalReference(isolate->stack_guard()->address_of_real_jslimit());
+}
+
+ExternalReference ExternalReference::store_buffer_top(Isolate* isolate) {
+ return ExternalReference(isolate->heap()->store_buffer_top_address());
+}
+
+ExternalReference ExternalReference::heap_is_marking_flag_address(
+ Isolate* isolate) {
+ return ExternalReference(isolate->heap()->IsMarkingFlagAddress());
+}
+
+ExternalReference ExternalReference::new_space_allocation_top_address(
+ Isolate* isolate) {
+ return ExternalReference(isolate->heap()->NewSpaceAllocationTopAddress());
+}
+
+ExternalReference ExternalReference::new_space_allocation_limit_address(
+ Isolate* isolate) {
+ return ExternalReference(isolate->heap()->NewSpaceAllocationLimitAddress());
+}
+
+ExternalReference ExternalReference::old_space_allocation_top_address(
+ Isolate* isolate) {
+ return ExternalReference(isolate->heap()->OldSpaceAllocationTopAddress());
+}
+
+ExternalReference ExternalReference::old_space_allocation_limit_address(
+ Isolate* isolate) {
+ return ExternalReference(isolate->heap()->OldSpaceAllocationLimitAddress());
+}
+
+ExternalReference ExternalReference::handle_scope_level_address(
+ Isolate* isolate) {
+ return ExternalReference(HandleScope::current_level_address(isolate));
+}
+
+ExternalReference ExternalReference::handle_scope_next_address(
+ Isolate* isolate) {
+ return ExternalReference(HandleScope::current_next_address(isolate));
+}
+
+ExternalReference ExternalReference::handle_scope_limit_address(
+ Isolate* isolate) {
+ return ExternalReference(HandleScope::current_limit_address(isolate));
+}
+
+ExternalReference ExternalReference::scheduled_exception_address(
+ Isolate* isolate) {
+ return ExternalReference(isolate->scheduled_exception_address());
+}
+
+ExternalReference ExternalReference::address_of_pending_message_obj(
+ Isolate* isolate) {
+ return ExternalReference(isolate->pending_message_obj_address());
+}
+
+ExternalReference ExternalReference::address_of_min_int(Isolate* isolate) {
+ return ExternalReference(reinterpret_cast<void*>(&double_constants.min_int));
+}
+
+ExternalReference ExternalReference::address_of_one_half(Isolate* isolate) {
+ return ExternalReference(reinterpret_cast<void*>(&double_constants.one_half));
+}
+
+ExternalReference ExternalReference::address_of_minus_one_half(
+ Isolate* isolate) {
+ return ExternalReference(
+ reinterpret_cast<void*>(&double_constants.minus_one_half));
+}
+
+ExternalReference ExternalReference::address_of_negative_infinity(
+ Isolate* isolate) {
+ return ExternalReference(
+ reinterpret_cast<void*>(&double_constants.negative_infinity));
+}
+
+ExternalReference ExternalReference::address_of_the_hole_nan(Isolate* isolate) {
+ return ExternalReference(
+ reinterpret_cast<void*>(&double_constants.the_hole_nan));
+}
+
+ExternalReference ExternalReference::address_of_uint32_bias(Isolate* isolate) {
+ return ExternalReference(
+ reinterpret_cast<void*>(&double_constants.uint32_bias));
+}
+
+ExternalReference ExternalReference::address_of_float_abs_constant(
+ Isolate* isolate) {
+ return ExternalReference(reinterpret_cast<void*>(&float_absolute_constant));
+}
+
+ExternalReference ExternalReference::address_of_float_neg_constant(
+ Isolate* isolate) {
+ return ExternalReference(reinterpret_cast<void*>(&float_negate_constant));
+}
+
+ExternalReference ExternalReference::address_of_double_abs_constant(
+ Isolate* isolate) {
+ return ExternalReference(reinterpret_cast<void*>(&double_absolute_constant));
+}
+
+ExternalReference ExternalReference::address_of_double_neg_constant(
+ Isolate* isolate) {
+ return ExternalReference(reinterpret_cast<void*>(&double_negate_constant));
+}
+
+ExternalReference ExternalReference::is_profiling_address(Isolate* isolate) {
+ return ExternalReference(isolate->is_profiling_address());
+}
+
+ExternalReference ExternalReference::invoke_function_callback(
+ Isolate* isolate) {
+ Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
+ ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL;
+ ApiFunction thunk_fun(thunk_address);
+ return ExternalReference(&thunk_fun, thunk_type, isolate);
+}
+
+ExternalReference ExternalReference::invoke_accessor_getter_callback(
+ Isolate* isolate) {
+ Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
+ ExternalReference::Type thunk_type = ExternalReference::PROFILING_GETTER_CALL;
+ ApiFunction thunk_fun(thunk_address);
+ return ExternalReference(&thunk_fun, thunk_type, isolate);
+}
+
+#ifndef V8_INTERPRETED_REGEXP
+
+ExternalReference ExternalReference::re_check_stack_guard_state(
+ Isolate* isolate) {
+ Address function;
+#if V8_TARGET_ARCH_X64
+ function = FUNCTION_ADDR(RegExpMacroAssemblerX64::CheckStackGuardState);
+#elif V8_TARGET_ARCH_IA32
+ function = FUNCTION_ADDR(RegExpMacroAssemblerIA32::CheckStackGuardState);
+#elif V8_TARGET_ARCH_ARM64
+ function = FUNCTION_ADDR(RegExpMacroAssemblerARM64::CheckStackGuardState);
+#elif V8_TARGET_ARCH_ARM
+ function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState);
+#elif V8_TARGET_ARCH_PPC
+ function = FUNCTION_ADDR(RegExpMacroAssemblerPPC::CheckStackGuardState);
+#elif V8_TARGET_ARCH_MIPS
+ function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState);
+#elif V8_TARGET_ARCH_MIPS64
+ function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState);
+#elif V8_TARGET_ARCH_S390
+ function = FUNCTION_ADDR(RegExpMacroAssemblerS390::CheckStackGuardState);
+#else
+ UNREACHABLE();
+#endif
+ return ExternalReference(Redirect(isolate, function));
+}
+
+ExternalReference ExternalReference::re_grow_stack(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(NativeRegExpMacroAssembler::GrowStack)));
+}
+
+ExternalReference ExternalReference::re_case_insensitive_compare_uc16(
+ Isolate* isolate) {
+ return ExternalReference(Redirect(
+ isolate,
+ FUNCTION_ADDR(NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16)));
+}
+
+ExternalReference ExternalReference::re_word_character_map(Isolate* isolate) {
+ return ExternalReference(
+ NativeRegExpMacroAssembler::word_character_map_address());
+}
+
+ExternalReference ExternalReference::address_of_static_offsets_vector(
+ Isolate* isolate) {
+ return ExternalReference(
+ reinterpret_cast<Address>(isolate->jsregexp_static_offsets_vector()));
+}
+
+ExternalReference ExternalReference::address_of_regexp_stack_limit(
+ Isolate* isolate) {
+ return ExternalReference(isolate->regexp_stack()->limit_address());
+}
+
+ExternalReference ExternalReference::address_of_regexp_stack_memory_address(
+ Isolate* isolate) {
+ return ExternalReference(isolate->regexp_stack()->memory_address());
+}
+
+ExternalReference ExternalReference::address_of_regexp_stack_memory_size(
+ Isolate* isolate) {
+ return ExternalReference(isolate->regexp_stack()->memory_size_address());
+}
+
+#endif // V8_INTERPRETED_REGEXP
+
+ExternalReference ExternalReference::ieee754_acos_function(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(base::ieee754::acos), BUILTIN_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_acosh_function(Isolate* isolate) {
+ return ExternalReference(Redirect(
+ isolate, FUNCTION_ADDR(base::ieee754::acosh), BUILTIN_FP_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_asin_function(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(base::ieee754::asin), BUILTIN_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_asinh_function(Isolate* isolate) {
+ return ExternalReference(Redirect(
+ isolate, FUNCTION_ADDR(base::ieee754::asinh), BUILTIN_FP_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_atan_function(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(base::ieee754::atan), BUILTIN_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_atanh_function(Isolate* isolate) {
+ return ExternalReference(Redirect(
+ isolate, FUNCTION_ADDR(base::ieee754::atanh), BUILTIN_FP_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_atan2_function(Isolate* isolate) {
+ return ExternalReference(Redirect(
+ isolate, FUNCTION_ADDR(base::ieee754::atan2), BUILTIN_FP_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_cbrt_function(Isolate* isolate) {
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(base::ieee754::cbrt),
+ BUILTIN_FP_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_cos_function(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(base::ieee754::cos), BUILTIN_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_cosh_function(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(base::ieee754::cosh), BUILTIN_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_exp_function(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(base::ieee754::exp), BUILTIN_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_expm1_function(Isolate* isolate) {
+ return ExternalReference(Redirect(
+ isolate, FUNCTION_ADDR(base::ieee754::expm1), BUILTIN_FP_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_log_function(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(base::ieee754::log), BUILTIN_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_log1p_function(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(base::ieee754::log1p), BUILTIN_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_log10_function(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(base::ieee754::log10), BUILTIN_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_log2_function(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(base::ieee754::log2), BUILTIN_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_sin_function(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(base::ieee754::sin), BUILTIN_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_sinh_function(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(base::ieee754::sinh), BUILTIN_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_tan_function(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(base::ieee754::tan), BUILTIN_FP_CALL));
+}
+
+ExternalReference ExternalReference::ieee754_tanh_function(Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(base::ieee754::tanh), BUILTIN_FP_CALL));
+}
+
+void* libc_memchr(void* string, int character, size_t search_length) {
+ return memchr(string, character, search_length);
+}
+
+ExternalReference ExternalReference::libc_memchr_function(Isolate* isolate) {
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(libc_memchr)));
+}
+
+void* libc_memcpy(void* dest, const void* src, size_t n) {
+ return memcpy(dest, src, n);
+}
+
+ExternalReference ExternalReference::libc_memcpy_function(Isolate* isolate) {
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(libc_memcpy)));
+}
+
+void* libc_memmove(void* dest, const void* src, size_t n) {
+ return memmove(dest, src, n);
+}
+
+ExternalReference ExternalReference::libc_memmove_function(Isolate* isolate) {
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(libc_memmove)));
+}
+
+void* libc_memset(void* dest, int byte, size_t n) {
+ DCHECK_EQ(static_cast<char>(byte), byte);
+ return memset(dest, byte, n);
+}
+
+ExternalReference ExternalReference::libc_memset_function(Isolate* isolate) {
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(libc_memset)));
+}
+
+ExternalReference ExternalReference::printf_function(Isolate* isolate) {
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(std::printf)));
+}
+
+template <typename SubjectChar, typename PatternChar>
+ExternalReference ExternalReference::search_string_raw(Isolate* isolate) {
+ auto f = SearchStringRaw<SubjectChar, PatternChar>;
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f)));
+}
+
+ExternalReference ExternalReference::search_string_raw_one_one(
+ Isolate* isolate) {
+ return search_string_raw<const uint8_t, const uint8_t>(isolate);
+}
+
+ExternalReference ExternalReference::search_string_raw_one_two(
+ Isolate* isolate) {
+ return search_string_raw<const uint8_t, const uc16>(isolate);
+}
+
+ExternalReference ExternalReference::search_string_raw_two_one(
+ Isolate* isolate) {
+ return search_string_raw<const uc16, const uint8_t>(isolate);
+}
+
+ExternalReference ExternalReference::search_string_raw_two_two(
+ Isolate* isolate) {
+ return search_string_raw<const uc16, const uc16>(isolate);
+}
+
+ExternalReference ExternalReference::orderedhashmap_gethash_raw(
+ Isolate* isolate) {
+ auto f = OrderedHashMap::GetHash;
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f)));
+}
+
+ExternalReference ExternalReference::get_or_create_hash_raw(Isolate* isolate) {
+ typedef Smi* (*GetOrCreateHash)(Isolate * isolate, Object * key);
+ GetOrCreateHash f = Object::GetOrCreateHash;
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f)));
+}
+
+ExternalReference ExternalReference::jsreceiver_create_identity_hash(
+ Isolate* isolate) {
+ typedef Smi* (*CreateIdentityHash)(Isolate * isolate, JSReceiver * key);
+ CreateIdentityHash f = JSReceiver::CreateIdentityHash;
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f)));
+}
+
+ExternalReference
+ExternalReference::copy_fast_number_jsarray_elements_to_typed_array(
+ Isolate* isolate) {
+ return ExternalReference(Redirect(
+ isolate, FUNCTION_ADDR(CopyFastNumberJSArrayElementsToTypedArray)));
+}
+
+ExternalReference ExternalReference::copy_typed_array_elements_to_typed_array(
+ Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(CopyTypedArrayElementsToTypedArray)));
+}
+
+ExternalReference ExternalReference::copy_typed_array_elements_slice(
+ Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(CopyTypedArrayElementsSlice)));
+}
+
+ExternalReference ExternalReference::try_internalize_string_function(
+ Isolate* isolate) {
+ return ExternalReference(Redirect(
+ isolate, FUNCTION_ADDR(StringTable::LookupStringIfExists_NoAllocate)));
+}
+
+ExternalReference ExternalReference::check_object_type(Isolate* isolate) {
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(CheckObjectType)));
+}
+
+#ifdef V8_INTL_SUPPORT
+ExternalReference ExternalReference::intl_convert_one_byte_to_lower(
+ Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(ConvertOneByteToLower)));
+}
+
+ExternalReference ExternalReference::intl_to_latin1_lower_table(
+ Isolate* isolate) {
+ uint8_t* ptr = const_cast<uint8_t*>(ToLatin1LowerTable());
+ return ExternalReference(reinterpret_cast<Address>(ptr));
+}
+#endif // V8_INTL_SUPPORT
+
+// Explicit instantiations for all combinations of 1- and 2-byte strings.
+template ExternalReference
+ExternalReference::search_string_raw<const uint8_t, const uint8_t>(Isolate*);
+template ExternalReference
+ExternalReference::search_string_raw<const uint8_t, const uc16>(Isolate*);
+template ExternalReference
+ExternalReference::search_string_raw<const uc16, const uint8_t>(Isolate*);
+template ExternalReference
+ExternalReference::search_string_raw<const uc16, const uc16>(Isolate*);
+
+ExternalReference ExternalReference::page_flags(Page* page) {
+ return ExternalReference(reinterpret_cast<Address>(page) +
+ MemoryChunk::kFlagsOffset);
+}
+
+ExternalReference ExternalReference::ForDeoptEntry(Address entry) {
+ return ExternalReference(entry);
+}
+
+ExternalReference ExternalReference::cpu_features(Isolate* isolate) {
+ DCHECK(CpuFeatures::initialized_);
+ return ExternalReference(&CpuFeatures::supported_);
+}
+
+ExternalReference ExternalReference::promise_hook_or_debug_is_active_address(
+ Isolate* isolate) {
+ return ExternalReference(isolate->promise_hook_or_debug_is_active_address());
+}
+
+ExternalReference ExternalReference::debug_is_active_address(Isolate* isolate) {
+ return ExternalReference(isolate->debug()->is_active_address());
+}
+
+ExternalReference ExternalReference::debug_hook_on_function_call_address(
+ Isolate* isolate) {
+ return ExternalReference(isolate->debug()->hook_on_function_call_address());
+}
+
+ExternalReference ExternalReference::debug_execution_mode_address(
+ Isolate* isolate) {
+ return ExternalReference(isolate->debug_execution_mode_address());
+}
+
+ExternalReference ExternalReference::runtime_function_table_address(
+ Isolate* isolate) {
+ return ExternalReference(
+ const_cast<Runtime::Function*>(Runtime::RuntimeFunctionTable(isolate)));
+}
+
+ExternalReference ExternalReference::invalidate_prototype_chains_function(
+ Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(JSObject::InvalidatePrototypeChains)));
+}
+
+double power_helper(Isolate* isolate, double x, double y) {
+ int y_int = static_cast<int>(y);
+ if (y == y_int) {
+ return power_double_int(x, y_int); // Returns 1 if exponent is 0.
+ }
+ if (y == 0.5) {
+ lazily_initialize_fast_sqrt(isolate);
+ return (std::isinf(x)) ? V8_INFINITY
+ : fast_sqrt(x + 0.0, isolate); // Convert -0 to +0.
+ }
+ if (y == -0.5) {
+ lazily_initialize_fast_sqrt(isolate);
+ return (std::isinf(x)) ? 0
+ : 1.0 / fast_sqrt(x + 0.0,
+ isolate); // Convert -0 to +0.
+ }
+ return power_double_double(x, y);
+}
+
+// Helper function to compute x^y, where y is known to be an
+// integer. Uses binary decomposition to limit the number of
+// multiplications; see the discussion in "Hacker's Delight" by Henry
+// S. Warren, Jr., figure 11-6, page 213.
+double power_double_int(double x, int y) {
+ double m = (y < 0) ? 1 / x : x;
+ unsigned n = (y < 0) ? -y : y;
+ double p = 1;
+ while (n != 0) {
+ if ((n & 1) != 0) p *= m;
+ m *= m;
+ if ((n & 2) != 0) p *= m;
+ m *= m;
+ n >>= 2;
+ }
+ return p;
+}
+
+double power_double_double(double x, double y) {
+ // The checks for special cases can be dropped in ia32 because it has already
+ // been done in generated code before bailing out here.
+ if (std::isnan(y) || ((x == 1 || x == -1) && std::isinf(y))) {
+ return std::numeric_limits<double>::quiet_NaN();
+ }
+ return Pow(x, y);
+}
+
+double modulo_double_double(double x, double y) { return Modulo(x, y); }
+
+ExternalReference ExternalReference::power_double_double_function(
+ Isolate* isolate) {
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(power_double_double),
+ BUILTIN_FP_FP_CALL));
+}
+
+ExternalReference ExternalReference::mod_two_doubles_operation(
+ Isolate* isolate) {
+ return ExternalReference(Redirect(
+ isolate, FUNCTION_ADDR(modulo_double_double), BUILTIN_FP_FP_CALL));
+}
+
+ExternalReference ExternalReference::debug_last_step_action_address(
+ Isolate* isolate) {
+ return ExternalReference(isolate->debug()->last_step_action_address());
+}
+
+ExternalReference ExternalReference::debug_suspended_generator_address(
+ Isolate* isolate) {
+ return ExternalReference(isolate->debug()->suspended_generator_address());
+}
+
+ExternalReference ExternalReference::debug_restart_fp_address(
+ Isolate* isolate) {
+ return ExternalReference(isolate->debug()->restart_fp_address());
+}
+
+ExternalReference ExternalReference::fixed_typed_array_base_data_offset(
+ Isolate* isolate) {
+ return ExternalReference(reinterpret_cast<void*>(
+ FixedTypedArrayBase::kDataOffset - kHeapObjectTag));
+}
+
+bool operator==(ExternalReference lhs, ExternalReference rhs) {
+ return lhs.address() == rhs.address();
+}
+
+bool operator!=(ExternalReference lhs, ExternalReference rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(ExternalReference reference) {
+ return base::hash<Address>()(reference.address());
+}
+
+std::ostream& operator<<(std::ostream& os, ExternalReference reference) {
+ os << static_cast<const void*>(reference.address());
+ const Runtime::Function* fn = Runtime::FunctionForEntry(reference.address());
+ if (fn) os << "<" << fn->name << ".entry>";
+ return os;
+}
+
+} // namespace internal
+} // namespace v8
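
Editor's note: the power_double_int helper in external-reference.cc above relies on binary decomposition of the exponent: each loop iteration consumes two bits of y while repeatedly squaring the base, so the number of multiplications grows with log2(y) rather than y. The following is a minimal standalone sketch of the same idea (one bit per iteration, illustrative names, no V8 headers), shown only to make the technique concrete:

#include <cmath>
#include <cstdio>

// Sketch only: x^y for integer y via binary decomposition of the exponent.
double PowDoubleInt(double x, int y) {
  double m = (y < 0) ? 1 / x : x;  // Work with 1/x for negative exponents.
  unsigned n = (y < 0) ? 0u - static_cast<unsigned>(y) : static_cast<unsigned>(y);
  double p = 1;                    // Result accumulator; stays 1 when y == 0.
  while (n != 0) {
    if (n & 1) p *= m;             // Fold in the current power of the base.
    m *= m;                        // Square the base for the next exponent bit.
    n >>= 1;
  }
  return p;
}

int main() {
  std::printf("%g vs %g\n", PowDoubleInt(2.0, 10), std::pow(2.0, 10.0));  // 1024
  std::printf("%g vs %g\n", PowDoubleInt(2.0, -3), std::pow(2.0, -3.0));  // 0.125
}

Comparing the sketch against std::pow for a few exponents, as main() does, is a quick sanity check of the decomposition.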
diff --git a/deps/v8/src/external-reference.h b/deps/v8/src/external-reference.h
new file mode 100644
index 0000000000..52abf34a63
--- /dev/null
+++ b/deps/v8/src/external-reference.h
@@ -0,0 +1,305 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXTERNAL_REFERENCE_H_
+#define V8_EXTERNAL_REFERENCE_H_
+
+#include "src/globals.h"
+#include "src/runtime/runtime.h"
+
+namespace v8 {
+
+class ApiFunction;
+
+namespace internal {
+
+class Isolate;
+class Page;
+class SCTableReference;
+class StatsCounter;
+
+//------------------------------------------------------------------------------
+// External references
+
+#define EXTERNAL_REFERENCE_LIST(V) \
+ V(address_of_double_abs_constant, "double_absolute_constant") \
+ V(address_of_double_neg_constant, "double_negate_constant") \
+ V(address_of_float_abs_constant, "float_absolute_constant") \
+ V(address_of_float_neg_constant, "float_negate_constant") \
+ V(address_of_min_int, "LDoubleConstant::min_int") \
+ V(address_of_minus_one_half, "double_constants.minus_one_half") \
+ V(address_of_negative_infinity, "LDoubleConstant::negative_infinity") \
+ V(address_of_one_half, "LDoubleConstant::one_half") \
+ V(address_of_pending_message_obj, "address_of_pending_message_obj") \
+ V(address_of_real_stack_limit, "StackGuard::address_of_real_jslimit()") \
+ V(address_of_stack_limit, "StackGuard::address_of_jslimit()") \
+ V(address_of_the_hole_nan, "the_hole_nan") \
+ V(address_of_uint32_bias, "uint32_bias") \
+ V(allocation_sites_list_address, "Heap::allocation_sites_list_address()") \
+ V(builtins_address, "builtins") \
+ V(bytecode_size_table_address, "Bytecodes::bytecode_size_table_address") \
+ V(check_object_type, "check_object_type") \
+ V(compute_output_frames_function, "Deoptimizer::ComputeOutputFrames()") \
+ V(copy_fast_number_jsarray_elements_to_typed_array, \
+ "copy_fast_number_jsarray_elements_to_typed_array") \
+ V(copy_typed_array_elements_slice, "copy_typed_array_elements_slice") \
+ V(copy_typed_array_elements_to_typed_array, \
+ "copy_typed_array_elements_to_typed_array") \
+ V(cpu_features, "cpu_features") \
+ V(date_cache_stamp, "date_cache_stamp") \
+ V(debug_execution_mode_address, "Isolate::debug_execution_mode()") \
+ V(debug_hook_on_function_call_address, \
+ "Debug::hook_on_function_call_address()") \
+ V(debug_is_active_address, "Debug::is_active_address()") \
+ V(debug_last_step_action_address, "Debug::step_in_enabled_address()") \
+ V(debug_restart_fp_address, "Debug::restart_fp_address()") \
+ V(debug_suspended_generator_address, \
+ "Debug::step_suspended_generator_address()") \
+ V(delete_handle_scope_extensions, "HandleScope::DeleteExtensions") \
+ V(f64_acos_wrapper_function, "f64_acos_wrapper") \
+ V(f64_asin_wrapper_function, "f64_asin_wrapper") \
+ V(f64_mod_wrapper_function, "f64_mod_wrapper") \
+ V(fixed_typed_array_base_data_offset, "fixed_typed_array_base_data_offset") \
+ V(force_slow_path, "Isolate::force_slow_path_address()") \
+ V(get_date_field_function, "JSDate::GetField") \
+ V(get_or_create_hash_raw, "get_or_create_hash_raw") \
+ V(handle_scope_implementer_address, \
+ "Isolate::handle_scope_implementer_address") \
+ V(handle_scope_level_address, "HandleScope::level") \
+ V(handle_scope_limit_address, "HandleScope::limit") \
+ V(handle_scope_next_address, "HandleScope::next") \
+ V(heap_is_marking_flag_address, "heap_is_marking_flag_address") \
+ V(ieee754_acos_function, "base::ieee754::acos") \
+ V(ieee754_acosh_function, "base::ieee754::acosh") \
+ V(ieee754_asin_function, "base::ieee754::asin") \
+ V(ieee754_asinh_function, "base::ieee754::asinh") \
+ V(ieee754_atan2_function, "base::ieee754::atan2") \
+ V(ieee754_atan_function, "base::ieee754::atan") \
+ V(ieee754_atanh_function, "base::ieee754::atanh") \
+ V(ieee754_cbrt_function, "base::ieee754::cbrt") \
+ V(ieee754_cos_function, "base::ieee754::cos") \
+ V(ieee754_cosh_function, "base::ieee754::cosh") \
+ V(ieee754_exp_function, "base::ieee754::exp") \
+ V(ieee754_expm1_function, "base::ieee754::expm1") \
+ V(ieee754_log10_function, "base::ieee754::log10") \
+ V(ieee754_log1p_function, "base::ieee754::log1p") \
+ V(ieee754_log2_function, "base::ieee754::log2") \
+ V(ieee754_log_function, "base::ieee754::log") \
+ V(ieee754_sin_function, "base::ieee754::sin") \
+ V(ieee754_sinh_function, "base::ieee754::sinh") \
+ V(ieee754_tan_function, "base::ieee754::tan") \
+ V(ieee754_tanh_function, "base::ieee754::tanh") \
+ V(incremental_marking_record_write_function, \
+ "IncrementalMarking::RecordWrite") \
+ V(interpreter_dispatch_counters, "Interpreter::dispatch_counters") \
+ V(interpreter_dispatch_table_address, "Interpreter::dispatch_table_address") \
+ V(invalidate_prototype_chains_function, \
+ "JSObject::InvalidatePrototypeChains()") \
+ V(invoke_accessor_getter_callback, "InvokeAccessorGetterCallback") \
+ V(invoke_function_callback, "InvokeFunctionCallback") \
+ V(isolate_address, "isolate") \
+ V(is_profiling_address, "Isolate::is_profiling") \
+ V(jsreceiver_create_identity_hash, "jsreceiver_create_identity_hash") \
+ V(libc_memchr_function, "libc_memchr") \
+ V(libc_memcpy_function, "libc_memcpy") \
+ V(libc_memmove_function, "libc_memmove") \
+ V(libc_memset_function, "libc_memset") \
+ V(log_enter_external_function, "Logger::EnterExternal") \
+ V(log_leave_external_function, "Logger::LeaveExternal") \
+ V(mod_two_doubles_operation, "mod_two_doubles") \
+ V(new_deoptimizer_function, "Deoptimizer::New()") \
+ V(new_space_allocation_limit_address, \
+ "Heap::NewSpaceAllocationLimitAddress()") \
+ V(new_space_allocation_top_address, "Heap::NewSpaceAllocationTopAddress()") \
+ V(old_space_allocation_limit_address, \
+ "Heap::OldSpaceAllocationLimitAddress") \
+ V(old_space_allocation_top_address, "Heap::OldSpaceAllocationTopAddress") \
+ V(orderedhashmap_gethash_raw, "orderedhashmap_gethash_raw") \
+ V(pending_microtask_count_address, \
+ "Isolate::pending_microtask_count_address()") \
+ V(power_double_double_function, "power_double_double_function") \
+ V(printf_function, "printf") \
+ V(promise_hook_or_debug_is_active_address, \
+ "Isolate::promise_hook_or_debug_is_active_address()") \
+ V(roots_array_start, "Heap::roots_array_start()") \
+ V(runtime_function_table_address, \
+ "Runtime::runtime_function_table_address()") \
+ V(scheduled_exception_address, "Isolate::scheduled_exception") \
+ V(search_string_raw_one_one, "search_string_raw_one_one") \
+ V(search_string_raw_one_two, "search_string_raw_one_two") \
+ V(search_string_raw_two_one, "search_string_raw_two_one") \
+ V(search_string_raw_two_two, "search_string_raw_two_two") \
+ V(store_buffer_overflow_function, "StoreBuffer::StoreBufferOverflow") \
+ V(store_buffer_top, "store_buffer_top") \
+ V(stress_deopt_count, "Isolate::stress_deopt_count_address()") \
+ V(try_internalize_string_function, "try_internalize_string_function") \
+ V(wasm_call_trap_callback_for_testing, \
+ "wasm::call_trap_callback_for_testing") \
+ V(wasm_clear_thread_in_wasm_flag, "wasm::clear_thread_in_wasm_flag") \
+ V(wasm_f32_ceil, "wasm::f32_ceil_wrapper") \
+ V(wasm_f32_floor, "wasm::f32_floor_wrapper") \
+ V(wasm_f32_nearest_int, "wasm::f32_nearest_int_wrapper") \
+ V(wasm_f32_trunc, "wasm::f32_trunc_wrapper") \
+ V(wasm_f64_ceil, "wasm::f64_ceil_wrapper") \
+ V(wasm_f64_floor, "wasm::f64_floor_wrapper") \
+ V(wasm_f64_nearest_int, "wasm::f64_nearest_int_wrapper") \
+ V(wasm_f64_trunc, "wasm::f64_trunc_wrapper") \
+ V(wasm_float32_to_int64, "wasm::float32_to_int64_wrapper") \
+ V(wasm_float32_to_uint64, "wasm::float32_to_uint64_wrapper") \
+ V(wasm_float64_pow, "wasm::float64_pow") \
+ V(wasm_float64_to_int64, "wasm::float64_to_int64_wrapper") \
+ V(wasm_float64_to_uint64, "wasm::float64_to_uint64_wrapper") \
+ V(wasm_int64_div, "wasm::int64_div") \
+ V(wasm_int64_mod, "wasm::int64_mod") \
+ V(wasm_int64_to_float32, "wasm::int64_to_float32_wrapper") \
+ V(wasm_int64_to_float64, "wasm::int64_to_float64_wrapper") \
+ V(wasm_set_thread_in_wasm_flag, "wasm::set_thread_in_wasm_flag") \
+ V(wasm_uint64_div, "wasm::uint64_div") \
+ V(wasm_uint64_mod, "wasm::uint64_mod") \
+ V(wasm_uint64_to_float32, "wasm::uint64_to_float32_wrapper") \
+ V(wasm_uint64_to_float64, "wasm::uint64_to_float64_wrapper") \
+ V(wasm_word32_ctz, "wasm::word32_ctz") \
+ V(wasm_word32_popcnt, "wasm::word32_popcnt") \
+ V(wasm_word32_rol, "wasm::word32_rol") \
+ V(wasm_word32_ror, "wasm::word32_ror") \
+ V(wasm_word64_ctz, "wasm::word64_ctz") \
+ V(wasm_word64_popcnt, "wasm::word64_popcnt") \
+ EXTERNAL_REFERENCE_LIST_NON_INTERPRETED_REGEXP(V) \
+ EXTERNAL_REFERENCE_LIST_INTL(V)
+
+#ifndef V8_INTERPRETED_REGEXP
+#define EXTERNAL_REFERENCE_LIST_NON_INTERPRETED_REGEXP(V) \
+ V(address_of_regexp_stack_limit, "RegExpStack::limit_address()") \
+ V(address_of_regexp_stack_memory_address, "RegExpStack::memory_address()") \
+ V(address_of_regexp_stack_memory_size, "RegExpStack::memory_size()") \
+ V(address_of_static_offsets_vector, "OffsetsVector::static_offsets_vector") \
+ V(re_case_insensitive_compare_uc16, \
+ "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()") \
+ V(re_check_stack_guard_state, \
+ "RegExpMacroAssembler*::CheckStackGuardState()") \
+ V(re_grow_stack, "NativeRegExpMacroAssembler::GrowStack()") \
+ V(re_word_character_map, "NativeRegExpMacroAssembler::word_character_map")
+#else
+#define EXTERNAL_REFERENCE_LIST_NON_INTERPRETED_REGEXP(V)
+#endif // V8_INTERPRETED_REGEXP
+
+#ifdef V8_INTL_SUPPORT
+#define EXTERNAL_REFERENCE_LIST_INTL(V) \
+ V(intl_convert_one_byte_to_lower, "intl_convert_one_byte_to_lower") \
+ V(intl_to_latin1_lower_table, "intl_to_latin1_lower_table")
+#else
+#define EXTERNAL_REFERENCE_LIST_INTL(V)
+#endif // V8_INTL_SUPPORT
+
+// An ExternalReference represents a C++ address used in the generated
+// code. All references to C++ functions and variables must be encapsulated
+// in an ExternalReference instance. This is done in order to track the
+// origin of all external references in the code so that they can be bound
+// to the correct addresses when deserializing a heap.
+class ExternalReference BASE_EMBEDDED {
+ public:
+ // Used in the simulator to support different native api calls.
+ enum Type {
+ // Builtin call.
+ // Object* f(v8::internal::Arguments).
+ BUILTIN_CALL, // default
+
+ // Builtin call returning object pair.
+ // ObjectPair f(v8::internal::Arguments).
+ BUILTIN_CALL_PAIR,
+
+ // Builtin that takes float arguments and returns an int.
+ // int f(double, double).
+ BUILTIN_COMPARE_CALL,
+
+ // Builtin call that returns floating point.
+ // double f(double, double).
+ BUILTIN_FP_FP_CALL,
+
+ // Builtin call that returns floating point.
+ // double f(double).
+ BUILTIN_FP_CALL,
+
+ // Builtin call that returns floating point.
+ // double f(double, int).
+ BUILTIN_FP_INT_CALL,
+
+ // Direct call to API function callback.
+ // void f(v8::FunctionCallbackInfo&)
+ DIRECT_API_CALL,
+
+ // Call to function callback via InvokeFunctionCallback.
+ // void f(v8::FunctionCallbackInfo&, v8::FunctionCallback)
+ PROFILING_API_CALL,
+
+ // Direct call to accessor getter callback.
+ // void f(Local<Name> property, PropertyCallbackInfo& info)
+ DIRECT_GETTER_CALL,
+
+ // Call to accessor getter callback via InvokeAccessorGetterCallback.
+ // void f(Local<Name> property, PropertyCallbackInfo& info,
+ // AccessorNameGetterCallback callback)
+ PROFILING_GETTER_CALL
+ };
+
+ static constexpr int kExternalReferenceCount =
+#define COUNT_EXTERNAL_REFERENCE(name, desc) +1
+ EXTERNAL_REFERENCE_LIST(COUNT_EXTERNAL_REFERENCE);
+#undef COUNT_EXTERNAL_REFERENCE
+
+ static void SetUp();
+
+ typedef void* ExternalReferenceRedirector(void* original, Type type);
+
+ ExternalReference() : address_(nullptr) {}
+ explicit ExternalReference(const SCTableReference& table_ref);
+ explicit ExternalReference(StatsCounter* counter);
+ ExternalReference(Address address, Isolate* isolate);
+ ExternalReference(ApiFunction* ptr, Type type, Isolate* isolate);
+ ExternalReference(const Runtime::Function* f, Isolate* isolate);
+ ExternalReference(IsolateAddressId id, Isolate* isolate);
+ ExternalReference(Runtime::FunctionId id, Isolate* isolate);
+
+ template <typename SubjectChar, typename PatternChar>
+ static ExternalReference search_string_raw(Isolate* isolate);
+
+ static ExternalReference page_flags(Page* page);
+
+ static ExternalReference ForDeoptEntry(Address entry);
+
+#define DECL_EXTERNAL_REFERENCE(name, desc) \
+ static ExternalReference name(Isolate* isolate);
+ EXTERNAL_REFERENCE_LIST(DECL_EXTERNAL_REFERENCE)
+#undef DECL_EXTERNAL_REFERENCE
+
+ V8_EXPORT_PRIVATE V8_NOINLINE static ExternalReference
+ runtime_function_table_address_for_unittests(Isolate* isolate);
+
+ Address address() const { return reinterpret_cast<Address>(address_); }
+
+ // This lets you register a function that rewrites all external references.
+ // Used by the ARM simulator to catch calls to external references.
+ static void set_redirector(Isolate* isolate,
+ ExternalReferenceRedirector* redirector);
+
+ private:
+ explicit ExternalReference(void* address) : address_(address) {}
+
+ static void* Redirect(Isolate* isolate, Address address_arg,
+ Type type = ExternalReference::BUILTIN_CALL);
+
+ void* address_;
+};
+
+V8_EXPORT_PRIVATE bool operator==(ExternalReference, ExternalReference);
+bool operator!=(ExternalReference, ExternalReference);
+
+size_t hash_value(ExternalReference);
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, ExternalReference);
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_EXTERNAL_REFERENCE_H_
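
Editor's note: EXTERNAL_REFERENCE_LIST in the new header is a classic X-macro. The list of (name, description) pairs is written once and expanded with different per-entry macros: once with COUNT_EXTERNAL_REFERENCE (which expands to "+1") to compute kExternalReferenceCount, and once with DECL_EXTERNAL_REFERENCE to declare one static accessor per entry. A self-contained toy version of the idiom, with illustrative names unrelated to V8, looks like this:

#include <cstdio>

// Toy sketch of the X-macro idiom: the list is written once and re-expanded
// with different per-entry macros.
#define COLOR_LIST(V)          \
  V(red, "the color red")      \
  V(green, "the color green")  \
  V(blue, "the color blue")

// Count the entries, as COUNT_EXTERNAL_REFERENCE does ("+1" per entry).
#define COUNT_ENTRY(name, desc) +1
static constexpr int kColorCount = COLOR_LIST(COUNT_ENTRY);
#undef COUNT_ENTRY

// Generate one accessor per entry, as DECL_EXTERNAL_REFERENCE does.
#define DEFINE_ENTRY(name, desc) \
  static const char* name() { return desc; }
COLOR_LIST(DEFINE_ENTRY)
#undef DEFINE_ENTRY

int main() {
  std::printf("%d entries, first: %s\n", kColorCount, red());  // 3 entries, ...
}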
diff --git a/deps/v8/src/feedback-vector-inl.h b/deps/v8/src/feedback-vector-inl.h
index 888fa01854..a2e27807ef 100644
--- a/deps/v8/src/feedback-vector-inl.h
+++ b/deps/v8/src/feedback-vector-inl.h
@@ -5,11 +5,11 @@
#ifndef V8_FEEDBACK_VECTOR_INL_H_
#define V8_FEEDBACK_VECTOR_INL_H_
-#include "src/factory-inl.h"
#include "src/feedback-vector.h"
#include "src/globals.h"
+#include "src/heap/factory-inl.h"
#include "src/heap/heap-inl.h"
-#include "src/heap/heap.h"
+#include "src/objects/maybe-object-inl.h"
#include "src/objects/shared-function-info.h"
// Has to be the last include (doesn't have include guards):
@@ -18,21 +18,35 @@
namespace v8 {
namespace internal {
+INT32_ACCESSORS(FeedbackMetadata, slot_count, kSlotCountOffset)
+
+int32_t FeedbackMetadata::synchronized_slot_count() const {
+ return base::Acquire_Load(reinterpret_cast<const base::Atomic32*>(
+ FIELD_ADDR_CONST(this, kSlotCountOffset)));
+}
+
// static
FeedbackMetadata* FeedbackMetadata::cast(Object* obj) {
DCHECK(obj->IsFeedbackMetadata());
return reinterpret_cast<FeedbackMetadata*>(obj);
}
-bool FeedbackMetadata::is_empty() const {
- if (length() == 0) return true;
- return false;
+int32_t FeedbackMetadata::get(int index) const {
+ DCHECK(index >= 0 && index < length());
+ int offset = kHeaderSize + index * kInt32Size;
+ return READ_INT32_FIELD(this, offset);
}
-int FeedbackMetadata::slot_count() const {
- if (length() == 0) return 0;
- DCHECK_GT(length(), kReservedIndexCount);
- return Smi::ToInt(get(kSlotsCountIndex));
+void FeedbackMetadata::set(int index, int32_t value) {
+ DCHECK(index >= 0 && index < length());
+ int offset = kHeaderSize + index * kInt32Size;
+ WRITE_INT32_FIELD(this, offset, value);
+}
+
+bool FeedbackMetadata::is_empty() const { return slot_count() == 0; }
+
+int FeedbackMetadata::length() const {
+ return FeedbackMetadata::length(slot_count());
}
// static
@@ -64,6 +78,7 @@ int FeedbackMetadata::GetSlotSize(FeedbackSlotKind kind) {
case FeedbackSlotKind::kStoreGlobalStrict:
case FeedbackSlotKind::kStoreKeyedSloppy:
case FeedbackSlotKind::kStoreKeyedStrict:
+ case FeedbackSlotKind::kStoreInArrayLiteral:
case FeedbackSlotKind::kStoreDataPropertyInLiteral:
return 2;
@@ -77,7 +92,7 @@ int FeedbackMetadata::GetSlotSize(FeedbackSlotKind kind) {
ACCESSORS(FeedbackVector, shared_function_info, SharedFunctionInfo,
kSharedFunctionInfoOffset)
-ACCESSORS(FeedbackVector, optimized_code_cell, Object, kOptimizedCodeOffset)
+WEAK_ACCESSORS(FeedbackVector, optimized_code_weak_or_smi, kOptimizedCodeOffset)
INT32_ACCESSORS(FeedbackVector, length, kLengthOffset)
INT32_ACCESSORS(FeedbackVector, invocation_count, kInvocationCountOffset)
INT32_ACCESSORS(FeedbackVector, profiler_ticks, kProfilerTicksOffset)
@@ -99,16 +114,18 @@ void FeedbackVector::increment_deopt_count() {
}
Code* FeedbackVector::optimized_code() const {
- Object* slot = optimized_code_cell();
- if (slot->IsSmi()) return nullptr;
- WeakCell* cell = WeakCell::cast(slot);
- return cell->cleared() ? nullptr : Code::cast(cell->value());
+ MaybeObject* slot = optimized_code_weak_or_smi();
+ DCHECK(slot->IsSmi() || slot->IsClearedWeakHeapObject() ||
+ slot->IsWeakHeapObject());
+ HeapObject* heap_object;
+ return slot->ToStrongOrWeakHeapObject(&heap_object) ? Code::cast(heap_object)
+ : nullptr;
}
OptimizationMarker FeedbackVector::optimization_marker() const {
- Object* slot = optimized_code_cell();
- if (!slot->IsSmi()) return OptimizationMarker::kNone;
- Smi* value = Smi::cast(slot);
+ MaybeObject* slot = optimized_code_weak_or_smi();
+ Smi* value;
+ if (!slot->ToSmi(&value)) return OptimizationMarker::kNone;
return static_cast<OptimizationMarker>(value->value());
}
@@ -247,6 +264,7 @@ void FeedbackVector::ComputeCounts(int* with_type_info, int* generic,
case FeedbackSlotKind::kStoreGlobalStrict:
case FeedbackSlotKind::kStoreKeyedSloppy:
case FeedbackSlotKind::kStoreKeyedStrict:
+ case FeedbackSlotKind::kStoreInArrayLiteral:
case FeedbackSlotKind::kStoreDataPropertyInLiteral:
case FeedbackSlotKind::kTypeProfile: {
if (obj->IsWeakCell() || obj->IsFixedArray() || obj->IsString()) {
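
Editor's note: FeedbackMetadata::synchronized_slot_count() in the hunk above reads the slot count with an acquire load; the point of an acquire/release pair is that a reader which observes the published count also observes the data written before the matching release store (the writer side is not shown in this diff and is assumed here). A plain C++ sketch of that publication pattern, using std::atomic instead of V8's base::Atomic32, is:

#include <atomic>
#include <cstdint>
#include <cstdio>
#include <thread>

// Sketch only: publish a count with a release store so readers that
// acquire-load the count also see the payload written before it.
struct Metadata {
  int32_t payload[16] = {};            // Written before the count is published.
  std::atomic<int32_t> slot_count{0};
};

void Writer(Metadata* m) {
  for (int i = 0; i < 5; i++) m->payload[i] = i * i;
  m->slot_count.store(5, std::memory_order_release);  // Publish.
}

void Reader(Metadata* m) {
  int32_t n;
  while ((n = m->slot_count.load(std::memory_order_acquire)) == 0) {
  }
  // The acquire load synchronizes with the release store above, so
  // payload[0..n-1] is guaranteed to be visible here.
  for (int32_t i = 0; i < n; i++) std::printf("%d ", m->payload[i]);
  std::printf("\n");
}

int main() {
  Metadata m;
  std::thread r(Reader, &m), w(Writer, &m);
  w.join();
  r.join();
}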
diff --git a/deps/v8/src/feedback-vector.cc b/deps/v8/src/feedback-vector.cc
index f5240baf1b..28ed6394c5 100644
--- a/deps/v8/src/feedback-vector.cc
+++ b/deps/v8/src/feedback-vector.cc
@@ -53,16 +53,16 @@ std::ostream& operator<<(std::ostream& os, FeedbackSlotKind kind) {
}
FeedbackSlotKind FeedbackMetadata::GetKind(FeedbackSlot slot) const {
- int index = VectorICComputer::index(kReservedIndexCount, slot.ToInt());
- int data = Smi::ToInt(get(index));
+ int index = VectorICComputer::index(0, slot.ToInt());
+ int data = get(index);
return VectorICComputer::decode(data, slot.ToInt());
}
void FeedbackMetadata::SetKind(FeedbackSlot slot, FeedbackSlotKind kind) {
- int index = VectorICComputer::index(kReservedIndexCount, slot.ToInt());
- int data = Smi::ToInt(get(index));
+ int index = VectorICComputer::index(0, slot.ToInt());
+ int data = get(index);
int new_data = VectorICComputer::encode(data, slot.ToInt(), kind);
- set(index, Smi::FromInt(new_data));
+ set(index, new_data);
}
// static
@@ -71,10 +71,8 @@ Handle<FeedbackMetadata> FeedbackMetadata::New(Isolate* isolate,
Factory* factory = isolate->factory();
const int slot_count = spec == nullptr ? 0 : spec->slots();
- const int slot_kinds_length = VectorICComputer::word_count(slot_count);
- const int length = slot_kinds_length + kReservedIndexCount;
- if (length == kReservedIndexCount) {
- return Handle<FeedbackMetadata>::cast(factory->empty_fixed_array());
+ if (slot_count == 0) {
+ return factory->empty_feedback_metadata();
}
#ifdef DEBUG
for (int i = 0; i < slot_count;) {
@@ -89,15 +87,10 @@ Handle<FeedbackMetadata> FeedbackMetadata::New(Isolate* isolate,
}
#endif
- Handle<FixedArray> array = factory->NewFixedArray(length, TENURED);
- array->set(kSlotsCountIndex, Smi::FromInt(slot_count));
- // Fill the bit-vector part with zeros.
- for (int i = 0; i < slot_kinds_length; i++) {
- array->set(kReservedIndexCount + i, Smi::kZero);
- }
-
- Handle<FeedbackMetadata> metadata = Handle<FeedbackMetadata>::cast(array);
+ Handle<FeedbackMetadata> metadata = factory->NewFeedbackMetadata(slot_count);
+ // Initialize the slots. The raw data section has already been pre-zeroed in
+ // NewFeedbackMetadata.
for (int i = 0; i < slot_count; i++) {
DCHECK(spec);
FeedbackSlot slot(i);
@@ -105,13 +98,6 @@ Handle<FeedbackMetadata> FeedbackMetadata::New(Isolate* isolate,
metadata->SetKind(slot, kind);
}
- // It's important that the FeedbackMetadata have a COW map, since it's
- // pointed to by both a SharedFunctionInfo and indirectly by closures through
- // the FeedbackVector. The serializer uses the COW map type to decide
- // this object belongs in the startup snapshot and not the partial
- // snapshot(s).
- metadata->set_map(isolate->heap()->fixed_cow_array_map());
-
return metadata;
}
@@ -163,6 +149,8 @@ const char* FeedbackMetadata::Kind2String(FeedbackSlotKind kind) {
return "StoreKeyedSloppy";
case FeedbackSlotKind::kStoreKeyedStrict:
return "StoreKeyedStrict";
+ case FeedbackSlotKind::kStoreInArrayLiteral:
+ return "StoreInArrayLiteral";
case FeedbackSlotKind::kBinaryOp:
return "BinaryOp";
case FeedbackSlotKind::kCompareOp:
@@ -188,7 +176,7 @@ const char* FeedbackMetadata::Kind2String(FeedbackSlotKind kind) {
bool FeedbackMetadata::HasTypeProfileSlot() const {
FeedbackSlot slot =
FeedbackVector::ToSlot(FeedbackVectorSpec::kTypeProfileSlotIndex);
- return slot.ToInt() < this->length() &&
+ return slot.ToInt() < slot_count() &&
GetKind(slot) == FeedbackSlotKind::kTypeProfile;
}
@@ -217,10 +205,11 @@ Handle<FeedbackVector> FeedbackVector::New(Isolate* isolate,
DCHECK_EQ(vector->length(), slot_count);
DCHECK_EQ(vector->shared_function_info(), *shared);
- DCHECK_EQ(vector->optimized_code_cell(),
- Smi::FromEnum(FLAG_log_function_events
- ? OptimizationMarker::kLogFirstExecution
- : OptimizationMarker::kNone));
+ DCHECK_EQ(
+ vector->optimized_code_weak_or_smi(),
+ MaybeObject::FromSmi(Smi::FromEnum(
+ FLAG_log_function_events ? OptimizationMarker::kLogFirstExecution
+ : OptimizationMarker::kNone)));
DCHECK_EQ(vector->invocation_count(), 0);
DCHECK_EQ(vector->profiler_ticks(), 0);
DCHECK_EQ(vector->deopt_count(), 0);
@@ -268,6 +257,7 @@ Handle<FeedbackVector> FeedbackVector::New(Isolate* isolate,
case FeedbackSlotKind::kStoreOwnNamed:
case FeedbackSlotKind::kStoreKeyedSloppy:
case FeedbackSlotKind::kStoreKeyedStrict:
+ case FeedbackSlotKind::kStoreInArrayLiteral:
case FeedbackSlotKind::kStoreDataPropertyInLiteral:
case FeedbackSlotKind::kTypeProfile:
case FeedbackSlotKind::kInstanceOf:
@@ -322,9 +312,7 @@ void FeedbackVector::AddToVectorsForProfilingTools(
void FeedbackVector::SetOptimizedCode(Handle<FeedbackVector> vector,
Handle<Code> code) {
DCHECK_EQ(code->kind(), Code::OPTIMIZED_FUNCTION);
- Factory* factory = vector->GetIsolate()->factory();
- Handle<WeakCell> cell = factory->NewWeakCell(code);
- vector->set_optimized_code_cell(*cell);
+ vector->set_optimized_code_weak_or_smi(HeapObjectReference::Weak(*code));
}
void FeedbackVector::ClearOptimizedCode() {
@@ -338,21 +326,22 @@ void FeedbackVector::ClearOptimizationMarker() {
}
void FeedbackVector::SetOptimizationMarker(OptimizationMarker marker) {
- set_optimized_code_cell(Smi::FromEnum(marker));
+ set_optimized_code_weak_or_smi(MaybeObject::FromSmi(Smi::FromEnum(marker)));
}
void FeedbackVector::EvictOptimizedCodeMarkedForDeoptimization(
SharedFunctionInfo* shared, const char* reason) {
- Object* slot = optimized_code_cell();
- if (slot->IsSmi()) return;
+ MaybeObject* slot = optimized_code_weak_or_smi();
+ if (slot->IsSmi()) {
+ return;
+ }
- WeakCell* cell = WeakCell::cast(slot);
- if (cell->cleared()) {
+ if (slot->IsClearedWeakHeapObject()) {
ClearOptimizationMarker();
return;
}
- Code* code = Code::cast(cell->value());
+ Code* code = Code::cast(slot->GetHeapObject());
if (code->marked_for_deoptimization()) {
if (FLAG_trace_deopt) {
PrintF("[evicting optimizing code marked for deoptimization (%s) for ",
@@ -469,6 +458,7 @@ bool FeedbackNexus::Clear() {
case FeedbackSlotKind::kStoreNamedStrict:
case FeedbackSlotKind::kStoreKeyedSloppy:
case FeedbackSlotKind::kStoreKeyedStrict:
+ case FeedbackSlotKind::kStoreInArrayLiteral:
case FeedbackSlotKind::kStoreOwnNamed:
case FeedbackSlotKind::kLoadProperty:
case FeedbackSlotKind::kLoadKeyed:
@@ -548,10 +538,12 @@ InlineCacheState FeedbackNexus::StateFromFeedback() const {
}
return UNINITIALIZED;
}
+
case FeedbackSlotKind::kStoreNamedSloppy:
case FeedbackSlotKind::kStoreNamedStrict:
case FeedbackSlotKind::kStoreKeyedSloppy:
case FeedbackSlotKind::kStoreKeyedStrict:
+ case FeedbackSlotKind::kStoreInArrayLiteral:
case FeedbackSlotKind::kStoreOwnNamed:
case FeedbackSlotKind::kLoadProperty:
case FeedbackSlotKind::kLoadKeyed: {
@@ -773,7 +765,8 @@ void FeedbackNexus::ConfigurePolymorphic(Handle<Name> name,
int FeedbackNexus::ExtractMaps(MapHandles* maps) const {
DCHECK(IsLoadICKind(kind()) || IsStoreICKind(kind()) ||
IsKeyedLoadICKind(kind()) || IsKeyedStoreICKind(kind()) ||
- IsStoreOwnICKind(kind()) || IsStoreDataPropertyInLiteralKind(kind()));
+ IsStoreOwnICKind(kind()) || IsStoreDataPropertyInLiteralKind(kind()) ||
+ IsStoreInArrayLiteralICKind(kind()));
Isolate* isolate = GetIsolate();
Object* feedback = GetFeedback();
@@ -851,7 +844,8 @@ MaybeHandle<Object> FeedbackNexus::FindHandlerForMap(Handle<Map> map) const {
bool FeedbackNexus::FindHandlers(ObjectHandles* code_list, int length) const {
DCHECK(IsLoadICKind(kind()) || IsStoreICKind(kind()) ||
IsKeyedLoadICKind(kind()) || IsKeyedStoreICKind(kind()) ||
- IsStoreOwnICKind(kind()) || IsStoreDataPropertyInLiteralKind(kind()));
+ IsStoreOwnICKind(kind()) || IsStoreDataPropertyInLiteralKind(kind()) ||
+ IsStoreInArrayLiteralICKind(kind()));
Object* feedback = GetFeedback();
Isolate* isolate = GetIsolate();
@@ -914,7 +908,7 @@ KeyedAccessLoadMode FeedbackNexus::GetKeyedAccessLoadMode() const {
}
KeyedAccessStoreMode FeedbackNexus::GetKeyedAccessStoreMode() const {
- DCHECK(IsKeyedStoreICKind(kind()));
+ DCHECK(IsKeyedStoreICKind(kind()) || IsStoreInArrayLiteralICKind(kind()));
KeyedAccessStoreMode mode = STANDARD_STORE;
MapHandles maps;
ObjectHandles handlers;
@@ -944,6 +938,7 @@ KeyedAccessStoreMode FeedbackNexus::GetKeyedAccessStoreMode() const {
CHECK(major_key == CodeStub::KeyedStoreSloppyArguments ||
major_key == CodeStub::StoreFastElement ||
major_key == CodeStub::StoreSlowElement ||
+ major_key == CodeStub::StoreInArrayLiteralSlow ||
major_key == CodeStub::ElementsTransitionAndStore ||
major_key == CodeStub::NoCache);
if (major_key != CodeStub::NoCache) {
@@ -956,7 +951,8 @@ KeyedAccessStoreMode FeedbackNexus::GetKeyedAccessStoreMode() const {
}
IcCheckType FeedbackNexus::GetKeyType() const {
- DCHECK(IsKeyedStoreICKind(kind()) || IsKeyedLoadICKind(kind()));
+ DCHECK(IsKeyedStoreICKind(kind()) || IsKeyedLoadICKind(kind()) ||
+ IsStoreInArrayLiteralICKind(kind()));
Object* feedback = GetFeedback();
if (feedback == *FeedbackVector::MegamorphicSentinel(GetIsolate())) {
return static_cast<IcCheckType>(Smi::ToInt(GetFeedbackExtra()));
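
Editor's note: GetKind and SetKind above no longer go through Smis in a FixedArray; they index into raw int32 words via VectorICComputer and decode/encode the 5-bit slot-kind values in place (kFeedbackSlotKindBits is defined further down in this diff, in feedback-vector.h). A simplified sketch of that kind of bit packing, with the constants hard-coded for illustration, might be:

#include <cstdint>
#include <cstdio>

// Simplified sketch of packing small enum values into 32-bit words, in the
// spirit of the VectorICComputer/BitSetComputer used by GetKind/SetKind.
// Assumes 5 bits per slot kind and 32-bit data words.
constexpr int kBitsPerItem = 5;
constexpr int kBitsPerWord = 32;
constexpr int kItemsPerWord = kBitsPerWord / kBitsPerItem;  // 6 kinds per word.
constexpr uint32_t kMask = (1u << kBitsPerItem) - 1;

int WordIndex(int slot) { return slot / kItemsPerWord; }
int Shift(int slot) { return (slot % kItemsPerWord) * kBitsPerItem; }

uint32_t Decode(uint32_t word, int slot) { return (word >> Shift(slot)) & kMask; }

uint32_t Encode(uint32_t word, int slot, uint32_t kind) {
  int shift = Shift(slot);
  return (word & ~(kMask << shift)) | (kind << shift);
}

int main() {
  uint32_t words[2] = {0, 0};                                 // Room for 12 slots.
  words[WordIndex(7)] = Encode(words[WordIndex(7)], 7, 13);   // Store kind 13 in slot 7.
  std::printf("%u\n", Decode(words[WordIndex(7)], 7));        // Prints 13.
}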
diff --git a/deps/v8/src/feedback-vector.h b/deps/v8/src/feedback-vector.h
index 8faff32649..02acb79751 100644
--- a/deps/v8/src/feedback-vector.h
+++ b/deps/v8/src/feedback-vector.h
@@ -42,6 +42,7 @@ enum class FeedbackSlotKind {
kStoreNamedStrict,
kStoreOwnNamed,
kStoreKeyedStrict,
+ kStoreInArrayLiteral,
kBinaryOp,
kCompareOp,
kStoreDataPropertyInLiteral,
@@ -94,6 +95,10 @@ inline bool IsKeyedStoreICKind(FeedbackSlotKind kind) {
kind == FeedbackSlotKind::kStoreKeyedStrict;
}
+inline bool IsStoreInArrayLiteralICKind(FeedbackSlotKind kind) {
+ return kind == FeedbackSlotKind::kStoreInArrayLiteral;
+}
+
inline bool IsGlobalICKind(FeedbackSlotKind kind) {
return IsLoadGlobalICKind(kind) || IsStoreGlobalICKind(kind);
}
@@ -151,9 +156,9 @@ class FeedbackVector : public HeapObject {
// feedback vector.
DECL_ACCESSORS(shared_function_info, SharedFunctionInfo)
- // [optimized_code_cell]: WeakCell containing optimized code or a Smi marker
- // defining optimization behaviour.
- DECL_ACCESSORS(optimized_code_cell, Object)
+ // [optimized_code_weak_or_smi]: weak reference to optimized code or a Smi
+ // marker defining optimization behaviour.
+ DECL_ACCESSORS(optimized_code_weak_or_smi, MaybeObject)
// [length]: The length of the feedback vector (not including the header, i.e.
// the number of feedback slots).
@@ -364,6 +369,10 @@ class V8_EXPORT_PRIVATE FeedbackVectorSpec {
: FeedbackSlotKind::kStoreKeyedSloppy);
}
+ FeedbackSlot AddStoreInArrayLiteralICSlot() {
+ return AddSlot(FeedbackSlotKind::kStoreInArrayLiteral);
+ }
+
FeedbackSlot AddBinaryOpICSlot() {
return AddSlot(FeedbackSlotKind::kBinaryOp);
}
@@ -403,19 +412,21 @@ class V8_EXPORT_PRIVATE FeedbackVectorSpec {
ZoneVector<unsigned char> slot_kinds_;
};
-// The shape of the FeedbackMetadata is an array with:
-// 0: slot_count
-// 1: names table
-// 2: parameters table
-// 3..N: slot kinds packed into a bit vector
-//
-class FeedbackMetadata : public FixedArray {
+// FeedbackMetadata is an array-like object with a slot count (indicating how
+// many slots are stored). We save space by packing several slots into an array
+// of int32 data. The length is never stored - it is always calculated from
+// slot_count. All instances are created through the static New function, and
+// the number of slots is static once an instance is created.
+class FeedbackMetadata : public HeapObject {
public:
// Casting.
static inline FeedbackMetadata* cast(Object* obj);
- static const int kSlotsCountIndex = 0;
- static const int kReservedIndexCount = 1;
+ // The number of slots that this metadata contains. Stored as an int32.
+ DECL_INT32_ACCESSORS(slot_count)
+
+ // Get slot_count using an acquire load.
+ inline int32_t synchronized_slot_count() const;
// Returns number of feedback vector elements used by given slot kind.
static inline int GetSlotSize(FeedbackSlotKind kind);
@@ -424,9 +435,6 @@ class FeedbackMetadata : public FixedArray {
inline bool is_empty() const;
- // Returns number of slots in the vector.
- inline int slot_count() const;
-
// Returns slot kind for given slot.
FeedbackSlotKind GetKind(FeedbackSlot slot) const;
@@ -440,21 +448,47 @@ class FeedbackMetadata : public FixedArray {
#endif // OBJECT_PRINT
DECL_PRINTER(FeedbackMetadata)
+ DECL_VERIFIER(FeedbackMetadata)
static const char* Kind2String(FeedbackSlotKind kind);
bool HasTypeProfileSlot() const;
+ // Garbage collection support.
+ // This includes any necessary padding at the end of the object for pointer
+ // size alignment.
+ static int SizeFor(int slot_count) {
+ return OBJECT_POINTER_ALIGN(kHeaderSize + length(slot_count) * kInt32Size);
+ }
+
+ static const int kSlotCountOffset = HeapObject::kHeaderSize;
+ static const int kHeaderSize = kSlotCountOffset + kInt32Size;
+
+ class BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
+
private:
friend class AccessorAssembler;
+ // Raw accessors to the encoded slot data.
+ inline int32_t get(int index) const;
+ inline void set(int index, int32_t value);
+
+ // The number of int32 data fields needed to store {slot_count} slots.
+ // Does not include any extra padding for pointer size alignment.
+ static int length(int slot_count) {
+ return VectorICComputer::word_count(slot_count);
+ }
+ inline int length() const;
+
static const int kFeedbackSlotKindBits = 5;
STATIC_ASSERT(static_cast<int>(FeedbackSlotKind::kKindsNumber) <
(1 << kFeedbackSlotKindBits));
void SetKind(FeedbackSlot slot, FeedbackSlotKind kind);
- typedef BitSetComputer<FeedbackSlotKind, kFeedbackSlotKindBits, kSmiValueSize,
- uint32_t>
+ typedef BitSetComputer<FeedbackSlotKind, kFeedbackSlotKindBits,
+ kInt32Size * kBitsPerByte, uint32_t>
VectorICComputer;
DISALLOW_IMPLICIT_CONSTRUCTORS(FeedbackMetadata);
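
Editor's note: since the new FeedbackMetadata never stores its length, length() and SizeFor() are pure arithmetic over slot_count. The sketch below works the numbers for a 13-slot example under stated assumptions (64-bit build with an 8-byte map word, 8-byte object alignment, ceil-division word_count); the exact header constants are defined in the class above:

#include <cstdio>

// Back-of-the-envelope sketch of FeedbackMetadata::length() and SizeFor().
constexpr int kTaggedHeader = 8;                // Assumed HeapObject header (map word).
constexpr int kHeaderSize = kTaggedHeader + 4;  // + int32 slot_count field.
constexpr int kItemsPerWord = 32 / 5;           // 6 slot kinds per 32-bit data word.

constexpr int Length(int slot_count) {          // In the spirit of word_count().
  return (slot_count + kItemsPerWord - 1) / kItemsPerWord;
}
constexpr int SizeFor(int slot_count) {         // OBJECT_POINTER_ALIGN(header + data).
  return (kHeaderSize + Length(slot_count) * 4 + 7) & ~7;
}

int main() {
  std::printf("%d words, %d bytes\n", Length(13), SizeFor(13));  // 3 words, 24 bytes.
}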
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index 8fceed0783..311620ebc5 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -213,7 +213,8 @@ DEFINE_IMPLICATION(harmony_class_fields, harmony_private_fields)
V(harmony_do_expressions, "harmony do-expressions") \
V(harmony_class_fields, "harmony fields in class literals") \
V(harmony_static_fields, "harmony static fields in class literals") \
- V(harmony_bigint, "harmony arbitrary precision integers")
+ V(harmony_array_flatten, "harmony Array.prototype.flat{ten,Map}") \
+ V(harmony_string_matchall, "harmony String.prototype.matchAll")
// Features that are complete (but still behind --harmony/es-staging flag).
#define HARMONY_STAGED(V) \
@@ -221,7 +222,8 @@ DEFINE_IMPLICATION(harmony_class_fields, harmony_private_fields)
"harmony disallow non undefined primitive return value from class " \
"constructor") \
V(harmony_public_fields, "harmony public fields in class literals") \
- V(harmony_private_fields, "harmony private fields in class literals")
+ V(harmony_private_fields, "harmony private fields in class literals") \
+ V(harmony_numeric_separator, "harmony numeric separator between digits")
// Features that are shipping (turned on by default, but internal flag remains).
#define HARMONY_SHIPPING_BASE(V) \
@@ -234,6 +236,7 @@ DEFINE_IMPLICATION(harmony_class_fields, harmony_private_fields)
V(harmony_promise_finally, "harmony Promise.prototype.finally") \
V(harmony_optional_catch_binding, "allow omitting binding in catch blocks") \
V(harmony_import_meta, "harmony import.meta property") \
+ V(harmony_bigint, "harmony arbitrary precision integers") \
V(harmony_dynamic_import, "harmony dynamic import")
#ifdef V8_INTL_SUPPORT
@@ -270,8 +273,7 @@ HARMONY_SHIPPING(FLAG_SHIPPING_FEATURES)
#undef FLAG_SHIPPING_FEATURES
#ifdef V8_INTL_SUPPORT
-DEFINE_BOOL(icu_timezone_data, false,
- "get information about timezones from ICU")
+DEFINE_BOOL(icu_timezone_data, true, "get information about timezones from ICU")
#endif
#ifdef V8_ENABLE_FUTURE
@@ -477,6 +479,7 @@ DEFINE_BOOL(turbo_loop_peeling, true, "Turbofan loop peeling")
DEFINE_BOOL(turbo_loop_variable, true, "Turbofan loop variable optimization")
DEFINE_BOOL(turbo_cf_optimization, true, "optimize control flow in TurboFan")
DEFINE_BOOL(turbo_escape, true, "enable escape analysis")
+DEFINE_BOOL(turbo_allocation_folding, true, "Turbofan allocation folding")
DEFINE_BOOL(turbo_instruction_scheduling, false,
"enable instruction scheduling in TurboFan")
DEFINE_BOOL(turbo_stress_instruction_scheduling, false,
@@ -501,7 +504,9 @@ DEFINE_BOOL(untrusted_code_mitigations, V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS,
DEFINE_BOOL(turbo_disable_switch_jump_table, false,
"do not emit jump-tables in Turbofan")
DEFINE_IMPLICATION(untrusted_code_mitigations, turbo_disable_switch_jump_table)
+
DEFINE_BOOL(branch_load_poisoning, false, "Mask loads with branch conditions.")
+DEFINE_IMPLICATION(future, branch_load_poisoning)
// Flags to help platform porters
DEFINE_BOOL(minimal, false,
@@ -520,11 +525,8 @@ DEFINE_INT(wasm_num_compilation_tasks, 10,
"number of parallel compilation tasks for wasm")
DEFINE_DEBUG_BOOL(wasm_trace_native_heap, false,
"trace wasm native heap events")
-DEFINE_BOOL(wasm_jit_to_native, true,
- "JIT wasm code to native (not JS GC) memory")
DEFINE_BOOL(wasm_write_protect_code_memory, false,
"write protect code memory on the wasm native heap")
-DEFINE_IMPLICATION(future, wasm_jit_to_native)
DEFINE_BOOL(wasm_trace_serialization, false,
"trace serialization/deserialization")
DEFINE_BOOL(wasm_async_compilation, true,
@@ -541,6 +543,9 @@ DEFINE_UINT(wasm_max_mem_pages, v8::internal::wasm::kV8MaxWasmMemoryPages,
"maximum memory size of a wasm instance")
DEFINE_UINT(wasm_max_table_size, v8::internal::wasm::kV8MaxWasmTableSize,
"maximum table size of a wasm instance")
+DEFINE_BOOL(wasm_tier_up, false,
+ "enable basic tiering up to the optimizing compiler")
+DEFINE_IMPLICATION(wasm_tier_up, liftoff)
DEFINE_DEBUG_BOOL(trace_wasm_decoder, false, "trace decoding of wasm code")
DEFINE_DEBUG_BOOL(trace_wasm_decode_time, false,
"trace decoding time of wasm code")
@@ -587,6 +592,10 @@ DEFINE_BOOL(experimental_wasm_sat_f2i_conversions, false,
"enable non-trapping float-to-int conversions for wasm")
DEFINE_BOOL(experimental_wasm_se, false,
"enable prototype sign extension opcodes for wasm")
+DEFINE_BOOL(experimental_wasm_anyref, false,
+ "enable prototype anyref support for wasm")
+DEFINE_BOOL(experimental_wasm_mut_global, false,
+ "enable prototype import/export mutable global support for wasm")
DEFINE_BOOL(wasm_opt, false, "enable wasm optimization")
DEFINE_BOOL(wasm_no_bounds_checks, false,
@@ -672,7 +681,13 @@ DEFINE_BOOL(incremental_marking_wrappers, true,
DEFINE_BOOL(trace_unmapper, false, "Trace the unmapping")
DEFINE_BOOL(parallel_scavenge, true, "parallel scavenge")
DEFINE_BOOL(trace_parallel_scavenge, false, "trace parallel scavenge")
-DEFINE_BOOL(write_protect_code_memory, false, "write protect code memory")
+#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_ARM64)
+#define V8_WRITE_PROTECT_CODE_MEMORY_BOOL false
+#else
+#define V8_WRITE_PROTECT_CODE_MEMORY_BOOL true
+#endif
+DEFINE_BOOL(write_protect_code_memory, V8_WRITE_PROTECT_CODE_MEMORY_BOOL,
+ "write protect code memory")
#ifdef V8_CONCURRENT_MARKING
#define V8_CONCURRENT_MARKING_BOOL true
#else
@@ -683,11 +698,6 @@ DEFINE_BOOL(concurrent_marking, V8_CONCURRENT_MARKING_BOOL,
DEFINE_BOOL(parallel_marking, true, "use parallel marking in atomic pause")
DEFINE_IMPLICATION(parallel_marking, concurrent_marking)
DEFINE_BOOL(trace_concurrent_marking, false, "trace concurrent marking")
-DEFINE_BOOL(minor_mc_parallel_marking, true,
- "use parallel marking for the young generation")
-DEFINE_BOOL(trace_minor_mc_parallel_marking, false,
- "trace parallel marking for the young generation")
-DEFINE_BOOL(minor_mc, false, "perform young generation mark compact GCs")
DEFINE_BOOL(black_allocation, true, "use black allocation")
DEFINE_BOOL(concurrent_store_buffer, true,
"use concurrent store buffer processing")
@@ -695,6 +705,8 @@ DEFINE_BOOL(concurrent_sweeping, true, "use concurrent sweeping")
DEFINE_BOOL(parallel_compaction, true, "use parallel compaction")
DEFINE_BOOL(parallel_pointer_update, true,
"use parallel pointer update during compaction")
+DEFINE_BOOL(detect_ineffective_gcs_near_heap_limit, true,
+ "trigger out-of-memory failure to avoid GC storm near heap limit")
DEFINE_BOOL(trace_incremental_marking, false,
"trace progress of the incremental marking")
DEFINE_BOOL(trace_stress_marking, false, "trace stress marking progress")
@@ -829,7 +841,7 @@ DEFINE_BOOL(expose_trigger_failure, false, "expose trigger-failure extension")
DEFINE_INT(stack_trace_limit, 10, "number of stack frames to capture")
DEFINE_BOOL(builtins_in_stack_traces, false,
"show built-in functions in stack traces")
-DEFINE_BOOL(enable_experimental_builtins, true,
+DEFINE_BOOL(enable_experimental_builtins, false,
"enable new csa-based experimental builtins")
DEFINE_BOOL(disallow_code_generation_from_strings, false,
"disallow eval and friends")
@@ -955,7 +967,6 @@ DEFINE_BOOL(native_code_counters, false,
// objects.cc
DEFINE_BOOL(thin_strings, true, "Enable ThinString support")
-DEFINE_BOOL(trace_weak_arrays, false, "Trace WeakFixedArray usage")
DEFINE_BOOL(trace_prototype_users, false,
"Trace updates to prototype user tracking")
DEFINE_BOOL(use_verbose_printer, true, "allows verbose printing")
@@ -974,9 +985,6 @@ DEFINE_BOOL(preparser_scope_analysis, true,
"perform scope analysis for preparsed inner functions")
DEFINE_IMPLICATION(preparser_scope_analysis, aggressive_lazy_inner_functions)
-// compiler.cc
-DEFINE_BOOL(background_compile, true, "enable background compilation")
-
// simulator-arm.cc, simulator-arm64.cc and simulator-mips.cc
DEFINE_BOOL(trace_sim, false, "Trace simulator execution")
DEFINE_BOOL(debug_sim, false, "Enable debugging the simulator")
@@ -1029,13 +1037,6 @@ DEFINE_INT(fuzzer_random_seed, 0,
DEFINE_BOOL(trace_rail, false, "trace RAIL mode")
DEFINE_BOOL(print_all_exceptions, false,
"print exception object and stack trace on each thrown exception")
-#ifdef V8_EMBEDDED_BUILTINS
-DEFINE_BOOL(stress_off_heap_code, false,
- "Move code objects off-heap for testing.")
-#else
-FLAG_READONLY(BOOL, bool, stress_off_heap_code, false,
- "Move code objects off-heap for testing.")
-#endif
// runtime.cc
DEFINE_BOOL(runtime_call_stats, false, "report runtime call counts and times")
@@ -1069,12 +1070,28 @@ DEFINE_STRING(testing_string_flag, "Hello, world!", "string-flag")
DEFINE_INT(testing_prng_seed, 42, "Seed used for threading test randomness")
// mksnapshot.cc
+DEFINE_STRING(embedded_src, nullptr,
+ "Path for the generated embedded data file. (mksnapshot only)")
+DEFINE_STRING(
+ embedded_variant, nullptr,
+ "Label to disambiguate symbols in embedded data file. (mksnapshot only)")
DEFINE_STRING(startup_src, nullptr,
"Write V8 startup as C++ src. (mksnapshot only)")
DEFINE_STRING(startup_blob, nullptr,
"Write V8 startup blob file. (mksnapshot only)")
//
+// Minor mark compact collector flags.
+//
+#ifdef ENABLE_MINOR_MC
+DEFINE_BOOL(minor_mc_parallel_marking, true,
+ "use parallel marking for the young generation")
+DEFINE_BOOL(trace_minor_mc_parallel_marking, false,
+ "trace parallel marking for the young generation")
+DEFINE_BOOL(minor_mc, false, "perform young generation mark compact GCs")
+#endif // ENABLE_MINOR_MC
+
+//
// Dev shell flags
//
@@ -1218,6 +1235,9 @@ DEFINE_BOOL(prof_browser_mode, true,
DEFINE_STRING(logfile, "v8.log", "Specify the name of the log file.")
DEFINE_BOOL(logfile_per_isolate, true, "Separate log files for each isolate.")
DEFINE_BOOL(ll_prof, false, "Enable low-level linux profiler.")
+DEFINE_BOOL(interpreted_frames_native_stack, false,
+ "Show interpreted frames on the native stack (useful for external "
+ "profilers).")
DEFINE_BOOL(perf_basic_prof, false,
"Enable perf linux profiler (basic support).")
DEFINE_NEG_IMPLICATION(perf_basic_prof, compact_code_space)
@@ -1340,7 +1360,9 @@ DEFINE_NEG_IMPLICATION(single_threaded_gc, parallel_marking)
DEFINE_NEG_IMPLICATION(single_threaded_gc, parallel_pointer_update)
DEFINE_NEG_IMPLICATION(single_threaded_gc, parallel_scavenge)
DEFINE_NEG_IMPLICATION(single_threaded_gc, concurrent_store_buffer)
+#ifdef ENABLE_MINOR_MC
DEFINE_NEG_IMPLICATION(single_threaded_gc, minor_mc_parallel_marking)
+#endif // ENABLE_MINOR_MC
DEFINE_NEG_IMPLICATION(single_threaded_gc, concurrent_array_buffer_freeing)
#undef FLAG
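
Editor's note: several of the flag changes above are expressed as implications rather than plain defaults: DEFINE_IMPLICATION(wasm_tier_up, liftoff) turns Liftoff on whenever tier-up is requested, and the single_threaded_gc block uses DEFINE_NEG_IMPLICATION to switch the parallel GC flags off. A toy sketch of such a post-parse implication pass (not V8's actual flag machinery) is:

#include <cstdio>

// Toy sketch of flag implications, in the spirit of DEFINE_IMPLICATION and
// DEFINE_NEG_IMPLICATION above; field names are illustrative only.
struct Flags {
  bool wasm_tier_up = true;
  bool liftoff = false;
  bool single_threaded_gc = true;
  bool parallel_marking = true;
};

void ApplyImplications(Flags* f) {
  if (f->wasm_tier_up) f->liftoff = true;                   // implication
  if (f->single_threaded_gc) f->parallel_marking = false;   // negative implication
}

int main() {
  Flags f;
  ApplyImplications(&f);
  std::printf("liftoff=%d parallel_marking=%d\n", f.liftoff, f.parallel_marking);  // 1 0
}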
diff --git a/deps/v8/src/frame-constants.h b/deps/v8/src/frame-constants.h
index f042855657..46815273e9 100644
--- a/deps/v8/src/frame-constants.h
+++ b/deps/v8/src/frame-constants.h
@@ -50,23 +50,24 @@ namespace internal {
//
class CommonFrameConstants : public AllStatic {
public:
- static const int kCallerFPOffset = 0 * kPointerSize;
- static const int kCallerPCOffset = kCallerFPOffset + 1 * kFPOnStackSize;
- static const int kCallerSPOffset = kCallerPCOffset + 1 * kPCOnStackSize;
+ static constexpr int kCallerFPOffset = 0 * kPointerSize;
+ static constexpr int kCallerPCOffset = kCallerFPOffset + 1 * kFPOnStackSize;
+ static constexpr int kCallerSPOffset = kCallerPCOffset + 1 * kPCOnStackSize;
// Fixed part of the frame consists of return address, caller fp,
// constant pool (if FLAG_enable_embedded_constant_pool), context, and
// function. StandardFrame::IterateExpressions assumes that kLastObjectOffset
// is the last object pointer.
- static const int kFixedFrameSizeAboveFp = kPCOnStackSize + kFPOnStackSize;
- static const int kFixedSlotCountAboveFp =
+ static constexpr int kFixedFrameSizeAboveFp = kPCOnStackSize + kFPOnStackSize;
+ static constexpr int kFixedSlotCountAboveFp =
kFixedFrameSizeAboveFp / kPointerSize;
- static const int kCPSlotSize =
+ static constexpr int kCPSlotSize =
FLAG_enable_embedded_constant_pool ? kPointerSize : 0;
- static const int kCPSlotCount = kCPSlotSize / kPointerSize;
- static const int kConstantPoolOffset = kCPSlotSize ? -1 * kPointerSize : 0;
- static const int kContextOrFrameTypeSize = kPointerSize;
- static const int kContextOrFrameTypeOffset =
+ static constexpr int kCPSlotCount = kCPSlotSize / kPointerSize;
+ static constexpr int kConstantPoolOffset =
+ kCPSlotSize ? -1 * kPointerSize : 0;
+ static constexpr int kContextOrFrameTypeSize = kPointerSize;
+ static constexpr int kContextOrFrameTypeOffset =
-(kCPSlotSize + kContextOrFrameTypeSize);
};
@@ -103,15 +104,16 @@ class CommonFrameConstants : public AllStatic {
//
class StandardFrameConstants : public CommonFrameConstants {
public:
- static const int kFixedFrameSizeFromFp = 2 * kPointerSize + kCPSlotSize;
- static const int kFixedFrameSize =
+ static constexpr int kFixedFrameSizeFromFp = 2 * kPointerSize + kCPSlotSize;
+ static constexpr int kFixedFrameSize =
kFixedFrameSizeAboveFp + kFixedFrameSizeFromFp;
- static const int kFixedSlotCountFromFp = kFixedFrameSizeFromFp / kPointerSize;
- static const int kFixedSlotCount = kFixedFrameSize / kPointerSize;
- static const int kContextOffset = kContextOrFrameTypeOffset;
- static const int kFunctionOffset = -2 * kPointerSize - kCPSlotSize;
- static const int kExpressionsOffset = -3 * kPointerSize - kCPSlotSize;
- static const int kLastObjectOffset = kContextOffset;
+ static constexpr int kFixedSlotCountFromFp =
+ kFixedFrameSizeFromFp / kPointerSize;
+ static constexpr int kFixedSlotCount = kFixedFrameSize / kPointerSize;
+ static constexpr int kContextOffset = kContextOrFrameTypeOffset;
+ static constexpr int kFunctionOffset = -2 * kPointerSize - kCPSlotSize;
+ static constexpr int kExpressionsOffset = -3 * kPointerSize - kCPSlotSize;
+ static constexpr int kLastObjectOffset = kContextOffset;
};
// OptimizedBuiltinFrameConstants are used for TF-generated builtins. They
@@ -150,10 +152,10 @@ class StandardFrameConstants : public CommonFrameConstants {
//
class OptimizedBuiltinFrameConstants : public StandardFrameConstants {
public:
- static const int kArgCSize = kPointerSize;
- static const int kArgCOffset = -3 * kPointerSize - kCPSlotSize;
- static const int kFixedFrameSize = kFixedFrameSizeAboveFp - kArgCOffset;
- static const int kFixedSlotCount = kFixedFrameSize / kPointerSize;
+ static constexpr int kArgCSize = kPointerSize;
+ static constexpr int kArgCOffset = -3 * kPointerSize - kCPSlotSize;
+ static constexpr int kFixedFrameSize = kFixedFrameSizeAboveFp - kArgCOffset;
+ static constexpr int kFixedSlotCount = kFixedFrameSize / kPointerSize;
};
 // TypedFrames have a SMI type marker value below the saved FP/constant pool to
@@ -189,14 +191,15 @@ class OptimizedBuiltinFrameConstants : public StandardFrameConstants {
//
class TypedFrameConstants : public CommonFrameConstants {
public:
- static const int kFrameTypeSize = kContextOrFrameTypeSize;
- static const int kFrameTypeOffset = kContextOrFrameTypeOffset;
- static const int kFixedFrameSizeFromFp = kCPSlotSize + kFrameTypeSize;
- static const int kFixedSlotCountFromFp = kFixedFrameSizeFromFp / kPointerSize;
- static const int kFixedFrameSize =
+ static constexpr int kFrameTypeSize = kContextOrFrameTypeSize;
+ static constexpr int kFrameTypeOffset = kContextOrFrameTypeOffset;
+ static constexpr int kFixedFrameSizeFromFp = kCPSlotSize + kFrameTypeSize;
+ static constexpr int kFixedSlotCountFromFp =
+ kFixedFrameSizeFromFp / kPointerSize;
+ static constexpr int kFixedFrameSize =
StandardFrameConstants::kFixedFrameSizeAboveFp + kFixedFrameSizeFromFp;
- static const int kFixedSlotCount = kFixedFrameSize / kPointerSize;
- static const int kFirstPushedFrameValueOffset =
+ static constexpr int kFixedSlotCount = kFixedFrameSize / kPointerSize;
+ static constexpr int kFirstPushedFrameValueOffset =
-StandardFrameConstants::kCPSlotSize - kFrameTypeSize - kPointerSize;
};
@@ -206,44 +209,46 @@ class TypedFrameConstants : public CommonFrameConstants {
(TypedFrameConstants::kFixedFrameSize + (count)*kPointerSize)
#define TYPED_FRAME_SIZE_FROM_SP(count) \
(TypedFrameConstants::kFixedFrameSizeFromFp + (count)*kPointerSize)
-#define DEFINE_TYPED_FRAME_SIZES(count) \
- static const int kFixedFrameSize = TYPED_FRAME_SIZE(count); \
- static const int kFixedSlotCount = kFixedFrameSize / kPointerSize; \
- static const int kFixedFrameSizeFromFp = TYPED_FRAME_SIZE_FROM_SP(count); \
- static const int kFixedSlotCountFromFp = kFixedFrameSizeFromFp / kPointerSize
+#define DEFINE_TYPED_FRAME_SIZES(count) \
+ static constexpr int kFixedFrameSize = TYPED_FRAME_SIZE(count); \
+ static constexpr int kFixedSlotCount = kFixedFrameSize / kPointerSize; \
+ static constexpr int kFixedFrameSizeFromFp = \
+ TYPED_FRAME_SIZE_FROM_SP(count); \
+ static constexpr int kFixedSlotCountFromFp = \
+ kFixedFrameSizeFromFp / kPointerSize
class ArgumentsAdaptorFrameConstants : public TypedFrameConstants {
public:
// FP-relative.
- static const int kFunctionOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
- static const int kLengthOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
- static const int kPaddingOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(2);
+ static constexpr int kFunctionOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+ static constexpr int kLengthOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+ static constexpr int kPaddingOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(2);
DEFINE_TYPED_FRAME_SIZES(3);
};
class BuiltinFrameConstants : public TypedFrameConstants {
public:
// FP-relative.
- static const int kFunctionOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
- static const int kLengthOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+ static constexpr int kFunctionOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+ static constexpr int kLengthOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
DEFINE_TYPED_FRAME_SIZES(2);
};
class InternalFrameConstants : public TypedFrameConstants {
public:
// FP-relative.
- static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+ static constexpr int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
DEFINE_TYPED_FRAME_SIZES(1);
};
class ConstructFrameConstants : public TypedFrameConstants {
public:
// FP-relative.
- static const int kContextOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
- static const int kLengthOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
- static const int kConstructorOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(2);
- static const int kPaddingOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(3);
- static const int kNewTargetOrImplicitReceiverOffset =
+ static constexpr int kContextOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+ static constexpr int kLengthOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+ static constexpr int kConstructorOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(2);
+ static constexpr int kPaddingOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(3);
+ static constexpr int kNewTargetOrImplicitReceiverOffset =
TYPED_FRAME_PUSHED_VALUE_OFFSET(4);
DEFINE_TYPED_FRAME_SIZES(5);
};
@@ -251,12 +256,17 @@ class ConstructFrameConstants : public TypedFrameConstants {
class BuiltinContinuationFrameConstants : public TypedFrameConstants {
public:
// FP-relative.
- static const int kFunctionOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
- static const int kBuiltinOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+ static constexpr int kFunctionOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+ static constexpr int kFrameSPtoFPDeltaAtDeoptimize =
+ TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+ static constexpr int kBuiltinContextOffset =
+ TYPED_FRAME_PUSHED_VALUE_OFFSET(2);
+ static constexpr int kBuiltinOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(3);
+
// The argument count is in the first allocatable register, stored below the
// fixed part of the frame and therefore is not part of the fixed frame size.
- static const int kArgCOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(2);
- DEFINE_TYPED_FRAME_SIZES(2);
+ static constexpr int kArgCOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(4);
+ DEFINE_TYPED_FRAME_SIZES(4);
// Returns the number of padding stack slots needed when we have
// 'register_count' register slots.
@@ -268,44 +278,45 @@ class BuiltinContinuationFrameConstants : public TypedFrameConstants {
// Behaves like an exit frame but with target and new target args.
class BuiltinExitFrameConstants : public CommonFrameConstants {
public:
- static const int kNewTargetOffset = kCallerPCOffset + 1 * kPointerSize;
- static const int kTargetOffset = kNewTargetOffset + 1 * kPointerSize;
- static const int kArgcOffset = kTargetOffset + 1 * kPointerSize;
- static const int kPaddingOffset = kArgcOffset + 1 * kPointerSize;
- static const int kFirstArgumentOffset = kPaddingOffset + 1 * kPointerSize;
- static const int kNumExtraArgsWithReceiver = 5;
+ static constexpr int kNewTargetOffset = kCallerPCOffset + 1 * kPointerSize;
+ static constexpr int kTargetOffset = kNewTargetOffset + 1 * kPointerSize;
+ static constexpr int kArgcOffset = kTargetOffset + 1 * kPointerSize;
+ static constexpr int kPaddingOffset = kArgcOffset + 1 * kPointerSize;
+ static constexpr int kFirstArgumentOffset = kPaddingOffset + 1 * kPointerSize;
+ static constexpr int kNumExtraArgsWithReceiver = 5;
};
class InterpreterFrameConstants : public AllStatic {
public:
// Fixed frame includes bytecode array and bytecode offset.
- static const int kFixedFrameSize =
+ static constexpr int kFixedFrameSize =
StandardFrameConstants::kFixedFrameSize + 2 * kPointerSize;
- static const int kFixedFrameSizeFromFp =
+ static constexpr int kFixedFrameSizeFromFp =
StandardFrameConstants::kFixedFrameSizeFromFp + 2 * kPointerSize;
// FP-relative.
- static const int kLastParamFromFp = StandardFrameConstants::kCallerSPOffset;
- static const int kCallerPCOffsetFromFp =
+ static constexpr int kLastParamFromFp =
+ StandardFrameConstants::kCallerSPOffset;
+ static constexpr int kCallerPCOffsetFromFp =
StandardFrameConstants::kCallerPCOffset;
- static const int kBytecodeArrayFromFp =
+ static constexpr int kBytecodeArrayFromFp =
-StandardFrameConstants::kFixedFrameSizeFromFp - 1 * kPointerSize;
- static const int kBytecodeOffsetFromFp =
+ static constexpr int kBytecodeOffsetFromFp =
-StandardFrameConstants::kFixedFrameSizeFromFp - 2 * kPointerSize;
- static const int kRegisterFileFromFp =
+ static constexpr int kRegisterFileFromFp =
-StandardFrameConstants::kFixedFrameSizeFromFp - 3 * kPointerSize;
- static const int kExpressionsOffset = kRegisterFileFromFp;
+ static constexpr int kExpressionsOffset = kRegisterFileFromFp;
// Number of fixed slots in addition to a {StandardFrame}.
- static const int kExtraSlotCount =
+ static constexpr int kExtraSlotCount =
InterpreterFrameConstants::kFixedFrameSize / kPointerSize -
StandardFrameConstants::kFixedFrameSize / kPointerSize;
// Expression index for {StandardFrame::GetExpressionAddress}.
- static const int kBytecodeArrayExpressionIndex = -2;
- static const int kBytecodeOffsetExpressionIndex = -1;
- static const int kRegisterFileExpressionIndex = 0;
+ static constexpr int kBytecodeArrayExpressionIndex = -2;
+ static constexpr int kBytecodeOffsetExpressionIndex = -1;
+ static constexpr int kRegisterFileExpressionIndex = 0;
// Returns the number of stack slots needed for 'register_count' registers.
// This is needed because some architectures must pad the stack frame with
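
Editor's note: the switch from "static const" to "static constexpr" above does not change any values; the frame-constant classes remain small sums of pointer-sized slots. As a worked example, assuming a 64-bit target with the embedded constant pool disabled (kPointerSize = kFPOnStackSize = kPCOnStackSize = 8, kCPSlotSize = 0), DEFINE_TYPED_FRAME_SIZES(3) in ArgumentsAdaptorFrameConstants works out to:

// Illustrative numbers only (64-bit, no embedded constant pool).
// CommonFrameConstants:
//   kFixedFrameSizeAboveFp = kPCOnStackSize + kFPOnStackSize       = 16
// TypedFrameConstants:
//   kFixedFrameSizeFromFp  = kCPSlotSize + kFrameTypeSize          =  8
//   kFixedFrameSize        = 16 + 8                                = 24
// ArgumentsAdaptorFrameConstants (DEFINE_TYPED_FRAME_SIZES(3)):
//   kFixedFrameSize        = TYPED_FRAME_SIZE(3)         = 24 + 3*8 = 48
//   kFixedSlotCount        = 48 / kPointerSize                     =  6
//   kFixedFrameSizeFromFp  = TYPED_FRAME_SIZE_FROM_SP(3) =  8 + 3*8 = 32
//   kFixedSlotCountFromFp  = 32 / kPointerSize                     =  4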
diff --git a/deps/v8/src/frames-inl.h b/deps/v8/src/frames-inl.h
index f5a14471ba..b3efd79780 100644
--- a/deps/v8/src/frames-inl.h
+++ b/deps/v8/src/frames-inl.h
@@ -206,9 +206,6 @@ inline WasmToJsFrame::WasmToJsFrame(StackFrameIteratorBase* iterator)
inline JsToWasmFrame::JsToWasmFrame(StackFrameIteratorBase* iterator)
: StubFrame(iterator) {}
-inline WasmToWasmFrame::WasmToWasmFrame(StackFrameIteratorBase* iterator)
- : StubFrame(iterator) {}
-
inline CWasmEntryFrame::CWasmEntryFrame(StackFrameIteratorBase* iterator)
: StubFrame(iterator) {}
@@ -228,6 +225,11 @@ inline JavaScriptBuiltinContinuationFrame::JavaScriptBuiltinContinuationFrame(
StackFrameIteratorBase* iterator)
: JavaScriptFrame(iterator) {}
+inline JavaScriptBuiltinContinuationWithCatchFrame::
+ JavaScriptBuiltinContinuationWithCatchFrame(
+ StackFrameIteratorBase* iterator)
+ : JavaScriptBuiltinContinuationFrame(iterator) {}
+
inline JavaScriptFrameIterator::JavaScriptFrameIterator(
Isolate* isolate)
: iterator_(isolate) {
@@ -270,7 +272,7 @@ JavaScriptFrame* StackTraceFrameIterator::javascript_frame() const {
inline StackFrame* SafeStackFrameIterator::frame() const {
DCHECK(!done());
DCHECK(frame_->is_java_script() || frame_->is_exit() ||
- frame_->is_builtin_exit());
+ frame_->is_builtin_exit() || frame_->is_wasm());
return frame_;
}
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index a63a85e7fc..4bf0483daa 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -169,7 +169,8 @@ bool StackTraceFrameIterator::IsValidFrame(StackFrame* frame) const {
namespace {
-bool IsInterpreterFramePc(Isolate* isolate, Address pc) {
+bool IsInterpreterFramePc(Isolate* isolate, Address pc,
+ StackFrame::State* state) {
Code* interpreter_entry_trampoline =
isolate->builtins()->builtin(Builtins::kInterpreterEntryTrampoline);
Code* interpreter_bytecode_advance =
@@ -177,12 +178,30 @@ bool IsInterpreterFramePc(Isolate* isolate, Address pc) {
Code* interpreter_bytecode_dispatch =
isolate->builtins()->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
- return (pc >= interpreter_entry_trampoline->InstructionStart() &&
- pc < interpreter_entry_trampoline->InstructionEnd()) ||
- (pc >= interpreter_bytecode_advance->InstructionStart() &&
- pc < interpreter_bytecode_advance->InstructionEnd()) ||
- (pc >= interpreter_bytecode_dispatch->InstructionStart() &&
- pc < interpreter_bytecode_dispatch->InstructionEnd());
+ if (interpreter_entry_trampoline->contains(pc) ||
+ interpreter_bytecode_advance->contains(pc) ||
+ interpreter_bytecode_dispatch->contains(pc)) {
+ return true;
+ } else if (FLAG_interpreted_frames_native_stack) {
+ intptr_t marker = Memory::intptr_at(
+ state->fp + CommonFrameConstants::kContextOrFrameTypeOffset);
+ MSAN_MEMORY_IS_INITIALIZED(
+ state->fp + StandardFrameConstants::kFunctionOffset, kPointerSize);
+ Object* maybe_function =
+ Memory::Object_at(state->fp + StandardFrameConstants::kFunctionOffset);
+ // There's no need to run a full ContainsSlow if we know the frame can't be
+    // an InterpretedFrame, so we do these fast checks first.
+ if (StackFrame::IsTypeMarker(marker) || maybe_function->IsSmi()) {
+ return false;
+ } else if (!isolate->heap()->code_space()->ContainsSlow(pc)) {
+ return false;
+ }
+ interpreter_entry_trampoline =
+ isolate->heap()->GcSafeFindCodeForInnerPointer(pc);
+ return interpreter_entry_trampoline->is_interpreter_trampoline_builtin();
+ } else {
+ return false;
+ }
}
DISABLE_ASAN Address ReadMemoryAt(Address address) {
@@ -192,12 +211,8 @@ DISABLE_ASAN Address ReadMemoryAt(Address address) {
WasmInstanceObject* LookupWasmInstanceObjectFromStandardFrame(
const StandardFrame* frame) {
// TODO(titzer): WASM instances cannot be found from the code in the future.
- WasmInstanceObject* ret =
- FLAG_wasm_jit_to_native
- ? WasmInstanceObject::GetOwningInstance(
- frame->isolate()->wasm_engine()->code_manager()->LookupCode(
- frame->pc()))
- : WasmInstanceObject::GetOwningInstanceGC(frame->LookupCode());
+ WasmInstanceObject* ret = WasmInstanceObject::GetOwningInstance(
+ frame->isolate()->wasm_engine()->code_manager()->LookupCode(frame->pc()));
// This is a live stack frame, there must be a live wasm instance available.
DCHECK_NOT_NULL(ret);
return ret;
@@ -233,7 +248,7 @@ SafeStackFrameIterator::SafeStackFrameIterator(
if (IsValidStackAddress(sp)) {
MSAN_MEMORY_IS_INITIALIZED(sp, kPointerSize);
Address tos = ReadMemoryAt(reinterpret_cast<Address>(sp));
- if (IsInterpreterFramePc(isolate, tos)) {
+ if (IsInterpreterFramePc(isolate, tos, &state)) {
state.pc_address = reinterpret_cast<Address*>(sp);
advance_frame = false;
}
@@ -301,7 +316,7 @@ void SafeStackFrameIterator::AdvanceOneFrame() {
if (!frame_) return;
// Check that we have actually moved to the previous frame in the stack.
- if (frame_->sp() < last_sp || frame_->fp() < last_fp) {
+ if (frame_->sp() <= last_sp || frame_->fp() <= last_fp) {
frame_ = nullptr;
}
}
@@ -363,7 +378,7 @@ void SafeStackFrameIterator::Advance() {
last_callback_scope = external_callback_scope_;
external_callback_scope_ = external_callback_scope_->previous();
}
- if (frame_->is_java_script()) break;
+ if (frame_->is_java_script() || frame_->is_wasm()) break;
if (frame_->is_exit() || frame_->is_builtin_exit()) {
// Some of the EXIT frames may have ExternalCallbackScope allocated on
// top of them. In that case the scope corresponds to the first EXIT
@@ -440,8 +455,8 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
if (!StackFrame::IsTypeMarker(marker)) {
if (maybe_function->IsSmi()) {
return NATIVE;
- } else if (IsInterpreterFramePc(iterator->isolate(),
- *(state->pc_address))) {
+ } else if (IsInterpreterFramePc(iterator->isolate(), *(state->pc_address),
+ state)) {
return INTERPRETED;
} else {
return OPTIMIZED;
@@ -449,10 +464,8 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
}
} else {
Address pc = *(state->pc_address);
- // If FLAG_wasm_jit_to_native is disabled, we still have an empty
- // wasm_code_manager, and this test will be false. This is easier to read
- // than checking the flag, then getting the code, and then, if both are true
- // (non-null, respectivelly), going down the wasm_code path.
+ // If the {pc} does not point into WebAssembly code we can rely on the
+ // returned {wasm_code} to be null and fall back to {GetContainingCode}.
wasm::WasmCode* wasm_code =
iterator->isolate()->wasm_engine()->code_manager()->LookupCode(pc);
if (wasm_code != nullptr) {
@@ -466,7 +479,6 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
if (StackFrame::IsTypeMarker(marker)) break;
return BUILTIN;
case wasm::WasmCode::kWasmToJsWrapper:
- case wasm::WasmCode::kWasmToWasmWrapper:
return WASM_TO_JS;
default:
UNREACHABLE();
@@ -518,6 +530,7 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
case EXIT:
case BUILTIN_CONTINUATION:
case JAVA_SCRIPT_BUILTIN_CONTINUATION:
+ case JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH:
case BUILTIN_EXIT:
case STUB:
case INTERNAL:
@@ -774,7 +787,7 @@ Object* StandardFrame::context() const {
int StandardFrame::position() const {
AbstractCode* code = AbstractCode::cast(LookupCode());
- int code_offset = static_cast<int>(pc() - code->instruction_start());
+ int code_offset = static_cast<int>(pc() - code->InstructionStart());
return code->SourcePosition(code_offset);
}
@@ -819,9 +832,7 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
// Find the code and compute the safepoint information.
Address inner_pointer = pc();
const wasm::WasmCode* wasm_code =
- FLAG_wasm_jit_to_native
- ? isolate()->wasm_engine()->code_manager()->LookupCode(inner_pointer)
- : nullptr;
+ isolate()->wasm_engine()->code_manager()->LookupCode(inner_pointer);
SafepointEntry safepoint_entry;
uint32_t stack_slots;
Code* code = nullptr;
@@ -863,6 +874,7 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
case EXIT:
case BUILTIN_CONTINUATION:
case JAVA_SCRIPT_BUILTIN_CONTINUATION:
+ case JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH:
case BUILTIN_EXIT:
case ARGUMENTS_ADAPTOR:
case STUB:
@@ -870,7 +882,6 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
case CONSTRUCT:
case JS_TO_WASM:
case WASM_TO_JS:
- case WASM_TO_WASM:
case WASM_COMPILED:
case WASM_INTERPRETER_ENTRY:
case C_WASM_ENTRY:
@@ -1020,10 +1031,18 @@ Code* JavaScriptFrame::unchecked_code() const {
int JavaScriptFrame::GetNumberOfIncomingArguments() const {
DCHECK(can_access_heap_objects() &&
isolate()->heap()->gc_state() == Heap::NOT_IN_GC);
-
return function()->shared()->internal_formal_parameter_count();
}
+int OptimizedFrame::GetNumberOfIncomingArguments() const {
+ Code* code = LookupCode();
+ if (code->kind() == Code::BUILTIN) {
+ return static_cast<int>(
+ Memory::intptr_at(fp() + OptimizedBuiltinFrameConstants::kArgCOffset));
+ } else {
+ return JavaScriptFrame::GetNumberOfIncomingArguments();
+ }
+}
Address JavaScriptFrame::GetCallerStackPointer() const {
return fp() + StandardFrameConstants::kCallerSPOffset;
@@ -1220,6 +1239,30 @@ int JavaScriptBuiltinContinuationFrame::ComputeParametersCount() const {
return Smi::ToInt(argc_object);
}
+intptr_t JavaScriptBuiltinContinuationFrame::GetSPToFPDelta() const {
+ Address height_slot =
+ fp() + BuiltinContinuationFrameConstants::kFrameSPtoFPDeltaAtDeoptimize;
+ intptr_t height = Smi::ToInt(*reinterpret_cast<Smi**>(height_slot));
+ return height;
+}
+
+Object* JavaScriptBuiltinContinuationFrame::context() const {
+ return Memory::Object_at(
+ fp() + BuiltinContinuationFrameConstants::kBuiltinContextOffset);
+}
+
+void JavaScriptBuiltinContinuationWithCatchFrame::SetException(
+ Object* exception) {
+ Address exception_argument_slot =
+ fp() + JavaScriptFrameConstants::kLastParameterOffset +
+ kPointerSize; // Skip over return value slot.
+
+ // Only allow setting exception if previous value was the hole.
+ CHECK_EQ(isolate()->heap()->the_hole_value(),
+ Memory::Object_at(exception_argument_slot));
+ Memory::Object_at(exception_argument_slot) = exception;
+}
+
FrameSummary::JavaScriptFrameSummary::JavaScriptFrameSummary(
Isolate* isolate, Object* receiver, JSFunction* function,
AbstractCode* abstract_code, int code_offset, bool is_constructor)
@@ -1306,7 +1349,7 @@ Handle<Context> FrameSummary::WasmFrameSummary::native_context() const {
}
FrameSummary::WasmCompiledFrameSummary::WasmCompiledFrameSummary(
- Isolate* isolate, Handle<WasmInstanceObject> instance, WasmCodeWrapper code,
+ Isolate* isolate, Handle<WasmInstanceObject> instance, wasm::WasmCode* code,
int code_offset, bool at_to_number_conversion)
: WasmFrameSummary(isolate, WASM_COMPILED, instance,
at_to_number_conversion),
@@ -1314,15 +1357,7 @@ FrameSummary::WasmCompiledFrameSummary::WasmCompiledFrameSummary(
code_offset_(code_offset) {}
uint32_t FrameSummary::WasmCompiledFrameSummary::function_index() const {
- if (code().IsCodeObject()) {
- FixedArray* deopt_data = code().GetCode()->deoptimization_data();
- DCHECK_EQ(2, deopt_data->length());
- DCHECK(deopt_data->get(1)->IsSmi());
- int val = Smi::ToInt(deopt_data->get(1));
- DCHECK_LE(0, val);
- return static_cast<uint32_t>(val);
- }
- return code().GetWasmCode()->index();
+ return code()->index();
}
int FrameSummary::WasmCompiledFrameSummary::GetWasmSourcePosition(
@@ -1330,10 +1365,7 @@ int FrameSummary::WasmCompiledFrameSummary::GetWasmSourcePosition(
int position = 0;
// Subtract one because the current PC is one instruction after the call site.
offset--;
- Handle<ByteArray> source_position_table(
- ByteArray::cast(code->owner()->compiled_module()->source_positions()->get(
- code->index())));
- for (SourcePositionTableIterator iterator(source_position_table);
+ for (SourcePositionTableIterator iterator(code->source_positions());
!iterator.done() && iterator.code_offset() <= offset;
iterator.Advance()) {
position = iterator.source_position().ScriptOffset();
@@ -1342,10 +1374,7 @@ int FrameSummary::WasmCompiledFrameSummary::GetWasmSourcePosition(
}
int FrameSummary::WasmCompiledFrameSummary::byte_offset() const {
- if (code().IsCodeObject()) {
- return AbstractCode::cast(*code().GetCode())->SourcePosition(code_offset());
- }
- return GetWasmSourcePosition(code_.GetWasmCode(), code_offset());
+ return GetWasmSourcePosition(code_, code_offset());
}
FrameSummary::WasmInterpretedFrameSummary::WasmInterpretedFrameSummary(
@@ -1450,7 +1479,9 @@ void OptimizedFrame::Summarize(std::vector<FrameSummary>* frames) const {
bool is_constructor = IsConstructor();
for (auto it = translated.begin(); it != translated.end(); it++) {
if (it->kind() == TranslatedFrame::kInterpretedFunction ||
- it->kind() == TranslatedFrame::kJavaScriptBuiltinContinuation) {
+ it->kind() == TranslatedFrame::kJavaScriptBuiltinContinuation ||
+ it->kind() ==
+ TranslatedFrame::kJavaScriptBuiltinContinuationWithCatch) {
Handle<SharedFunctionInfo> shared_info = it->shared_info();
// The translation commands are ordered and the function is always
@@ -1470,7 +1501,9 @@ void OptimizedFrame::Summarize(std::vector<FrameSummary>* frames) const {
// the translation corresponding to the frame type in question.
Handle<AbstractCode> abstract_code;
unsigned code_offset;
- if (it->kind() == TranslatedFrame::kJavaScriptBuiltinContinuation) {
+ if (it->kind() == TranslatedFrame::kJavaScriptBuiltinContinuation ||
+ it->kind() ==
+ TranslatedFrame::kJavaScriptBuiltinContinuationWithCatch) {
code_offset = 0;
abstract_code =
handle(AbstractCode::cast(isolate()->builtins()->builtin(
@@ -1589,7 +1622,9 @@ void OptimizedFrame::GetFunctions(
while (jsframe_count != 0) {
opcode = static_cast<Translation::Opcode>(it.Next());
if (opcode == Translation::INTERPRETED_FRAME ||
- opcode == Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME) {
+ opcode == Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME ||
+ opcode ==
+ Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME) {
it.Next(); // Skip bailout id.
jsframe_count--;
@@ -1625,7 +1660,7 @@ int InterpretedFrame::position() const {
int InterpretedFrame::LookupExceptionHandlerInTable(
int* context_register, HandlerTable::CatchPrediction* prediction) {
- HandlerTable table(function()->shared()->bytecode_array());
+ HandlerTable table(function()->shared()->GetBytecodeArray());
return table.LookupRange(GetBytecodeOffset(), context_register, prediction);
}
@@ -1694,7 +1729,7 @@ void InterpretedFrame::WriteInterpreterRegister(int register_index,
void InterpretedFrame::Summarize(std::vector<FrameSummary>* functions) const {
DCHECK(functions->empty());
AbstractCode* abstract_code =
- AbstractCode::cast(function()->shared()->bytecode_array());
+ AbstractCode::cast(function()->shared()->GetBytecodeArray());
FrameSummary::JavaScriptFrameSummary summary(
isolate(), receiver(), function(), abstract_code, GetBytecodeOffset(),
IsConstructor());
@@ -1738,14 +1773,12 @@ void WasmCompiledFrame::Print(StringStream* accumulator, PrintMode mode,
accumulator->Add("WASM [");
Script* script = this->script();
accumulator->PrintName(script->name());
- Address instruction_start = FLAG_wasm_jit_to_native
- ? isolate()
- ->wasm_engine()
- ->code_manager()
- ->LookupCode(pc())
- ->instructions()
- .start()
- : LookupCode()->instruction_start();
+ Address instruction_start = isolate()
+ ->wasm_engine()
+ ->code_manager()
+ ->LookupCode(pc())
+ ->instructions()
+ .start();
int pc = static_cast<int>(this->pc() - instruction_start);
Vector<const uint8_t> raw_func_name =
shared()->GetRawFunctionName(this->function_index());
@@ -1771,11 +1804,8 @@ Address WasmCompiledFrame::GetCallerStackPointer() const {
return fp() + ExitFrameConstants::kCallerSPOffset;
}
-WasmCodeWrapper WasmCompiledFrame::wasm_code() const {
- return FLAG_wasm_jit_to_native
- ? WasmCodeWrapper(
- isolate()->wasm_engine()->code_manager()->LookupCode(pc()))
- : WasmCodeWrapper(Handle<Code>(LookupCode(), isolate()));
+wasm::WasmCode* WasmCompiledFrame::wasm_code() const {
+ return isolate()->wasm_engine()->code_manager()->LookupCode(pc());
}
WasmInstanceObject* WasmCompiledFrame::wasm_instance() const {
@@ -1804,8 +1834,8 @@ int WasmCompiledFrame::position() const {
void WasmCompiledFrame::Summarize(std::vector<FrameSummary>* functions) const {
DCHECK(functions->empty());
- WasmCodeWrapper code = wasm_code();
- int offset = static_cast<int>(pc() - code.instructions().start());
+ wasm::WasmCode* code = wasm_code();
+ int offset = static_cast<int>(pc() - code->instructions().start());
Handle<WasmInstanceObject> instance(
LookupWasmInstanceObjectFromStandardFrame(this), isolate());
FrameSummary::WasmCompiledFrameSummary summary(
@@ -1817,22 +1847,14 @@ bool WasmCompiledFrame::at_to_number_conversion() const {
// Check whether our callee is a WASM_TO_JS frame, and this frame is at the
// ToNumber conversion call.
Address callee_pc = reinterpret_cast<Address>(this->callee_pc());
- int pos = -1;
- if (FLAG_wasm_jit_to_native) {
- wasm::WasmCode* code =
- callee_pc
- ? isolate()->wasm_engine()->code_manager()->LookupCode(callee_pc)
- : nullptr;
- if (!code || code->kind() != wasm::WasmCode::kWasmToJsWrapper) return false;
- int offset = static_cast<int>(callee_pc - code->instructions().start());
- pos = FrameSummary::WasmCompiledFrameSummary::GetWasmSourcePosition(code,
- offset);
- } else {
- Code* code = callee_pc ? isolate()->FindCodeObject(callee_pc) : nullptr;
- if (!code || code->kind() != Code::WASM_TO_JS_FUNCTION) return false;
- int offset = static_cast<int>(callee_pc - code->instruction_start());
- pos = AbstractCode::cast(code)->SourcePosition(offset);
- }
+ wasm::WasmCode* code =
+ callee_pc
+ ? isolate()->wasm_engine()->code_manager()->LookupCode(callee_pc)
+ : nullptr;
+ if (!code || code->kind() != wasm::WasmCode::kWasmToJsWrapper) return false;
+ int offset = static_cast<int>(callee_pc - code->instructions().start());
+ int pos = FrameSummary::WasmCompiledFrameSummary::GetWasmSourcePosition(
+ code, offset);
DCHECK(pos == 0 || pos == 1);
// The imported call has position 0, ToNumber has position 1.
return !!pos;
@@ -1840,13 +1862,6 @@ bool WasmCompiledFrame::at_to_number_conversion() const {
int WasmCompiledFrame::LookupExceptionHandlerInTable(int* stack_slots) {
DCHECK_NOT_NULL(stack_slots);
- if (!FLAG_wasm_jit_to_native) {
- Code* code = LookupCode();
- HandlerTable table(code);
- int pc_offset = static_cast<int>(pc() - code->entry());
- *stack_slots = code->stack_slots();
- return table.LookupReturn(pc_offset);
- }
wasm::WasmCode* code =
isolate()->wasm_engine()->code_manager()->LookupCode(pc());
if (!code->IsAnonymous() && code->handler_table_offset() > 0) {
@@ -1887,13 +1902,7 @@ void WasmInterpreterEntryFrame::Summarize(
}
}
-Code* WasmInterpreterEntryFrame::unchecked_code() const {
- if (FLAG_wasm_jit_to_native) {
- UNIMPLEMENTED();
- } else {
- return isolate()->FindCodeObject(pc());
- }
-}
+Code* WasmInterpreterEntryFrame::unchecked_code() const { UNREACHABLE(); }
// TODO(titzer): deprecate this method.
WasmInstanceObject* WasmInterpreterEntryFrame::wasm_instance() const {
@@ -1959,6 +1968,7 @@ void JavaScriptFrame::Print(StringStream* accumulator,
Code* code = nullptr;
if (IsConstructor()) accumulator->Add("new ");
accumulator->PrintFunction(function, receiver, &code);
+ accumulator->Add(" [%p]", function);
// Get scope information for nicer output, if possible. If code is nullptr, or
// doesn't contain scope info, scope_info will return 0 for the number of
@@ -1982,7 +1992,7 @@ void JavaScriptFrame::Print(StringStream* accumulator,
int line = script->GetLineNumber(source_pos) + 1;
accumulator->Add(":%d] [bytecode=%p offset=%d]", line, bytecodes, offset);
} else {
- int function_start_pos = shared->start_position();
+ int function_start_pos = shared->StartPosition();
int line = script->GetLineNumber(function_start_pos) + 1;
accumulator->Add(":~%d] [pc=%p]", line, pc);
}
@@ -2132,9 +2142,7 @@ void JavaScriptFrame::Iterate(RootVisitor* v) const {
void InternalFrame::Iterate(RootVisitor* v) const {
wasm::WasmCode* wasm_code =
- FLAG_wasm_jit_to_native
- ? isolate()->wasm_engine()->code_manager()->LookupCode(pc())
- : nullptr;
+ isolate()->wasm_engine()->code_manager()->LookupCode(pc());
if (wasm_code != nullptr) {
DCHECK(wasm_code->kind() == wasm::WasmCode::kLazyStub);
} else {
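
Several hunks in frames.cc above replace open-coded InstructionStart()/InstructionEnd() comparisons with a contains(pc) helper on the code object. A standalone sketch of that pattern with hypothetical types (InstructionRange is not the V8 class):

#include <cstdint>

using Address = uintptr_t;

// Let the code object answer contains(pc) instead of repeating the half-open
// range check at every call site.
struct InstructionRange {
  Address start;
  Address end;  // one past the last instruction byte

  bool contains(Address pc) const { return pc >= start && pc < end; }
};

bool IsInAnyRange(const InstructionRange* ranges, int count, Address pc) {
  for (int i = 0; i < count; i++) {
    if (ranges[i].contains(pc)) return true;
  }
  return false;
}

int main() {
  InstructionRange trampolines[] = {{0x1000, 0x1100}, {0x2000, 0x2040}};
  return IsInAnyRange(trampolines, 2, 0x2010) ? 0 : 1;  // 0: pc is inside
}
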
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index 2bea6a3ca3..d8438acfd7 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -93,7 +93,6 @@ class StackHandler BASE_EMBEDDED {
V(OPTIMIZED, OptimizedFrame) \
V(WASM_COMPILED, WasmCompiledFrame) \
V(WASM_TO_JS, WasmToJsFrame) \
- V(WASM_TO_WASM, WasmToWasmFrame) \
V(JS_TO_WASM, JsToWasmFrame) \
V(WASM_INTERPRETER_ENTRY, WasmInterpreterEntryFrame) \
V(C_WASM_ENTRY, CWasmEntryFrame) \
@@ -101,6 +100,8 @@ class StackHandler BASE_EMBEDDED {
V(STUB, StubFrame) \
V(BUILTIN_CONTINUATION, BuiltinContinuationFrame) \
V(JAVA_SCRIPT_BUILTIN_CONTINUATION, JavaScriptBuiltinContinuationFrame) \
+ V(JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH, \
+ JavaScriptBuiltinContinuationWithCatchFrame) \
V(INTERNAL, InternalFrame) \
V(CONSTRUCT, ConstructFrame) \
V(ARGUMENTS_ADAPTOR, ArgumentsAdaptorFrame) \
@@ -214,6 +215,9 @@ class StackFrame BASE_EMBEDDED {
bool is_java_script_builtin_continuation() const {
return type() == JAVA_SCRIPT_BUILTIN_CONTINUATION;
}
+ bool is_java_script_builtin_with_catch_continuation() const {
+ return type() == JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH;
+ }
bool is_construct() const { return type() == CONSTRUCT; }
bool is_builtin_exit() const { return type() == BUILTIN_EXIT; }
virtual bool is_standard() const { return false; }
@@ -221,7 +225,8 @@ class StackFrame BASE_EMBEDDED {
bool is_java_script() const {
Type type = this->type();
return (type == OPTIMIZED) || (type == INTERPRETED) || (type == BUILTIN) ||
- (type == JAVA_SCRIPT_BUILTIN_CONTINUATION);
+ (type == JAVA_SCRIPT_BUILTIN_CONTINUATION) ||
+ (type == JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH);
}
bool is_wasm() const {
Type type = this->type();
@@ -549,16 +554,16 @@ class FrameSummary BASE_EMBEDDED {
class WasmCompiledFrameSummary : public WasmFrameSummary {
public:
WasmCompiledFrameSummary(Isolate*, Handle<WasmInstanceObject>,
- WasmCodeWrapper, int code_offset,
+ wasm::WasmCode*, int code_offset,
bool at_to_number_conversion);
uint32_t function_index() const;
- WasmCodeWrapper code() const { return code_; }
+ wasm::WasmCode* code() const { return code_; }
int code_offset() const { return code_offset_; }
int byte_offset() const;
static int GetWasmSourcePosition(const wasm::WasmCode* code, int offset);
private:
- WasmCodeWrapper const code_;
+ wasm::WasmCode* const code_;
int code_offset_;
};
@@ -848,6 +853,8 @@ class OptimizedFrame : public JavaScriptFrame {
protected:
inline explicit OptimizedFrame(StackFrameIteratorBase* iterator);
+ int GetNumberOfIncomingArguments() const override;
+
private:
friend class StackFrameIteratorBase;
@@ -973,7 +980,7 @@ class WasmCompiledFrame final : public StandardFrame {
// Accessors.
WasmInstanceObject* wasm_instance() const; // TODO(titzer): deprecate.
- WasmCodeWrapper wasm_code() const;
+ wasm::WasmCode* wasm_code() const;
uint32_t function_index() const;
Script* script() const override;
int position() const override;
@@ -1059,17 +1066,6 @@ class JsToWasmFrame : public StubFrame {
friend class StackFrameIteratorBase;
};
-class WasmToWasmFrame : public StubFrame {
- public:
- Type type() const override { return WASM_TO_WASM; }
-
- protected:
- inline explicit WasmToWasmFrame(StackFrameIteratorBase* iterator);
-
- private:
- friend class StackFrameIteratorBase;
-};
-
class CWasmEntryFrame : public StubFrame {
public:
Type type() const override { return C_WASM_ENTRY; }
@@ -1150,6 +1146,9 @@ class JavaScriptBuiltinContinuationFrame : public JavaScriptFrame {
}
int ComputeParametersCount() const override;
+ intptr_t GetSPToFPDelta() const;
+
+ Object* context() const override;
protected:
inline explicit JavaScriptBuiltinContinuationFrame(
@@ -1159,6 +1158,30 @@ class JavaScriptBuiltinContinuationFrame : public JavaScriptFrame {
friend class StackFrameIteratorBase;
};
+class JavaScriptBuiltinContinuationWithCatchFrame
+ : public JavaScriptBuiltinContinuationFrame {
+ public:
+ Type type() const override {
+ return JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH;
+ }
+
+ static JavaScriptBuiltinContinuationWithCatchFrame* cast(StackFrame* frame) {
+ DCHECK(frame->is_java_script_builtin_with_catch_continuation());
+ return static_cast<JavaScriptBuiltinContinuationWithCatchFrame*>(frame);
+ }
+
+ // Patch in the exception object at the appropriate location into the stack
+ // frame.
+ void SetException(Object* exception);
+
+ protected:
+ inline explicit JavaScriptBuiltinContinuationWithCatchFrame(
+ StackFrameIteratorBase* iterator);
+
+ private:
+ friend class StackFrameIteratorBase;
+};
+
class StackFrameIteratorBase BASE_EMBEDDED {
public:
Isolate* isolate() const { return isolate_; }
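
The STACK_FRAME_TYPE_LIST edits above work because the frame types are generated from an X-macro: adding JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH (and dropping WASM_TO_WASM) in the V(...) list updates every expansion at once. A small self-contained sketch of the technique with hypothetical names:

#include <cstdio>

// Hypothetical frame list in the same X-macro style as STACK_FRAME_TYPE_LIST.
#define DEMO_FRAME_TYPE_LIST(V) \
  V(ENTRY, EntryFrame)          \
  V(OPTIMIZED, OptimizedFrame)  \
  V(WASM_TO_JS, WasmToJsFrame)

// One expansion produces the enum...
enum DemoType {
#define DECLARE_TYPE(type, ignore) DEMO_##type,
  DEMO_FRAME_TYPE_LIST(DECLARE_TYPE)
#undef DECLARE_TYPE
  DEMO_NUMBER_OF_TYPES
};

// ...and another produces per-type code, so adding or removing a list entry
// keeps every expansion consistent.
const char* DemoTypeName(DemoType t) {
  switch (t) {
#define TYPE_CASE(type, class_name) \
  case DEMO_##type:                 \
    return #class_name;
    DEMO_FRAME_TYPE_LIST(TYPE_CASE)
#undef TYPE_CASE
    default:
      return "unknown";
  }
}

int main() { std::printf("%s\n", DemoTypeName(DEMO_OPTIMIZED)); }
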
diff --git a/deps/v8/src/gdb-jit.cc b/deps/v8/src/gdb-jit.cc
index de56faa4fd..8fcd410643 100644
--- a/deps/v8/src/gdb-jit.cc
+++ b/deps/v8/src/gdb-jit.cc
@@ -979,11 +979,11 @@ class CodeDescription BASE_EMBEDDED {
}
uintptr_t CodeStart() const {
- return reinterpret_cast<uintptr_t>(code_->instruction_start());
+ return reinterpret_cast<uintptr_t>(code_->InstructionStart());
}
uintptr_t CodeEnd() const {
- return reinterpret_cast<uintptr_t>(code_->instruction_end());
+ return reinterpret_cast<uintptr_t>(code_->InstructionEnd());
}
uintptr_t CodeSize() const {
@@ -996,11 +996,7 @@ class CodeDescription BASE_EMBEDDED {
Script* script() { return Script::cast(shared_info_->script()); }
- bool IsLineInfoAvailable() {
- return has_script() && script()->source()->IsString() &&
- script()->HasValidSource() && script()->name()->IsString() &&
- lineinfo_ != nullptr;
- }
+ bool IsLineInfoAvailable() { return lineinfo_ != nullptr; }
#if V8_TARGET_ARCH_X64
uintptr_t GetStackStateStartAddress(StackState state) const {
@@ -1015,11 +1011,22 @@ class CodeDescription BASE_EMBEDDED {
#endif
std::unique_ptr<char[]> GetFilename() {
- return String::cast(script()->name())->ToCString();
+ if (shared_info_ != nullptr) {
+ return String::cast(script()->name())->ToCString();
+ } else {
+ std::unique_ptr<char[]> result(new char[1]);
+ result[0] = 0;
+ return result;
+ }
}
- int GetScriptLineNumber(int pos) { return script()->GetLineNumber(pos) + 1; }
-
+ int GetScriptLineNumber(int pos) {
+ if (shared_info_ != nullptr) {
+ return script()->GetLineNumber(pos) + 1;
+ } else {
+ return 0;
+ }
+ }
private:
const char* name_;
@@ -2167,6 +2174,7 @@ static void AddCode(const char* name, Code* code, SharedFunctionInfo* shared,
void EventHandler(const v8::JitCodeEvent* event) {
if (!FLAG_gdbjit) return;
+ if (event->code_type != v8::JitCodeEvent::JIT_CODE) return;
base::LockGuard<base::Mutex> lock_guard(mutex.Pointer());
switch (event->type) {
case v8::JitCodeEvent::CODE_ADDED: {
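
The new early return above means only machine-code events reach the GDB JIT hook; bytecode events are ignored. A hedged sketch of an embedder-side handler doing the same filtering with the public v8::JitCodeEvent fields (the registration call in the trailing comment is the conventional one, shown as an assumption, not copied from gdb-jit.cc):

#include <cstdio>
#include "v8.h"  // public embedder header

// Log machine-code additions only, mirroring the code_type filter added above.
static void DemoJitHandler(const v8::JitCodeEvent* event) {
  if (event->code_type != v8::JitCodeEvent::JIT_CODE) return;  // skip bytecode
  if (event->type != v8::JitCodeEvent::CODE_ADDED) return;
  // name.str is not null-terminated, so bound it with name.len.
  std::printf("jit code added: %.*s (%zu bytes)\n",
              static_cast<int>(event->name.len), event->name.str,
              event->code_len);
}

// Typically installed right after isolate creation (assumption, not from this
// file): isolate->SetJitCodeEventHandler(v8::kJitCodeEventDefault,
//                                        DemoJitHandler);
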
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index fe87060fb0..90467168ab 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -270,10 +270,8 @@ class GlobalHandles::Node {
// Zap with something dangerous.
*location() = reinterpret_cast<Object*>(0x6057CA11);
- typedef v8::WeakCallbackInfo<void> Data;
- auto callback = reinterpret_cast<Data::Callback>(weak_callback_);
- pending_phantom_callbacks->push_back(
- PendingPhantomCallback(this, callback, parameter(), embedder_fields));
+ pending_phantom_callbacks->push_back(PendingPhantomCallback(
+ this, weak_callback_, parameter(), embedder_fields));
DCHECK(IsInUse());
set_state(NEAR_DEATH);
}
@@ -877,8 +875,12 @@ void GlobalHandles::PendingPhantomCallback::Invoke(Isolate* isolate) {
callback_ = nullptr;
callback(data);
if (node_ != nullptr) {
- // Transition to second pass state.
- DCHECK(node_->state() == Node::FREE);
+ // Transition to second pass. It is required that the first pass callback
+ // resets the handle using |v8::PersistentBase::Reset|. Also see comments on
+ // |v8::WeakCallbackInfo|.
+ CHECK_WITH_MSG(Node::FREE == node_->state(),
+ "Handle not reset in first callback. See comments on "
+ "|v8::WeakCallbackInfo|.");
node_ = nullptr;
}
}
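
The strengthened check above enforces the documented contract: a phantom weak first-pass callback must Reset() the handle before any second pass runs. A sketch of an embedder callback that satisfies it, using the public v8::Global / v8::WeakCallbackInfo API (the Wrapper type is hypothetical):

#include "v8.h"

// Hypothetical wrapper owning both a V8 handle and a native resource.
struct Wrapper {
  v8::Global<v8::Object> handle;
  void* native_resource = nullptr;
};

static void SecondPass(const v8::WeakCallbackInfo<Wrapper>& info) {
  // Heavier cleanup may run here; the JS object is already gone.
  delete info.GetParameter();
}

static void FirstPass(const v8::WeakCallbackInfo<Wrapper>& info) {
  Wrapper* wrapper = info.GetParameter();
  // Required by the CHECK above: drop the handle in the first pass.
  wrapper->handle.Reset();
  info.SetSecondPassCallback(SecondPass);
}

void MakeWeak(Wrapper* wrapper) {
  wrapper->handle.SetWeak(wrapper, FirstPass, v8::WeakCallbackType::kParameter);
}
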
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index 7ffbf99d61..a41bd326c7 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -11,6 +11,7 @@
#include <limits>
#include <ostream>
+#include "include/v8.h"
#include "src/base/build_config.h"
#include "src/base/flags.h"
#include "src/base/logging.h"
@@ -273,10 +274,6 @@ constexpr int kUC16Size = sizeof(uc16); // NOLINT
// 128 bit SIMD value size.
constexpr int kSimd128Size = 16;
-// Round up n to be a multiple of sz, where sz is a power of 2.
-#define ROUND_UP(n, sz) (((n) + ((sz) - 1)) & ~((sz) - 1))
-
-
// FUNCTION_ADDR(f) gets the address of a C function f.
#define FUNCTION_ADDR(f) \
(reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(f)))
@@ -413,14 +410,13 @@ constexpr int kCodeAlignmentBits = 5;
constexpr intptr_t kCodeAlignment = 1 << kCodeAlignmentBits;
constexpr intptr_t kCodeAlignmentMask = kCodeAlignment - 1;
-// Weak references are tagged using the second bit in a pointer.
-constexpr int kWeakReferenceTag = 3;
-constexpr int kWeakReferenceTagSize = 2;
-constexpr intptr_t kWeakReferenceTagMask = (1 << kWeakReferenceTagSize) - 1;
+const intptr_t kWeakHeapObjectMask = 1 << 1;
+const intptr_t kClearedWeakHeapObject = 3;
// Zap-value: The value used for zapping dead objects.
// Should be a recognizable hex value tagged as a failure.
#ifdef V8_HOST_ARCH_64_BIT
+constexpr uint64_t kClearedFreeMemoryValue = 0;
constexpr uint64_t kZapValue = uint64_t{0xdeadbeedbeadbeef};
constexpr uint64_t kHandleZapValue = uint64_t{0x1baddead0baddeaf};
constexpr uint64_t kGlobalHandleZapValue = uint64_t{0x1baffed00baffedf};
@@ -429,6 +425,7 @@ constexpr uint64_t kDebugZapValue = uint64_t{0xbadbaddbbadbaddb};
constexpr uint64_t kSlotsZapValue = uint64_t{0xbeefdeadbeefdeef};
constexpr uint64_t kFreeListZapValue = 0xfeed1eaffeed1eaf;
#else
+constexpr uint32_t kClearedFreeMemoryValue = 0;
constexpr uint32_t kZapValue = 0xdeadbeef;
constexpr uint32_t kHandleZapValue = 0xbaddeaf;
constexpr uint32_t kGlobalHandleZapValue = 0xbaffedf;
@@ -477,6 +474,7 @@ template <typename T> class MaybeHandle;
template <typename T> class Handle;
class Heap;
class HeapObject;
+class HeapObjectReference;
class IC;
class InterceptorInfo;
class Isolate;
@@ -489,10 +487,12 @@ class MacroAssembler;
class Map;
class MapSpace;
class MarkCompactCollector;
+class MaybeObject;
class NewSpace;
class Object;
class OldSpace;
class ParameterCount;
+class ReadOnlySpace;
class Foreign;
class Scope;
class DeclarationScope;
@@ -520,20 +520,22 @@ typedef bool (*WeakSlotCallbackWithHeap)(Heap* heap, Object** pointer);
// NOTE: SpaceIterator depends on AllocationSpace enumeration values being
// consecutive.
-// Keep this enum in sync with the ObjectSpace enum in v8.h
enum AllocationSpace {
+ // TODO(v8:7464): Actually map this space's memory as read-only.
+ RO_SPACE, // Immortal, immovable and immutable objects,
NEW_SPACE, // Semispaces collected with copying collector.
OLD_SPACE, // May contain pointers to new space.
CODE_SPACE, // No pointers to new space, marked executable.
MAP_SPACE, // Only and all map objects.
LO_SPACE, // Promoted large objects.
- FIRST_SPACE = NEW_SPACE,
+ FIRST_SPACE = RO_SPACE,
LAST_SPACE = LO_SPACE,
- FIRST_PAGED_SPACE = OLD_SPACE,
- LAST_PAGED_SPACE = MAP_SPACE
+ FIRST_GROWABLE_PAGED_SPACE = OLD_SPACE,
+ LAST_GROWABLE_PAGED_SPACE = MAP_SPACE
};
-constexpr int kSpaceTagSize = 3;
+constexpr int kSpaceTagSize = 4;
+STATIC_ASSERT(FIRST_SPACE == 0);
enum AllocationAlignment { kWordAligned, kDoubleAligned, kDoubleUnaligned };
@@ -867,6 +869,26 @@ enum ScopeType : uint8_t {
WITH_SCOPE // The scope introduced by with.
};
+inline std::ostream& operator<<(std::ostream& os, ScopeType type) {
+ switch (type) {
+ case ScopeType::EVAL_SCOPE:
+ return os << "EVAL_SCOPE";
+ case ScopeType::FUNCTION_SCOPE:
+ return os << "FUNCTION_SCOPE";
+ case ScopeType::MODULE_SCOPE:
+ return os << "MODULE_SCOPE";
+ case ScopeType::SCRIPT_SCOPE:
+ return os << "SCRIPT_SCOPE";
+ case ScopeType::CATCH_SCOPE:
+ return os << "CATCH_SCOPE";
+ case ScopeType::BLOCK_SCOPE:
+ return os << "BLOCK_SCOPE";
+ case ScopeType::WITH_SCOPE:
+ return os << "WITH_SCOPE";
+ }
+ UNREACHABLE();
+}
+
// AllocationSiteMode controls whether allocations are tracked by an allocation
// site.
enum AllocationSiteMode {
@@ -1178,7 +1200,7 @@ inline std::ostream& operator<<(std::ostream& os, FunctionKind kind) {
}
enum class InterpreterPushArgsMode : unsigned {
- kJSFunction,
+ kArrayFunction,
kWithFinalSpread,
kOther
};
@@ -1190,8 +1212,8 @@ inline size_t hash_value(InterpreterPushArgsMode mode) {
inline std::ostream& operator<<(std::ostream& os,
InterpreterPushArgsMode mode) {
switch (mode) {
- case InterpreterPushArgsMode::kJSFunction:
- return os << "JSFunction";
+ case InterpreterPushArgsMode::kArrayFunction:
+ return os << "ArrayFunction";
case InterpreterPushArgsMode::kWithFinalSpread:
return os << "WithFinalSpread";
case InterpreterPushArgsMode::kOther:
@@ -1346,6 +1368,18 @@ inline std::ostream& operator<<(std::ostream& os, IterationKind kind) {
UNREACHABLE();
}
+enum class CollectionKind { kMap, kSet };
+
+inline std::ostream& operator<<(std::ostream& os, CollectionKind kind) {
+ switch (kind) {
+ case CollectionKind::kMap:
+ return os << "CollectionKind::kMap";
+ case CollectionKind::kSet:
+ return os << "CollectionKind::kSet";
+ }
+ UNREACHABLE();
+}
+
// Flags for the runtime function kDefineDataPropertyInLiteral. A property can
// be enumerable or not, and, in case of functions, the function name
// can be set or not.
@@ -1450,6 +1484,46 @@ enum IsolateAddressId {
kIsolateAddressCount
};
+V8_INLINE static bool HasWeakHeapObjectTag(const internal::MaybeObject* value) {
+ return ((reinterpret_cast<intptr_t>(value) & kHeapObjectTagMask) ==
+ kWeakHeapObjectTag);
+}
+
+// Object* should never have the weak tag; this variant is for overzealous
+// checking.
+V8_INLINE static bool HasWeakHeapObjectTag(const Object* value) {
+ return ((reinterpret_cast<intptr_t>(value) & kHeapObjectTagMask) ==
+ kWeakHeapObjectTag);
+}
+
+V8_INLINE static bool IsClearedWeakHeapObject(MaybeObject* value) {
+ return reinterpret_cast<intptr_t>(value) == kClearedWeakHeapObject;
+}
+
+V8_INLINE static HeapObject* RemoveWeakHeapObjectMask(
+ HeapObjectReference* value) {
+ return reinterpret_cast<HeapObject*>(reinterpret_cast<intptr_t>(value) &
+ ~kWeakHeapObjectMask);
+}
+
+V8_INLINE static HeapObjectReference* AddWeakHeapObjectMask(HeapObject* value) {
+ return reinterpret_cast<HeapObjectReference*>(
+ reinterpret_cast<intptr_t>(value) | kWeakHeapObjectMask);
+}
+
+V8_INLINE static MaybeObject* AddWeakHeapObjectMask(MaybeObject* value) {
+ return reinterpret_cast<MaybeObject*>(reinterpret_cast<intptr_t>(value) |
+ kWeakHeapObjectMask);
+}
+
+enum class HeapObjectReferenceType {
+ WEAK,
+ STRONG,
+};
+
+enum class PoisoningMitigationLevel { kOff, kOn };
+enum class LoadSensitivity { kSafe, kNeedsPoisoning };
+
} // namespace internal
} // namespace v8
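
kWeakHeapObjectMask and kClearedWeakHeapObject above implement pointer tagging: strong heap pointers end in binary 01, ORing in bit 1 turns them into weak references ending in 11, and the bare value 3 marks a cleared weak slot. A standalone sketch of the same arithmetic on plain integers (constants restated for illustration, assuming the usual V8 tag layout):

#include <cassert>
#include <cstdint>

constexpr intptr_t kHeapObjectTag = 1;
constexpr intptr_t kHeapObjectTagMask = 3;
constexpr intptr_t kWeakHeapObjectTag = 3;
constexpr intptr_t kWeakHeapObjectMask = 1 << 1;
constexpr intptr_t kClearedWeakHeapObject = 3;

int main() {
  intptr_t strong = 0x1000 | kHeapObjectTag;     // ....0001: strong pointer
  intptr_t weak = strong | kWeakHeapObjectMask;  // ....0011: weak reference
  assert((weak & kHeapObjectTagMask) == kWeakHeapObjectTag);  // HasWeakHeapObjectTag
  assert((weak & ~kWeakHeapObjectMask) == strong);            // RemoveWeakHeapObjectMask
  assert(kClearedWeakHeapObject == 3);           // sentinel for a cleared slot
  return 0;
}
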
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index ef4d4b155a..cfa65d11b8 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -16,12 +16,9 @@ namespace internal {
// Handles should be trivially copyable so that they can be efficiently passed
// by value. If they are not trivially copyable, they cannot be passed in
// registers.
-static_assert(IS_TRIVIALLY_COPYABLE(HandleBase),
- "HandleBase should be trivially copyable");
-static_assert(IS_TRIVIALLY_COPYABLE(Handle<Object>),
- "Handle<Object> should be trivially copyable");
-static_assert(IS_TRIVIALLY_COPYABLE(MaybeHandle<Object>),
- "MaybeHandle<Object> should be trivially copyable");
+ASSERT_TRIVIALLY_COPYABLE(HandleBase);
+ASSERT_TRIVIALLY_COPYABLE(Handle<Object>);
+ASSERT_TRIVIALLY_COPYABLE(MaybeHandle<Object>);
#ifdef DEBUG
bool HandleBase::IsDereferenceAllowed(DereferenceCheckMode mode) const {
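
ASSERT_TRIVIALLY_COPYABLE collapses the three hand-written static_asserts into one macro per type. Presumably it expands to something along these lines (an assumption about a macro defined elsewhere in V8's base headers, not copied from the source):

#include <type_traits>

// Hedged guess at the expansion.
#define ASSERT_TRIVIALLY_COPYABLE(T)                  \
  static_assert(std::is_trivially_copyable<T>::value, \
                #T " should be trivially copyable")

struct Demo { int x; };
ASSERT_TRIVIALLY_COPYABLE(Demo);
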
diff --git a/deps/v8/src/heap-symbols.h b/deps/v8/src/heap-symbols.h
index ae06892675..82ecd76657 100644
--- a/deps/v8/src/heap-symbols.h
+++ b/deps/v8/src/heap-symbols.h
@@ -161,6 +161,7 @@
V(query_colon_string, "(?:)") \
V(RangeError_string, "RangeError") \
V(raw_string, "raw") \
+ V(ReconfigureToDataProperty_string, "ReconfigureToDataProperty") \
V(ReferenceError_string, "ReferenceError") \
V(RegExp_string, "RegExp") \
V(regexp_to_string, "[object RegExp]") \
@@ -200,8 +201,8 @@
V(toJSON_string, "toJSON") \
V(toString_string, "toString") \
V(true_string, "true") \
- V(type_string, "type") \
V(TypeError_string, "TypeError") \
+ V(type_string, "type") \
V(Uint16Array_string, "Uint16Array") \
V(Uint32Array_string, "Uint32Array") \
V(Uint8Array_string, "Uint8Array") \
@@ -242,6 +243,7 @@
V(intl_initialized_marker_symbol) \
V(intl_pattern_symbol) \
V(intl_resolved_symbol) \
+ V(interpreter_trampoline_symbol) \
V(megamorphic_symbol) \
V(native_context_index_symbol) \
V(nonextensible_symbol) \
@@ -263,6 +265,7 @@
V(async_iterator_symbol, Symbol.asyncIterator) \
V(iterator_symbol, Symbol.iterator) \
V(intl_fallback_symbol, IntlFallback) \
+ V(match_all_symbol, Symbol.matchAll) \
V(match_symbol, Symbol.match) \
V(replace_symbol, Symbol.replace) \
V(search_symbol, Symbol.search) \
@@ -309,6 +312,7 @@
F(MC_CLEAR_WEAK_CELLS) \
F(MC_CLEAR_WEAK_COLLECTIONS) \
F(MC_CLEAR_WEAK_LISTS) \
+ F(MC_CLEAR_WEAK_REFERENCES) \
F(MC_EPILOGUE) \
F(MC_EVACUATE) \
F(MC_EVACUATE_CANDIDATES) \
diff --git a/deps/v8/src/heap/array-buffer-collector.cc b/deps/v8/src/heap/array-buffer-collector.cc
index cf0297bb2a..412e4ad05a 100644
--- a/deps/v8/src/heap/array-buffer-collector.cc
+++ b/deps/v8/src/heap/array-buffer-collector.cc
@@ -4,6 +4,7 @@
#include "src/heap/array-buffer-collector.h"
+#include "src/base/template-utils.h"
#include "src/heap/array-buffer-tracker.h"
#include "src/heap/heap-inl.h"
@@ -47,10 +48,9 @@ class ArrayBufferCollector::FreeingTask final : public CancelableTask {
void ArrayBufferCollector::FreeAllocationsOnBackgroundThread() {
heap_->account_external_memory_concurrently_freed();
- if (heap_->use_tasks() && FLAG_concurrent_array_buffer_freeing) {
- FreeingTask* task = new FreeingTask(heap_);
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- task, v8::Platform::kShortRunningTask);
+ if (!heap_->IsTearingDown() && FLAG_concurrent_array_buffer_freeing) {
+ V8::GetCurrentPlatform()->CallOnWorkerThread(
+ base::make_unique<FreeingTask>(heap_));
} else {
// Fallback for when concurrency is disabled/restricted.
FreeAllocations();
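
The scheduling change above (repeated later in concurrent-marking.cc) passes task ownership through std::unique_ptr instead of a raw pointer plus an ExpectedRuntime hint. A sketch of that idiom with stand-in types; only the overall shape mirrors v8::Platform::CallOnWorkerThread, the names below are hypothetical:

#include <memory>

struct Task {
  virtual ~Task() = default;
  virtual void Run() = 0;
};

struct FreeingTask final : Task {
  void Run() override { /* free backing stores off the main thread */ }
};

void CallOnWorkerThread(std::unique_ptr<Task> task) {
  // A real platform would queue this on a worker pool; run inline for the demo.
  task->Run();
}

void ScheduleFreeing(bool tearing_down) {
  if (!tearing_down) {
    CallOnWorkerThread(std::make_unique<FreeingTask>());  // ownership moves in
  } else {
    FreeingTask().Run();  // fallback: free synchronously, as the diff does
  }
}

int main() { ScheduleFreeing(false); }
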
diff --git a/deps/v8/src/heap/array-buffer-tracker-inl.h b/deps/v8/src/heap/array-buffer-tracker-inl.h
index 568f149b04..8ed4a66664 100644
--- a/deps/v8/src/heap/array-buffer-tracker-inl.h
+++ b/deps/v8/src/heap/array-buffer-tracker-inl.h
@@ -51,12 +51,15 @@ void ArrayBufferTracker::Unregister(Heap* heap, JSArrayBuffer* buffer) {
template <typename Callback>
void LocalArrayBufferTracker::Free(Callback should_free) {
size_t new_retained_size = 0;
+ Isolate* isolate = heap_->isolate();
for (TrackingData::iterator it = array_buffers_.begin();
it != array_buffers_.end();) {
- JSArrayBuffer* buffer = reinterpret_cast<JSArrayBuffer*>(*it);
- const size_t length = buffer->allocation_length();
+ JSArrayBuffer* buffer = reinterpret_cast<JSArrayBuffer*>(it->first);
+ const size_t length = it->second;
if (should_free(buffer)) {
- buffer->FreeBackingStore();
+ JSArrayBuffer::FreeBackingStore(
+ isolate, {buffer->backing_store(), length, buffer->backing_store(),
+ buffer->allocation_mode(), buffer->is_wasm_memory()});
it = array_buffers_.erase(it);
} else {
new_retained_size += length;
@@ -87,7 +90,7 @@ void ArrayBufferTracker::FreeDead(Page* page, MarkingState* marking_state) {
void LocalArrayBufferTracker::Add(JSArrayBuffer* buffer, size_t length) {
DCHECK_GE(retained_size_ + length, retained_size_);
retained_size_ += length;
- auto ret = array_buffers_.insert(buffer);
+ auto ret = array_buffers_.insert({buffer, length});
USE(ret);
// Check that we indeed inserted a new value and did not overwrite an existing
// one (which would be a bug).
@@ -100,6 +103,7 @@ void LocalArrayBufferTracker::Remove(JSArrayBuffer* buffer, size_t length) {
TrackingData::iterator it = array_buffers_.find(buffer);
// Check that we indeed find a key to remove.
DCHECK(it != array_buffers_.end());
+ DCHECK_EQ(length, it->second);
array_buffers_.erase(it);
}
diff --git a/deps/v8/src/heap/array-buffer-tracker.cc b/deps/v8/src/heap/array-buffer-tracker.cc
index 5acf9b9bfb..589756fdc3 100644
--- a/deps/v8/src/heap/array-buffer-tracker.cc
+++ b/deps/v8/src/heap/array-buffer-tracker.cc
@@ -29,7 +29,7 @@ void LocalArrayBufferTracker::Process(Callback callback) {
size_t moved_size = 0;
for (TrackingData::iterator it = array_buffers_.begin();
it != array_buffers_.end();) {
- old_buffer = reinterpret_cast<JSArrayBuffer*>(*it);
+ old_buffer = reinterpret_cast<JSArrayBuffer*>(it->first);
const CallbackResult result = callback(old_buffer, &new_buffer);
if (result == kKeepEntry) {
new_retained_size += NumberToSize(old_buffer->byte_length());
@@ -51,13 +51,12 @@ void LocalArrayBufferTracker::Process(Callback callback) {
}
it = array_buffers_.erase(it);
} else if (result == kRemoveEntry) {
- // Size of freed memory is computed to avoid looking at dead objects.
- void* allocation_base = old_buffer->allocation_base();
- DCHECK_NOT_NULL(allocation_base);
-
- backing_stores_to_free->emplace_back(allocation_base,
- old_buffer->allocation_length(),
- old_buffer->allocation_mode());
+ // We pass backing_store() and stored length to the collector for freeing
+ // the backing store. Wasm allocations will go through their own tracker
+ // based on the backing store.
+ backing_stores_to_free->emplace_back(
+ old_buffer->backing_store(), it->second, old_buffer->backing_store(),
+ old_buffer->allocation_mode(), old_buffer->is_wasm_memory());
it = array_buffers_.erase(it);
} else {
UNREACHABLE();
@@ -135,5 +134,25 @@ bool ArrayBufferTracker::IsTracked(JSArrayBuffer* buffer) {
}
}
+void ArrayBufferTracker::TearDown(Heap* heap) {
+ // ArrayBuffers can only be found in NEW_SPACE and OLD_SPACE.
+ for (Page* p : *heap->old_space()) {
+ FreeAll(p);
+ }
+ NewSpace* new_space = heap->new_space();
+ if (new_space->to_space().is_committed()) {
+ for (Page* p : new_space->to_space()) {
+ FreeAll(p);
+ }
+ }
+#ifdef DEBUG
+ if (new_space->from_space().is_committed()) {
+ for (Page* p : new_space->from_space()) {
+ DCHECK(!p->contains_array_buffers());
+ }
+ }
+#endif // DEBUG
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/array-buffer-tracker.h b/deps/v8/src/heap/array-buffer-tracker.h
index 7bfc1b83f6..c9c1a5b645 100644
--- a/deps/v8/src/heap/array-buffer-tracker.h
+++ b/deps/v8/src/heap/array-buffer-tracker.h
@@ -5,7 +5,7 @@
#ifndef V8_HEAP_ARRAY_BUFFER_TRACKER_H_
#define V8_HEAP_ARRAY_BUFFER_TRACKER_H_
-#include <unordered_set>
+#include <unordered_map>
#include "src/allocation.h"
#include "src/base/platform/mutex.h"
@@ -57,6 +57,9 @@ class ArrayBufferTracker : public AllStatic {
// Returns whether a buffer is currently tracked.
static bool IsTracked(JSArrayBuffer* buffer);
+
+ // Tears down the tracker and frees up all registered array buffers.
+ static void TearDown(Heap* heap);
};
// LocalArrayBufferTracker tracks internalized array buffers.
@@ -108,7 +111,12 @@ class LocalArrayBufferTracker {
}
};
- typedef std::unordered_set<JSArrayBuffer*, Hasher> TrackingData;
+ // Keep track of the backing store and the corresponding length at time of
+ // registering. The length is accessed from JavaScript and can be a
+ // HeapNumber. The reason for tracking the length is that in the case of
+ // length being a HeapNumber, the buffer and its length may be stored on
+ // different memory pages, making it impossible to guarantee order of freeing.
+ typedef std::unordered_map<JSArrayBuffer*, size_t, Hasher> TrackingData;
Heap* heap_;
// The set contains raw heap pointers which are removed by the GC upon
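
The tracker above now keys an unordered_map on the buffer and stores the length seen at registration time, because the length may live on a different page than the buffer and cannot be read safely once the buffer is dead. A simplified sketch of the idea with hypothetical types:

#include <cstddef>
#include <unordered_map>

struct DemoArrayBuffer;  // stand-in for JSArrayBuffer; only used as a key

// Key on the buffer, remember the length observed at registration time.
using TrackingData = std::unordered_map<DemoArrayBuffer*, size_t>;

class DemoTracker {
 public:
  void Add(DemoArrayBuffer* buffer, size_t length) {
    retained_size_ += length;
    tracked_.insert({buffer, length});
  }

  template <typename ShouldFree>
  void Free(ShouldFree should_free) {
    for (auto it = tracked_.begin(); it != tracked_.end();) {
      if (should_free(it->first)) {
        // Use the stored length; the buffer may already be dead, so its own
        // length field is not read back here.
        retained_size_ -= it->second;
        it = tracked_.erase(it);
      } else {
        ++it;
      }
    }
  }

  size_t retained_size() const { return retained_size_; }

 private:
  TrackingData tracked_;
  size_t retained_size_ = 0;
};

int main() {
  DemoTracker tracker;
  DemoArrayBuffer* buffer = reinterpret_cast<DemoArrayBuffer*>(0x1000);
  tracker.Add(buffer, 4096);
  tracker.Free([](DemoArrayBuffer*) { return true; });
  return static_cast<int>(tracker.retained_size());  // 0
}
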
diff --git a/deps/v8/src/heap/code-stats.cc b/deps/v8/src/heap/code-stats.cc
index e404101753..f2c1985296 100644
--- a/deps/v8/src/heap/code-stats.cc
+++ b/deps/v8/src/heap/code-stats.cc
@@ -199,7 +199,7 @@ void CodeStatistics::CollectCodeCommentStatistics(HeapObject* obj,
Code* code = Code::cast(obj);
RelocIterator it(code);
int delta = 0;
- const byte* prev_pc = code->instruction_start();
+ const byte* prev_pc = code->raw_instruction_start();
while (!it.done()) {
if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
@@ -209,9 +209,9 @@ void CodeStatistics::CollectCodeCommentStatistics(HeapObject* obj,
it.next();
}
- DCHECK(code->instruction_start() <= prev_pc &&
- prev_pc <= code->instruction_end());
- delta += static_cast<int>(code->instruction_end() - prev_pc);
+ DCHECK(code->raw_instruction_start() <= prev_pc &&
+ prev_pc <= code->raw_instruction_end());
+ delta += static_cast<int>(code->raw_instruction_end() - prev_pc);
EnterComment(isolate, "NoComment", delta);
}
#endif
diff --git a/deps/v8/src/heap/concurrent-marking.cc b/deps/v8/src/heap/concurrent-marking.cc
index 3aafd191cc..ca2afb8cdf 100644
--- a/deps/v8/src/heap/concurrent-marking.cc
+++ b/deps/v8/src/heap/concurrent-marking.cc
@@ -7,6 +7,8 @@
#include <stack>
#include <unordered_map>
+#include "include/v8config.h"
+#include "src/base/template-utils.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
@@ -90,12 +92,61 @@ class ConcurrentMarkingVisitor final
return marking_state_.GreyToBlack(object);
}
+ bool AllowDefaultJSObjectVisit() { return false; }
+
+ void ProcessStrongHeapObject(HeapObject* host, Object** slot,
+ HeapObject* heap_object) {
+ MarkObject(heap_object);
+ MarkCompactCollector::RecordSlot(host, slot, heap_object);
+ }
+
+ void ProcessWeakHeapObject(HeapObject* host, HeapObjectReference** slot,
+ HeapObject* heap_object) {
+#ifdef THREAD_SANITIZER
+ // Perform a dummy acquire load to tell TSAN that there is no data race
+ // in mark-bit initialization. See MemoryChunk::Initialize for the
+ // corresponding release store.
+ MemoryChunk* chunk = MemoryChunk::FromAddress(heap_object->address());
+ CHECK_NOT_NULL(chunk->synchronized_heap());
+#endif
+ if (marking_state_.IsBlackOrGrey(heap_object)) {
+ // Weak references with live values are directly processed here to
+ // reduce the processing time of weak cells during the main GC
+ // pause.
+ MarkCompactCollector::RecordSlot(host, slot, heap_object);
+ } else {
+ // If we do not know about liveness of the value, we have to process
+ // the reference when we know the liveness of the whole transitive
+ // closure.
+ weak_objects_->weak_references.Push(task_id_, std::make_pair(host, slot));
+ }
+ }
+
void VisitPointers(HeapObject* host, Object** start, Object** end) override {
for (Object** slot = start; slot < end; slot++) {
Object* object = base::AsAtomicPointer::Relaxed_Load(slot);
- if (!object->IsHeapObject()) continue;
- MarkObject(HeapObject::cast(object));
- MarkCompactCollector::RecordSlot(host, slot, object);
+ DCHECK(!HasWeakHeapObjectTag(object));
+ if (object->IsHeapObject()) {
+ ProcessStrongHeapObject(host, slot, HeapObject::cast(object));
+ }
+ }
+ }
+
+ void VisitPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) override {
+ for (MaybeObject** slot = start; slot < end; slot++) {
+ MaybeObject* object = base::AsAtomicPointer::Relaxed_Load(slot);
+ HeapObject* heap_object;
+ if (object->ToStrongHeapObject(&heap_object)) {
+ // If the reference changes concurrently from strong to weak, the write
+ // barrier will treat the weak reference as strong, so we won't miss the
+ // weak reference.
+ ProcessStrongHeapObject(host, reinterpret_cast<Object**>(slot),
+ heap_object);
+ } else if (object->ToWeakHeapObject(&heap_object)) {
+ ProcessWeakHeapObject(
+ host, reinterpret_cast<HeapObjectReference**>(slot), heap_object);
+ }
}
}
@@ -103,6 +154,7 @@ class ConcurrentMarkingVisitor final
for (int i = 0; i < snapshot.number_of_slots(); i++) {
Object** slot = snapshot.slot(i);
Object* object = snapshot.value(i);
+ DCHECK(!HasWeakHeapObjectTag(object));
if (!object->IsHeapObject()) continue;
MarkObject(HeapObject::cast(object));
MarkCompactCollector::RecordSlot(host, slot, object);
@@ -114,18 +166,19 @@ class ConcurrentMarkingVisitor final
// ===========================================================================
int VisitJSObject(Map* map, JSObject* object) {
- int size = JSObject::BodyDescriptor::SizeOf(map, object);
- int used_size = map->UsedInstanceSize();
- DCHECK_LE(used_size, size);
- DCHECK_GE(used_size, JSObject::kHeaderSize);
- const SlotSnapshot& snapshot = MakeSlotSnapshot(map, object, used_size);
- if (!ShouldVisit(object)) return 0;
- VisitPointersInSnapshot(object, snapshot);
- return size;
+ return VisitJSObjectSubclass(map, object);
}
int VisitJSObjectFast(Map* map, JSObject* object) {
- return VisitJSObject(map, object);
+ return VisitJSObjectSubclass(map, object);
+ }
+
+ int VisitJSArrayBuffer(Map* map, JSArrayBuffer* object) {
+ return VisitJSObjectSubclass(map, object);
+ }
+
+ int VisitWasmInstanceObject(Map* map, WasmInstanceObject* object) {
+ return VisitJSObjectSubclass(map, object);
}
int VisitJSApiObject(Map* map, JSObject* object) {
@@ -136,6 +189,17 @@ class ConcurrentMarkingVisitor final
return 0;
}
+ int VisitJSFunction(Map* map, JSFunction* object) {
+ int size = JSFunction::BodyDescriptorWeak::SizeOf(map, object);
+ int used_size = map->UsedInstanceSize();
+ DCHECK_LE(used_size, size);
+ DCHECK_GE(used_size, JSObject::kHeaderSize);
+ const SlotSnapshot& snapshot = MakeSlotSnapshotWeak(map, object, used_size);
+ if (!ShouldVisit(object)) return 0;
+ VisitPointersInSnapshot(object, snapshot);
+ return size;
+ }
+
// ===========================================================================
// Strings with pointers =====================================================
// ===========================================================================
@@ -187,17 +251,11 @@ class ConcurrentMarkingVisitor final
// ===========================================================================
int VisitFixedArray(Map* map, FixedArray* object) {
- // The synchronized_length() function checks that the length is a Smi.
- // This is not necessarily the case if the array is being left-trimmed.
- Object* length = object->unchecked_synchronized_length();
- if (!ShouldVisit(object)) return 0;
- // The cached length must be the actual length as the array is not black.
- // Left trimming marks the array black before over-writing the length.
- DCHECK(length->IsSmi());
- int size = FixedArray::SizeFor(Smi::ToInt(length));
- VisitMapPointer(object, object->map_slot());
- FixedArray::BodyDescriptor::IterateBody(object, size, this);
- return size;
+ return VisitLeftTrimmableArray(map, object);
+ }
+
+ int VisitFixedDoubleArray(Map* map, FixedDoubleArray* object) {
+ return VisitLeftTrimmableArray(map, object);
}
// ===========================================================================
@@ -217,7 +275,7 @@ class ConcurrentMarkingVisitor final
if (!ShouldVisit(object)) return 0;
int size = BytecodeArray::BodyDescriptorWeak::SizeOf(map, object);
VisitMapPointer(object, object->map_slot());
- BytecodeArray::BodyDescriptorWeak::IterateBody(object, size, this);
+ BytecodeArray::BodyDescriptorWeak::IterateBody(map, object, size, this);
object->MakeOlder();
return size;
}
@@ -226,7 +284,7 @@ class ConcurrentMarkingVisitor final
if (!ShouldVisit(object)) return 0;
int size = AllocationSite::BodyDescriptorWeak::SizeOf(map, object);
VisitMapPointer(object, object->map_slot());
- AllocationSite::BodyDescriptorWeak::IterateBody(object, size, this);
+ AllocationSite::BodyDescriptorWeak::IterateBody(map, object, size, this);
return size;
}
@@ -234,15 +292,7 @@ class ConcurrentMarkingVisitor final
if (!ShouldVisit(object)) return 0;
int size = CodeDataContainer::BodyDescriptorWeak::SizeOf(map, object);
VisitMapPointer(object, object->map_slot());
- CodeDataContainer::BodyDescriptorWeak::IterateBody(object, size, this);
- return size;
- }
-
- int VisitJSFunction(Map* map, JSFunction* object) {
- if (!ShouldVisit(object)) return 0;
- int size = JSFunction::BodyDescriptorWeak::SizeOf(map, object);
- VisitMapPointer(object, object->map_slot());
- JSFunction::BodyDescriptorWeak::IterateBody(object, size, this);
+ CodeDataContainer::BodyDescriptorWeak::IterateBody(map, object, size, this);
return size;
}
@@ -255,7 +305,7 @@ class ConcurrentMarkingVisitor final
VisitPointer(map, HeapObject::RawField(map, Map::kPrototypeOffset));
VisitPointer(
map, HeapObject::RawField(map, Map::kConstructorOrBackPointerOffset));
- VisitPointer(map, HeapObject::RawField(
+ VisitPointer(map, HeapObject::RawMaybeWeakField(
map, Map::kTransitionsOrPrototypeInfoOffset));
VisitPointer(map, HeapObject::RawField(map, Map::kDependentCodeOffset));
VisitPointer(map, HeapObject::RawField(map, Map::kWeakCellCacheOffset));
@@ -268,7 +318,7 @@ class ConcurrentMarkingVisitor final
if (!ShouldVisit(object)) return 0;
int size = Context::BodyDescriptorWeak::SizeOf(map, object);
VisitMapPointer(object, object->map_slot());
- Context::BodyDescriptorWeak::IterateBody(object, size, this);
+ Context::BodyDescriptorWeak::IterateBody(map, object, size, this);
return size;
}
@@ -276,7 +326,7 @@ class ConcurrentMarkingVisitor final
if (!ShouldVisit(array)) return 0;
VisitMapPointer(array, array->map_slot());
int size = TransitionArray::BodyDescriptor::SizeOf(map, array);
- TransitionArray::BodyDescriptor::IterateBody(array, size, this);
+ TransitionArray::BodyDescriptor::IterateBody(map, array, size, this);
weak_objects_->transition_arrays.Push(task_id_, array);
return size;
}
@@ -338,18 +388,59 @@ class ConcurrentMarkingVisitor final
}
}
+ void VisitPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) override {
+ // This should never happen, because we don't use snapshotting for objects
+ // which contain weak references.
+ UNREACHABLE();
+ }
+
private:
SlotSnapshot* slot_snapshot_;
};
template <typename T>
+ int VisitJSObjectSubclass(Map* map, T* object) {
+ int size = T::BodyDescriptor::SizeOf(map, object);
+ int used_size = map->UsedInstanceSize();
+ DCHECK_LE(used_size, size);
+ DCHECK_GE(used_size, T::kHeaderSize);
+ const SlotSnapshot& snapshot = MakeSlotSnapshot(map, object, used_size);
+ if (!ShouldVisit(object)) return 0;
+ VisitPointersInSnapshot(object, snapshot);
+ return size;
+ }
+
+ template <typename T>
+ int VisitLeftTrimmableArray(Map* map, T* object) {
+ // The synchronized_length() function checks that the length is a Smi.
+ // This is not necessarily the case if the array is being left-trimmed.
+ Object* length = object->unchecked_synchronized_length();
+ if (!ShouldVisit(object)) return 0;
+ // The cached length must be the actual length as the array is not black.
+ // Left trimming marks the array black before over-writing the length.
+ DCHECK(length->IsSmi());
+ int size = T::SizeFor(Smi::ToInt(length));
+ VisitMapPointer(object, object->map_slot());
+ T::BodyDescriptor::IterateBody(map, object, size, this);
+ return size;
+ }
+
+ template <typename T>
const SlotSnapshot& MakeSlotSnapshot(Map* map, T* object, int size) {
- // TODO(ulan): Iterate only the existing fields and skip slack at the end
- // of the object.
SlotSnapshottingVisitor visitor(&slot_snapshot_);
visitor.VisitPointer(object,
reinterpret_cast<Object**>(object->map_slot()));
- T::BodyDescriptor::IterateBody(object, size, &visitor);
+ T::BodyDescriptor::IterateBody(map, object, size, &visitor);
+ return slot_snapshot_;
+ }
+
+ template <typename T>
+ const SlotSnapshot& MakeSlotSnapshotWeak(Map* map, T* object, int size) {
+ SlotSnapshottingVisitor visitor(&slot_snapshot_);
+ visitor.VisitPointer(object,
+ reinterpret_cast<Object**>(object->map_slot()));
+ T::BodyDescriptorWeak::IterateBody(map, object, size, &visitor);
return slot_snapshot_;
}
ConcurrentMarking::MarkingWorklist::View shared_;
@@ -484,6 +575,7 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
weak_objects_->weak_cells.FlushToGlobal(task_id);
weak_objects_->transition_arrays.FlushToGlobal(task_id);
+ weak_objects_->weak_references.FlushToGlobal(task_id);
base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes, 0);
total_marked_bytes_.Increment(marked_bytes);
{
@@ -501,15 +593,24 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
}
void ConcurrentMarking::ScheduleTasks() {
- DCHECK(heap_->use_tasks());
+ DCHECK(!heap_->IsTearingDown());
if (!FLAG_concurrent_marking) return;
base::LockGuard<base::Mutex> guard(&pending_lock_);
DCHECK_EQ(0, pending_task_count_);
if (task_count_ == 0) {
- task_count_ = Max(
- 1, Min(kMaxTasks,
- static_cast<int>(V8::GetCurrentPlatform()
- ->NumberOfAvailableBackgroundThreads())));
+ static const int num_cores =
+ V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
+#if defined(V8_OS_MACOSX)
+    // Mac OS X 10.11 and prior seem to have trouble when doing concurrent
+ // marking on competing hyper-threads (regresses Octane/Splay). As such,
+ // only use num_cores/2, leaving one of those for the main thread.
+ // TODO(ulan): Use all cores on Mac 10.12+.
+ task_count_ = Max(1, Min(kMaxTasks, (num_cores / 2) - 1));
+#else  // defined(V8_OS_MACOSX)
+ // On other platforms use all logical cores, leaving one for the main
+ // thread.
+ task_count_ = Max(1, Min(kMaxTasks, num_cores - 1));
+#endif  // defined(V8_OS_MACOSX)
}
// Task id 0 is for the main thread.
for (int i = 1; i <= task_count_; i++) {
@@ -521,17 +622,17 @@ void ConcurrentMarking::ScheduleTasks() {
task_state_[i].preemption_request.SetValue(false);
is_pending_[i] = true;
++pending_task_count_;
- Task* task = new Task(heap_->isolate(), this, &task_state_[i], i);
+ auto task =
+ base::make_unique<Task>(heap_->isolate(), this, &task_state_[i], i);
cancelable_id_[i] = task->id();
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- task, v8::Platform::kShortRunningTask);
+ V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
}
}
DCHECK_EQ(task_count_, pending_task_count_);
}
void ConcurrentMarking::RescheduleTasksIfNeeded() {
- if (!FLAG_concurrent_marking || !heap_->use_tasks()) return;
+ if (!FLAG_concurrent_marking || heap_->IsTearingDown()) return;
{
base::LockGuard<base::Mutex> guard(&pending_lock_);
if (pending_task_count_ > 0) return;
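For reference, a standalone sketch (not part of this patch) of the task_count_ heuristic introduced in ScheduleTasks above; the kMaxTasks clamp and the macOS halving mirror the hunk, while the function and parameter names are made up for illustration.

#include <algorithm>

// One worker per logical core, minus one core reserved for the main thread;
// on macOS only half of the cores are used (see the comment in the hunk).
int ConcurrentMarkingTaskCount(int num_worker_threads, bool is_macos,
                               int max_tasks) {
  const int num_cores = num_worker_threads + 1;  // workers + main thread
  const int usable = is_macos ? (num_cores / 2) - 1 : num_cores - 1;
  return std::max(1, std::min(max_tasks, usable));
}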
diff --git a/deps/v8/src/factory-inl.h b/deps/v8/src/heap/factory-inl.h
index ace5c35472..21ee6dc251 100644
--- a/deps/v8/src/factory-inl.h
+++ b/deps/v8/src/heap/factory-inl.h
@@ -2,13 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_FACTORY_INL_H_
-#define V8_FACTORY_INL_H_
+#ifndef V8_HEAP_FACTORY_INL_H_
+#define V8_HEAP_FACTORY_INL_H_
-#include "src/factory.h"
+#include "src/heap/factory.h"
#include "src/handles-inl.h"
#include "src/objects-inl.h"
+#include "src/string-hasher.h"
namespace v8 {
namespace internal {
@@ -152,4 +153,4 @@ Handle<String> Factory::Uint32ToString(uint32_t value) {
} // namespace internal
} // namespace v8
-#endif // V8_FACTORY_INL_H_
+#endif // V8_HEAP_FACTORY_INL_H_
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/heap/factory.cc
index 6fd8e8c61e..aecbc880e1 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/heap/factory.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/factory.h"
+#include "src/heap/factory.h"
#include "src/accessors.h"
#include "src/allocation-site-scopes.h"
@@ -12,6 +12,7 @@
#include "src/bootstrapper.h"
#include "src/compiler.h"
#include "src/conversions.h"
+#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
#include "src/macro-assembler.h"
#include "src/objects/bigint.h"
@@ -27,82 +28,94 @@
namespace v8 {
namespace internal {
+HeapObject* Factory::AllocateRawWithImmortalMap(int size,
+ PretenureFlag pretenure,
+ Map* map,
+ AllocationAlignment alignment) {
+ HeapObject* result = isolate()->heap()->AllocateRawWithRetry(
+ size, Heap::SelectSpace(pretenure), alignment);
+ result->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
+ return result;
+}
-// Calls the FUNCTION_CALL function and retries it up to three times
-// to guarantee that any allocations performed during the call will
-// succeed if there's enough memory.
-//
-// Warning: Do not use the identifiers __object__, __maybe_object__,
-// __allocation__ or __scope__ in a call to this macro.
-
-#define RETURN_OBJECT_UNLESS_RETRY(ISOLATE, TYPE) \
- if (__allocation__.To(&__object__)) { \
- DCHECK(__object__ != (ISOLATE)->heap()->exception()); \
- return Handle<TYPE>(TYPE::cast(__object__), ISOLATE); \
- }
-
-#define CALL_HEAP_FUNCTION(ISOLATE, FUNCTION_CALL, TYPE) \
- do { \
- AllocationResult __allocation__ = FUNCTION_CALL; \
- Object* __object__ = nullptr; \
- RETURN_OBJECT_UNLESS_RETRY(ISOLATE, TYPE) \
- /* Two GCs before panicking. In newspace will almost always succeed. */ \
- for (int __i__ = 0; __i__ < 2; __i__++) { \
- (ISOLATE)->heap()->CollectGarbage( \
- __allocation__.RetrySpace(), \
- GarbageCollectionReason::kAllocationFailure); \
- __allocation__ = FUNCTION_CALL; \
- RETURN_OBJECT_UNLESS_RETRY(ISOLATE, TYPE) \
- } \
- (ISOLATE)->counters()->gc_last_resort_from_handles()->Increment(); \
- (ISOLATE)->heap()->CollectAllAvailableGarbage( \
- GarbageCollectionReason::kLastResort); \
- { \
- AlwaysAllocateScope __scope__(ISOLATE); \
- __allocation__ = FUNCTION_CALL; \
- } \
- RETURN_OBJECT_UNLESS_RETRY(ISOLATE, TYPE) \
- /* TODO(1181417): Fix this. */ \
- v8::internal::Heap::FatalProcessOutOfMemory("CALL_AND_RETRY_LAST", true); \
- return Handle<TYPE>(); \
- } while (false)
-
-template<typename T>
-Handle<T> Factory::New(Handle<Map> map, AllocationSpace space) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->Allocate(*map, space),
- T);
-}
-
-
-template<typename T>
-Handle<T> Factory::New(Handle<Map> map,
- AllocationSpace space,
- Handle<AllocationSite> allocation_site) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->Allocate(*map, space, *allocation_site),
- T);
-}
-
-
-Handle<HeapObject> Factory::NewFillerObject(int size,
- bool double_align,
- AllocationSpace space) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateFillerObject(size, double_align, space),
- HeapObject);
+HeapObject* Factory::AllocateRawWithAllocationSite(
+ Handle<Map> map, PretenureFlag pretenure,
+ Handle<AllocationSite> allocation_site) {
+ DCHECK(map->instance_type() != MAP_TYPE);
+ int size = map->instance_size();
+ if (!allocation_site.is_null()) size += AllocationMemento::kSize;
+ AllocationSpace space = Heap::SelectSpace(pretenure);
+ HeapObject* result = isolate()->heap()->AllocateRawWithRetry(size, space);
+ WriteBarrierMode write_barrier_mode =
+ space == NEW_SPACE ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
+ result->set_map_after_allocation(*map, write_barrier_mode);
+ if (!allocation_site.is_null()) {
+ AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
+ reinterpret_cast<Address>(result) + map->instance_size());
+ InitializeAllocationMemento(alloc_memento, *allocation_site);
+ }
+ return result;
+}
+
+void Factory::InitializeAllocationMemento(AllocationMemento* memento,
+ AllocationSite* allocation_site) {
+ memento->set_map_after_allocation(*allocation_memento_map(),
+ SKIP_WRITE_BARRIER);
+ DCHECK(allocation_site->map() == *allocation_site_map());
+ memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
+ if (FLAG_allocation_site_pretenuring) {
+ allocation_site->IncrementMementoCreateCount();
+ }
+}
+
+HeapObject* Factory::AllocateRawArray(int size, PretenureFlag pretenure) {
+ AllocationSpace space = Heap::SelectSpace(pretenure);
+ HeapObject* result = isolate()->heap()->AllocateRawWithRetry(size, space);
+ if (size > kMaxRegularHeapObjectSize && FLAG_use_marking_progress_bar) {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(result->address());
+ chunk->SetFlag<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR);
+ }
+ return result;
+}
+
+HeapObject* Factory::AllocateRawFixedArray(int length,
+ PretenureFlag pretenure) {
+ if (length < 0 || length > FixedArray::kMaxLength) {
+ isolate()->heap()->FatalProcessOutOfMemory("invalid array length");
+ }
+ return AllocateRawArray(FixedArray::SizeFor(length), pretenure);
}
+HeapObject* Factory::New(Handle<Map> map, PretenureFlag pretenure) {
+ DCHECK(map->instance_type() != MAP_TYPE);
+ int size = map->instance_size();
+ AllocationSpace space = Heap::SelectSpace(pretenure);
+ HeapObject* result = isolate()->heap()->AllocateRawWithRetry(size, space);
+ // New space objects are allocated white.
+ WriteBarrierMode write_barrier_mode =
+ space == NEW_SPACE ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
+ result->set_map_after_allocation(*map, write_barrier_mode);
+ return result;
+}
+
+Handle<HeapObject> Factory::NewFillerObject(int size, bool double_align,
+ AllocationSpace space) {
+ AllocationAlignment alignment = double_align ? kDoubleAligned : kWordAligned;
+ Heap* heap = isolate()->heap();
+ HeapObject* result = heap->AllocateRawWithRetry(size, space, alignment);
+#ifdef DEBUG
+ MemoryChunk* chunk = MemoryChunk::FromAddress(result->address());
+ DCHECK(chunk->owner()->identity() == space);
+#endif
+ heap->CreateFillerObjectAt(result->address(), size, ClearRecordedSlots::kNo);
+ return Handle<HeapObject>(result, isolate());
+}
Handle<PrototypeInfo> Factory::NewPrototypeInfo() {
Handle<PrototypeInfo> result =
Handle<PrototypeInfo>::cast(NewStruct(PROTOTYPE_INFO_TYPE, TENURED));
- result->set_prototype_users(WeakFixedArray::Empty());
+ result->set_prototype_users(FixedArrayOfWeakCells::Empty());
result->set_registry_slot(PrototypeInfo::UNREGISTERED);
- result->set_validity_cell(Smi::FromInt(Map::kPrototypeChainValid));
result->set_bit_field(0);
return result;
}
@@ -164,7 +177,7 @@ Handle<TemplateObjectDescription> Factory::NewTemplateObjectDescription(
Handle<Oddball> Factory::NewOddball(Handle<Map> map, const char* to_string,
Handle<Object> to_number,
const char* type_of, byte kind) {
- Handle<Oddball> oddball = New<Oddball>(map, OLD_SPACE);
+ Handle<Oddball> oddball(Oddball::cast(New(map, TENURED)), isolate());
Oddball::Initialize(isolate(), oddball, to_string, to_number, type_of, kind);
return oddball;
}
@@ -173,29 +186,62 @@ Handle<PropertyArray> Factory::NewPropertyArray(int length,
PretenureFlag pretenure) {
DCHECK_LE(0, length);
if (length == 0) return empty_property_array();
- CALL_HEAP_FUNCTION(
- isolate(), isolate()->heap()->AllocatePropertyArray(length, pretenure),
- PropertyArray);
+ HeapObject* result = AllocateRawFixedArray(length, pretenure);
+ result->set_map_after_allocation(*property_array_map(), SKIP_WRITE_BARRIER);
+ Handle<PropertyArray> array(PropertyArray::cast(result), isolate());
+ array->initialize_length(length);
+ MemsetPointer(array->data_start(), *undefined_value(), length);
+ return array;
}
-Handle<FixedArray> Factory::NewFixedArrayWithMap(
- Heap::RootListIndex map_root_index, int length, PretenureFlag pretenure) {
+Handle<FixedArray> Factory::NewFixedArrayWithFiller(
+ Heap::RootListIndex map_root_index, int length, Object* filler,
+ PretenureFlag pretenure) {
+ HeapObject* result = AllocateRawFixedArray(length, pretenure);
+ DCHECK(Heap::RootIsImmortalImmovable(map_root_index));
+ Map* map = Map::cast(isolate()->heap()->root(map_root_index));
+ result->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
+ Handle<FixedArray> array(FixedArray::cast(result), isolate());
+ array->set_length(length);
+ MemsetPointer(array->data_start(), filler, length);
+ return array;
+}
+
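A minimal sketch (not part of this patch) of the allocate / set-map / set-length / fill sequence that NewFixedArrayWithFiller follows, using an ordinary vector of words instead of heap memory; the two header words and all names here are assumptions for illustration.

#include <algorithm>
#include <cstdint>
#include <vector>

using TaggedWord = uintptr_t;    // stand-in for a tagged pointer word
constexpr int kHeaderWords = 2;  // assumed: one word for the map, one for the length

std::vector<TaggedWord> NewFixedArrayWithFillerSketch(int length,
                                                      TaggedWord map,
                                                      TaggedWord filler) {
  std::vector<TaggedWord> array(kHeaderWords + length);
  array[0] = map;                              // set_map_after_allocation: map word first
  array[1] = static_cast<TaggedWord>(length);  // set_length
  // MemsetPointer equivalent: every element slot starts out as |filler|.
  std::fill(array.begin() + kHeaderWords, array.end(), filler);
  return array;
}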
+template <typename T>
+Handle<T> Factory::NewFixedArrayWithMap(Heap::RootListIndex map_root_index,
+ int length, PretenureFlag pretenure) {
+ static_assert(std::is_base_of<FixedArray, T>::value,
+ "T must be a descendant of FixedArray");
// The zero-length case must be handled outside, where the knowledge
// about the map is available.
DCHECK_LT(0, length);
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateFixedArrayWithMap(
- map_root_index, length, pretenure),
- FixedArray);
+ return Handle<T>::cast(NewFixedArrayWithFiller(
+ map_root_index, length, *undefined_value(), pretenure));
}
+template Handle<FixedArray> Factory::NewFixedArrayWithMap<FixedArray>(
+ Heap::RootListIndex, int, PretenureFlag);
+
Handle<FixedArray> Factory::NewFixedArray(int length, PretenureFlag pretenure) {
DCHECK_LE(0, length);
if (length == 0) return empty_fixed_array();
+ return NewFixedArrayWithFiller(Heap::kFixedArrayMapRootIndex, length,
+ *undefined_value(), pretenure);
+}
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateFixedArray(length, pretenure),
- FixedArray);
+Handle<WeakFixedArray> Factory::NewWeakFixedArray(int length,
+ PretenureFlag pretenure) {
+ DCHECK_LE(0, length);
+ if (length == 0) return empty_weak_fixed_array();
+ HeapObject* result =
+ AllocateRawArray(WeakFixedArray::SizeFor(length), pretenure);
+ DCHECK(Heap::RootIsImmortalImmovable(Heap::kWeakFixedArrayMapRootIndex));
+ result->set_map_after_allocation(*weak_fixed_array_map(), SKIP_WRITE_BARRIER);
+ Handle<WeakFixedArray> array(WeakFixedArray::cast(result), isolate());
+ array->set_length(length);
+ MemsetPointer(array->data_start(),
+ HeapObjectReference::Strong(*undefined_value()), length);
+ return array;
}
MaybeHandle<FixedArray> Factory::TryNewFixedArray(int length,
@@ -203,23 +249,29 @@ MaybeHandle<FixedArray> Factory::TryNewFixedArray(int length,
DCHECK_LE(0, length);
if (length == 0) return empty_fixed_array();
- AllocationResult allocation =
- isolate()->heap()->AllocateFixedArray(length, pretenure);
- Object* array = nullptr;
- if (!allocation.To(&array)) return MaybeHandle<FixedArray>();
- return Handle<FixedArray>(FixedArray::cast(array), isolate());
+ int size = FixedArray::SizeFor(length);
+ AllocationSpace space = Heap::SelectSpace(pretenure);
+ Heap* heap = isolate()->heap();
+ AllocationResult allocation = heap->AllocateRaw(size, space);
+ HeapObject* result = nullptr;
+ if (!allocation.To(&result)) return MaybeHandle<FixedArray>();
+ if (size > kMaxRegularHeapObjectSize && FLAG_use_marking_progress_bar) {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(result->address());
+ chunk->SetFlag<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR);
+ }
+ result->set_map_after_allocation(*fixed_array_map(), SKIP_WRITE_BARRIER);
+ Handle<FixedArray> array(FixedArray::cast(result), isolate());
+ array->set_length(length);
+ MemsetPointer(array->data_start(), heap->undefined_value(), length);
+ return array;
}
Handle<FixedArray> Factory::NewFixedArrayWithHoles(int length,
PretenureFlag pretenure) {
DCHECK_LE(0, length);
if (length == 0) return empty_fixed_array();
-
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateFixedArrayWithFiller(
- Heap::kFixedArrayMapRootIndex, length, pretenure, *the_hole_value()),
- FixedArray);
+ return NewFixedArrayWithFiller(Heap::kFixedArrayMapRootIndex, length,
+ *the_hole_value(), pretenure);
}
Handle<FixedArray> Factory::NewUninitializedFixedArray(
@@ -230,16 +282,30 @@ Handle<FixedArray> Factory::NewUninitializedFixedArray(
// TODO(ulan): As an experiment this temporarily returns an initialized fixed
// array. After getting canary/performance coverage, either remove the
// function or revert to returning an uninitialized array.
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateFixedArray(length, pretenure),
- FixedArray);
+ return NewFixedArrayWithFiller(Heap::kFixedArrayMapRootIndex, length,
+ *undefined_value(), pretenure);
}
Handle<FeedbackVector> Factory::NewFeedbackVector(
Handle<SharedFunctionInfo> shared, PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(
- isolate(), isolate()->heap()->AllocateFeedbackVector(*shared, pretenure),
- FeedbackVector);
+ int length = shared->feedback_metadata()->slot_count();
+ DCHECK_LE(0, length);
+ int size = FeedbackVector::SizeFor(length);
+
+ HeapObject* result =
+ AllocateRawWithImmortalMap(size, pretenure, *feedback_vector_map());
+ Handle<FeedbackVector> vector(FeedbackVector::cast(result), isolate());
+ vector->set_shared_function_info(*shared);
+ vector->set_optimized_code_weak_or_smi(MaybeObject::FromSmi(Smi::FromEnum(
+ FLAG_log_function_events ? OptimizationMarker::kLogFirstExecution
+ : OptimizationMarker::kNone)));
+ vector->set_length(length);
+ vector->set_invocation_count(0);
+ vector->set_profiler_ticks(0);
+ vector->set_deopt_count(0);
+ // TODO(leszeks): Initialize based on the feedback metadata.
+ MemsetPointer(vector->slots_start(), *undefined_value(), length);
+ return vector;
}
Handle<BoilerplateDescription> Factory::NewBoilerplateDescription(
@@ -261,37 +327,66 @@ Handle<BoilerplateDescription> Factory::NewBoilerplateDescription(
size++;
}
+ if (size == 0) {
+ return isolate()->factory()->empty_boilerplate_description();
+ }
+
Handle<BoilerplateDescription> description =
- Handle<BoilerplateDescription>::cast(NewFixedArray(size, TENURED));
+ Handle<BoilerplateDescription>::cast(NewFixedArrayWithMap(
+ Heap::kBoilerplateDescriptionMapRootIndex, size, TENURED));
if (has_different_size_backing_store) {
- DCHECK((boilerplate != (all_properties - index_keys)) || has_seen_proto);
+ DCHECK_IMPLIES((boilerplate == (all_properties - index_keys)),
+ has_seen_proto);
description->set_backing_store_size(isolate(), backing_store_size);
}
+
return description;
}
-Handle<FixedArrayBase> Factory::NewFixedDoubleArray(int size,
+Handle<FixedArrayBase> Factory::NewFixedDoubleArray(int length,
PretenureFlag pretenure) {
- DCHECK_LE(0, size);
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateUninitializedFixedDoubleArray(size, pretenure),
- FixedArrayBase);
+ DCHECK_LE(0, length);
+ if (length == 0) return empty_fixed_array();
+ if (length > FixedDoubleArray::kMaxLength) {
+ isolate()->heap()->FatalProcessOutOfMemory("invalid array length");
+ }
+ int size = FixedDoubleArray::SizeFor(length);
+ Map* map = *fixed_double_array_map();
+ HeapObject* result =
+ AllocateRawWithImmortalMap(size, pretenure, map, kDoubleAligned);
+ Handle<FixedDoubleArray> array(FixedDoubleArray::cast(result), isolate());
+ array->set_length(length);
+ return array;
}
-
Handle<FixedArrayBase> Factory::NewFixedDoubleArrayWithHoles(
- int size,
- PretenureFlag pretenure) {
- DCHECK_LE(0, size);
- Handle<FixedArrayBase> array = NewFixedDoubleArray(size, pretenure);
- if (size > 0) {
- Handle<FixedDoubleArray>::cast(array)->FillWithHoles(0, size);
+ int length, PretenureFlag pretenure) {
+ DCHECK_LE(0, length);
+ Handle<FixedArrayBase> array = NewFixedDoubleArray(length, pretenure);
+ if (length > 0) {
+ Handle<FixedDoubleArray>::cast(array)->FillWithHoles(0, length);
}
return array;
}
+Handle<FeedbackMetadata> Factory::NewFeedbackMetadata(int slot_count) {
+ DCHECK_LE(0, slot_count);
+ int size = FeedbackMetadata::SizeFor(slot_count);
+ HeapObject* result =
+ AllocateRawWithImmortalMap(size, TENURED, *feedback_metadata_map());
+ Handle<FeedbackMetadata> data(FeedbackMetadata::cast(result), isolate());
+ data->set_slot_count(slot_count);
+
+ // Initialize the data section to 0.
+ int data_size = size - FeedbackMetadata::kHeaderSize;
+ byte* data_start = data->address() + FeedbackMetadata::kHeaderSize;
+ memset(data_start, 0, data_size);
+ // Fields have been zeroed out but not initialized, so this object will not
+ // pass object verification at this point.
+ return data;
+}
+
Handle<FrameArray> Factory::NewFrameArray(int number_of_frames,
PretenureFlag pretenure) {
DCHECK_LE(0, number_of_frames);
@@ -302,33 +397,43 @@ Handle<FrameArray> Factory::NewFrameArray(int number_of_frames,
}
Handle<SmallOrderedHashSet> Factory::NewSmallOrderedHashSet(
- int size, PretenureFlag pretenure) {
- DCHECK_LE(0, size);
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateSmallOrderedHashSet(size, pretenure),
- SmallOrderedHashSet);
+ int capacity, PretenureFlag pretenure) {
+ DCHECK_LE(0, capacity);
+ CHECK_LE(capacity, SmallOrderedHashSet::kMaxCapacity);
+ DCHECK_EQ(0, capacity % SmallOrderedHashSet::kLoadFactor);
+
+ int size = SmallOrderedHashSet::Size(capacity);
+ Map* map = *small_ordered_hash_set_map();
+ HeapObject* result = AllocateRawWithImmortalMap(size, pretenure, map);
+ Handle<SmallOrderedHashSet> table(SmallOrderedHashSet::cast(result),
+ isolate());
+ table->Initialize(isolate(), capacity);
+ return table;
}
Handle<SmallOrderedHashMap> Factory::NewSmallOrderedHashMap(
- int size, PretenureFlag pretenure) {
- DCHECK_LE(0, size);
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateSmallOrderedHashMap(size, pretenure),
- SmallOrderedHashMap);
+ int capacity, PretenureFlag pretenure) {
+ DCHECK_LE(0, capacity);
+ CHECK_LE(capacity, SmallOrderedHashMap::kMaxCapacity);
+ DCHECK_EQ(0, capacity % SmallOrderedHashMap::kLoadFactor);
+
+ int size = SmallOrderedHashMap::Size(capacity);
+ Map* map = *small_ordered_hash_map_map();
+ HeapObject* result = AllocateRawWithImmortalMap(size, pretenure, map);
+ Handle<SmallOrderedHashMap> table(SmallOrderedHashMap::cast(result),
+ isolate());
+ table->Initialize(isolate(), capacity);
+ return table;
}
Handle<OrderedHashSet> Factory::NewOrderedHashSet() {
return OrderedHashSet::Allocate(isolate(), OrderedHashSet::kMinCapacity);
}
-
Handle<OrderedHashMap> Factory::NewOrderedHashMap() {
return OrderedHashMap::Allocate(isolate(), OrderedHashMap::kMinCapacity);
}
-
Handle<AccessorPair> Factory::NewAccessorPair() {
Handle<AccessorPair> accessors =
Handle<AccessorPair>::cast(NewStruct(ACCESSOR_PAIR_TYPE, TENURED));
@@ -337,34 +442,29 @@ Handle<AccessorPair> Factory::NewAccessorPair() {
return accessors;
}
-
// Internalized strings are created in the old generation (data space).
Handle<String> Factory::InternalizeUtf8String(Vector<const char> string) {
Utf8StringKey key(string, isolate()->heap()->HashSeed());
return InternalizeStringWithKey(&key);
}
-
Handle<String> Factory::InternalizeOneByteString(Vector<const uint8_t> string) {
OneByteStringKey key(string, isolate()->heap()->HashSeed());
return InternalizeStringWithKey(&key);
}
-
Handle<String> Factory::InternalizeOneByteString(
Handle<SeqOneByteString> string, int from, int length) {
SeqOneByteSubStringKey key(string, from, length);
return InternalizeStringWithKey(&key);
}
-
Handle<String> Factory::InternalizeTwoByteString(Vector<const uc16> string) {
TwoByteStringKey key(string, isolate()->heap()->HashSeed());
return InternalizeStringWithKey(&key);
}
-
-template<class StringTableKey>
+template <class StringTableKey>
Handle<String> Factory::InternalizeStringWithKey(StringTableKey* key) {
return StringTable::LookupKey(isolate(), key);
}
@@ -375,16 +475,13 @@ MaybeHandle<String> Factory::NewStringFromOneByte(Vector<const uint8_t> string,
if (length == 0) return empty_string();
if (length == 1) return LookupSingleCharacterStringFromCode(string[0]);
Handle<SeqOneByteString> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(),
- result,
- NewRawOneByteString(string.length(), pretenure),
- String);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
+ NewRawOneByteString(string.length(), pretenure),
+ String);
DisallowHeapAllocation no_gc;
// Copy the characters into the new object.
- CopyChars(SeqOneByteString::cast(*result)->GetChars(),
- string.start(),
+ CopyChars(SeqOneByteString::cast(*result)->GetChars(), string.start(),
length);
return result;
}
@@ -403,8 +500,8 @@ MaybeHandle<String> Factory::NewStringFromUtf8(Vector<const char> string,
// Non-ASCII and we need to decode.
auto non_ascii = string.SubVector(non_ascii_start, length);
- Access<UnicodeCache::Utf8Decoder>
- decoder(isolate()->unicode_cache()->utf8_decoder());
+ Access<UnicodeCache::Utf8Decoder> decoder(
+ isolate()->unicode_cache()->utf8_decoder());
decoder->Reset(non_ascii);
int utf16_length = static_cast<int>(decoder->Utf16Length());
@@ -414,8 +511,7 @@ MaybeHandle<String> Factory::NewStringFromUtf8(Vector<const char> string,
Handle<SeqTwoByteString> result;
ASSIGN_RETURN_ON_EXCEPTION(
isolate(), result,
- NewRawTwoByteString(non_ascii_start + utf16_length, pretenure),
- String);
+ NewRawTwoByteString(non_ascii_start + utf16_length, pretenure), String);
// Copy ASCII portion.
uint16_t* data = result->GetChars();
@@ -480,20 +576,14 @@ MaybeHandle<String> Factory::NewStringFromTwoByte(const uc16* string,
if (String::IsOneByte(string, length)) {
if (length == 1) return LookupSingleCharacterStringFromCode(string[0]);
Handle<SeqOneByteString> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(),
- result,
- NewRawOneByteString(length, pretenure),
- String);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
+ NewRawOneByteString(length, pretenure), String);
CopyChars(result->GetChars(), string, length);
return result;
} else {
Handle<SeqTwoByteString> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(),
- result,
- NewRawTwoByteString(length, pretenure),
- String);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
+ NewRawTwoByteString(length, pretenure), String);
CopyChars(result->GetChars(), string, length);
return result;
}
@@ -510,55 +600,161 @@ MaybeHandle<String> Factory::NewStringFromTwoByte(
pretenure);
}
+namespace {
+
+bool inline IsOneByte(Vector<const char> str, int chars) {
+ // TODO(dcarney): incorporate Latin-1 check when Latin-1 is supported?
+ return chars == str.length();
+}
+
+bool inline IsOneByte(Handle<String> str, int chars) {
+ return str->IsOneByteRepresentation();
+}
+
+inline void WriteOneByteData(Vector<const char> vector, uint8_t* chars,
+ int len) {
+ // Only works for one byte strings.
+ DCHECK(vector.length() == len);
+ MemCopy(chars, vector.start(), len);
+}
+
+inline void WriteTwoByteData(Vector<const char> vector, uint16_t* chars,
+ int len) {
+ unibrow::Utf8Iterator it = unibrow::Utf8Iterator(vector);
+ while (!it.Done()) {
+ DCHECK_GT(len, 0);
+ len -= 1;
+
+ uint16_t c = *it;
+ ++it;
+ DCHECK_NE(unibrow::Utf8::kBadChar, c);
+ *chars++ = c;
+ }
+ DCHECK_EQ(len, 0);
+}
+
+inline void WriteOneByteData(Handle<String> s, uint8_t* chars, int len) {
+ DCHECK(s->length() == len);
+ String::WriteToFlat(*s, chars, 0, len);
+}
+
+inline void WriteTwoByteData(Handle<String> s, uint16_t* chars, int len) {
+ DCHECK(s->length() == len);
+ String::WriteToFlat(*s, chars, 0, len);
+}
+
+} // namespace
+
+Handle<SeqOneByteString> Factory::AllocateRawOneByteInternalizedString(
+ int length, uint32_t hash_field) {
+ CHECK_GE(String::kMaxLength, length);
+ // The canonical empty_string is the only zero-length string we allow.
+ DCHECK_IMPLIES(
+ length == 0,
+ isolate()->heap()->roots_[Heap::kempty_stringRootIndex] == nullptr);
+
+ Map* map = *one_byte_internalized_string_map();
+ int size = SeqOneByteString::SizeFor(length);
+ HeapObject* result = AllocateRawWithImmortalMap(size, TENURED, map);
+ Handle<SeqOneByteString> answer(SeqOneByteString::cast(result), isolate());
+ answer->set_length(length);
+ answer->set_hash_field(hash_field);
+ DCHECK_EQ(size, answer->Size());
+ return answer;
+}
+
+Handle<String> Factory::AllocateTwoByteInternalizedString(
+ Vector<const uc16> str, uint32_t hash_field) {
+ CHECK_GE(String::kMaxLength, str.length());
+ DCHECK_NE(0, str.length()); // Use Heap::empty_string() instead.
+
+ Map* map = *internalized_string_map();
+ int size = SeqTwoByteString::SizeFor(str.length());
+ HeapObject* result = AllocateRawWithImmortalMap(size, TENURED, map);
+ Handle<String> answer(String::cast(result), isolate());
+ answer->set_length(str.length());
+ answer->set_hash_field(hash_field);
+ DCHECK_EQ(size, answer->Size());
+
+ // Fill in the characters.
+ MemCopy(answer->address() + SeqTwoByteString::kHeaderSize, str.start(),
+ str.length() * kUC16Size);
+
+ return answer;
+}
+
+template <bool is_one_byte, typename T>
+Handle<String> Factory::AllocateInternalizedStringImpl(T t, int chars,
+ uint32_t hash_field) {
+ DCHECK_LE(0, chars);
+ DCHECK_GE(String::kMaxLength, chars);
+
+ // Compute map and object size.
+ int size;
+ Map* map;
+ if (is_one_byte) {
+ map = *one_byte_internalized_string_map();
+ size = SeqOneByteString::SizeFor(chars);
+ } else {
+ map = *internalized_string_map();
+ size = SeqTwoByteString::SizeFor(chars);
+ }
+
+ HeapObject* result = AllocateRawWithImmortalMap(size, TENURED, map);
+ Handle<String> answer(String::cast(result), isolate());
+ answer->set_length(chars);
+ answer->set_hash_field(hash_field);
+ DCHECK_EQ(size, answer->Size());
+
+ if (is_one_byte) {
+ WriteOneByteData(t, SeqOneByteString::cast(*answer)->GetChars(), chars);
+ } else {
+ WriteTwoByteData(t, SeqTwoByteString::cast(*answer)->GetChars(), chars);
+ }
+ return answer;
+}
+
Handle<String> Factory::NewInternalizedStringFromUtf8(Vector<const char> str,
int chars,
uint32_t hash_field) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateInternalizedStringFromUtf8(
- str, chars, hash_field),
- String);
+ if (IsOneByte(str, chars)) {
+ Handle<SeqOneByteString> result =
+ AllocateRawOneByteInternalizedString(str.length(), hash_field);
+ MemCopy(result->GetChars(), str.start(), str.length());
+ return result;
+ }
+ return AllocateInternalizedStringImpl<false>(str, chars, hash_field);
}
-
-MUST_USE_RESULT Handle<String> Factory::NewOneByteInternalizedString(
- Vector<const uint8_t> str,
- uint32_t hash_field) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateOneByteInternalizedString(str, hash_field),
- String);
+Handle<String> Factory::NewOneByteInternalizedString(Vector<const uint8_t> str,
+ uint32_t hash_field) {
+ Handle<SeqOneByteString> result =
+ AllocateRawOneByteInternalizedString(str.length(), hash_field);
+ MemCopy(result->GetChars(), str.start(), str.length());
+ return result;
}
-
-MUST_USE_RESULT Handle<String> Factory::NewOneByteInternalizedSubString(
+Handle<String> Factory::NewOneByteInternalizedSubString(
Handle<SeqOneByteString> string, int offset, int length,
uint32_t hash_field) {
- CALL_HEAP_FUNCTION(
- isolate(), isolate()->heap()->AllocateOneByteInternalizedString(
- Vector<const uint8_t>(string->GetChars() + offset, length),
- hash_field),
- String);
+ Handle<SeqOneByteString> result =
+ AllocateRawOneByteInternalizedString(length, hash_field);
+ MemCopy(result->GetChars(), string->GetChars() + offset, length);
+ return result;
}
-
-MUST_USE_RESULT Handle<String> Factory::NewTwoByteInternalizedString(
- Vector<const uc16> str,
- uint32_t hash_field) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateTwoByteInternalizedString(str, hash_field),
- String);
+Handle<String> Factory::NewTwoByteInternalizedString(Vector<const uc16> str,
+ uint32_t hash_field) {
+ return AllocateTwoByteInternalizedString(str, hash_field);
}
-
-Handle<String> Factory::NewInternalizedStringImpl(
- Handle<String> string, int chars, uint32_t hash_field) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateInternalizedStringImpl(
- *string, chars, hash_field),
- String);
+Handle<String> Factory::NewInternalizedStringImpl(Handle<String> string,
+ int chars,
+ uint32_t hash_field) {
+ if (IsOneByte(string, chars)) {
+ return AllocateInternalizedStringImpl<true>(string, chars, hash_field);
+ }
+ return AllocateInternalizedStringImpl<false>(string, chars, hash_field);
}
namespace {
@@ -581,7 +777,8 @@ MaybeHandle<Map> GetInternalizedStringMap(Factory* f, Handle<String> string) {
return f->short_external_one_byte_internalized_string_map();
case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
return f->short_external_internalized_string_with_one_byte_data_map();
- default: return MaybeHandle<Map>(); // No match found.
+ default:
+ return MaybeHandle<Map>(); // No match found.
}
}
@@ -599,7 +796,8 @@ template <class StringClass>
Handle<StringClass> Factory::InternalizeExternalString(Handle<String> string) {
Handle<StringClass> cast_string = Handle<StringClass>::cast(string);
Handle<Map> map = GetInternalizedStringMap(this, string).ToHandleChecked();
- Handle<StringClass> external_string = New<StringClass>(map, OLD_SPACE);
+ Handle<StringClass> external_string(StringClass::cast(New(map, TENURED)),
+ isolate());
external_string->set_length(cast_string->length());
external_string->set_hash_field(cast_string->hash_field());
external_string->set_resource(nullptr);
@@ -618,12 +816,17 @@ MaybeHandle<SeqOneByteString> Factory::NewRawOneByteString(
THROW_NEW_ERROR(isolate(), NewInvalidStringLengthError(), SeqOneByteString);
}
DCHECK_GT(length, 0); // Use Factory::empty_string() instead.
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateRawOneByteString(length, pretenure),
- SeqOneByteString);
-}
+ int size = SeqOneByteString::SizeFor(length);
+ DCHECK_GE(SeqOneByteString::kMaxSize, size);
+ HeapObject* result =
+ AllocateRawWithImmortalMap(size, pretenure, *one_byte_string_map());
+ Handle<SeqOneByteString> string(SeqOneByteString::cast(result), isolate());
+ string->set_length(length);
+ string->set_hash_field(String::kEmptyHashField);
+ DCHECK_EQ(size, string->Size());
+ return string;
+}
MaybeHandle<SeqTwoByteString> Factory::NewRawTwoByteString(
int length, PretenureFlag pretenure) {
@@ -631,12 +834,17 @@ MaybeHandle<SeqTwoByteString> Factory::NewRawTwoByteString(
THROW_NEW_ERROR(isolate(), NewInvalidStringLengthError(), SeqTwoByteString);
}
DCHECK_GT(length, 0); // Use Factory::empty_string() instead.
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateRawTwoByteString(length, pretenure),
- SeqTwoByteString);
-}
+ int size = SeqTwoByteString::SizeFor(length);
+ DCHECK_GE(SeqTwoByteString::kMaxSize, size);
+ HeapObject* result =
+ AllocateRawWithImmortalMap(size, pretenure, *string_map());
+ Handle<SeqTwoByteString> string(SeqTwoByteString::cast(result), isolate());
+ string->set_length(length);
+ string->set_hash_field(String::kEmptyHashField);
+ DCHECK_EQ(size, string->Size());
+ return string;
+}
Handle<String> Factory::LookupSingleCharacterStringFromCode(uint32_t code) {
if (code <= String::kMaxOneByteCharCodeU) {
@@ -661,14 +869,12 @@ Handle<String> Factory::LookupSingleCharacterStringFromCode(uint32_t code) {
return result;
}
-
// Returns true for a character in a range. Both limits are inclusive.
static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
// This makes use of the unsigned wraparound.
return character - from <= to - from;
}
-
static inline Handle<String> MakeOrFindTwoCharacterString(Isolate* isolate,
uint16_t c1,
uint16_t c2) {
@@ -676,8 +882,8 @@ static inline Handle<String> MakeOrFindTwoCharacterString(Isolate* isolate,
// LookupTwoCharsStringIfExists, so we skip this step for such strings.
if (!Between(c1, '0', '9') || !Between(c2, '0', '9')) {
Handle<String> result;
- if (StringTable::LookupTwoCharsStringIfExists(isolate, c1, c2).
- ToHandle(&result)) {
+ if (StringTable::LookupTwoCharsStringIfExists(isolate, c1, c2)
+ .ToHandle(&result)) {
return result;
}
}
@@ -704,8 +910,7 @@ static inline Handle<String> MakeOrFindTwoCharacterString(Isolate* isolate,
}
}
-
-template<typename SinkChar, typename StringType>
+template <typename SinkChar, typename StringType>
Handle<String> ConcatStringContent(Handle<StringType> result,
Handle<String> first,
Handle<String> second) {
@@ -716,7 +921,6 @@ Handle<String> ConcatStringContent(Handle<StringType> result,
return result;
}
-
MaybeHandle<String> Factory::NewConsString(Handle<String> left,
Handle<String> right) {
if (left->IsThinString()) {
@@ -787,10 +991,11 @@ MaybeHandle<String> Factory::NewConsString(Handle<String> left,
}
return (is_one_byte_data_in_two_byte_string)
- ? ConcatStringContent<uint8_t>(
- NewRawOneByteString(length).ToHandleChecked(), left, right)
- : ConcatStringContent<uc16>(
- NewRawTwoByteString(length).ToHandleChecked(), left, right);
+ ? ConcatStringContent<uint8_t>(
+ NewRawOneByteString(length).ToHandleChecked(), left, right)
+ : ConcatStringContent<uc16>(
+ NewRawTwoByteString(length).ToHandleChecked(), left,
+ right);
}
bool one_byte = (is_one_byte || is_one_byte_data_in_two_byte_string);
@@ -804,9 +1009,10 @@ Handle<String> Factory::NewConsString(Handle<String> left, Handle<String> right,
DCHECK_GE(length, ConsString::kMinLength);
DCHECK_LE(length, String::kMaxLength);
- Handle<ConsString> result =
- one_byte ? New<ConsString>(cons_one_byte_string_map(), NEW_SPACE)
- : New<ConsString>(cons_string_map(), NEW_SPACE);
+ Handle<ConsString> result(
+ ConsString::cast(one_byte ? New(cons_one_byte_string_map(), NOT_TENURED)
+ : New(cons_string_map(), NOT_TENURED)),
+ isolate());
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
@@ -832,8 +1038,7 @@ Handle<String> Factory::NewSurrogatePairString(uint16_t lead, uint16_t trail) {
return str;
}
-Handle<String> Factory::NewProperSubString(Handle<String> str,
- int begin,
+Handle<String> Factory::NewProperSubString(Handle<String> str, int begin,
int end) {
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) str->StringVerify();
@@ -890,7 +1095,8 @@ Handle<String> Factory::NewProperSubString(Handle<String> str,
Handle<Map> map = str->IsOneByteRepresentation()
? sliced_one_byte_string_map()
: sliced_string_map();
- Handle<SlicedString> slice = New<SlicedString>(map, NEW_SPACE);
+ Handle<SlicedString> slice(SlicedString::cast(New(map, NOT_TENURED)),
+ isolate());
slice->set_hash_field(String::kEmptyHashField);
slice->set_length(length);
@@ -914,8 +1120,8 @@ MaybeHandle<String> Factory::NewExternalStringFromOneByte(
} else {
map = external_one_byte_string_map();
}
- Handle<ExternalOneByteString> external_string =
- New<ExternalOneByteString>(map, NEW_SPACE);
+ Handle<ExternalOneByteString> external_string(
+ ExternalOneByteString::cast(New(map, NOT_TENURED)), isolate());
external_string->set_length(static_cast<int>(length));
external_string->set_hash_field(String::kEmptyHashField);
external_string->set_resource(resource);
@@ -923,7 +1129,6 @@ MaybeHandle<String> Factory::NewExternalStringFromOneByte(
return external_string;
}
-
MaybeHandle<String> Factory::NewExternalStringFromTwoByte(
const ExternalTwoByteString::Resource* resource) {
size_t length = resource->length();
@@ -935,7 +1140,8 @@ MaybeHandle<String> Factory::NewExternalStringFromTwoByte(
// For small strings we check whether the resource contains only
// one byte characters. If yes, we use a different string map.
static const size_t kOneByteCheckLengthLimit = 32;
- bool is_one_byte = length <= kOneByteCheckLengthLimit &&
+ bool is_one_byte =
+ length <= kOneByteCheckLengthLimit &&
String::IsOneByte(resource->data(), static_cast<int>(length));
Handle<Map> map;
if (resource->IsCompressible()) {
@@ -946,8 +1152,8 @@ MaybeHandle<String> Factory::NewExternalStringFromTwoByte(
map = is_one_byte ? external_string_with_one_byte_data_map()
: external_string_map();
}
- Handle<ExternalTwoByteString> external_string =
- New<ExternalTwoByteString>(map, NEW_SPACE);
+ Handle<ExternalTwoByteString> external_string(
+ ExternalTwoByteString::cast(New(map, NOT_TENURED)), isolate());
external_string->set_length(static_cast<int>(length));
external_string->set_hash_field(String::kEmptyHashField);
external_string->set_resource(resource);
@@ -961,8 +1167,8 @@ Handle<ExternalOneByteString> Factory::NewNativeSourceString(
DCHECK_LE(length, static_cast<size_t>(String::kMaxLength));
Handle<Map> map = native_source_string_map();
- Handle<ExternalOneByteString> external_string =
- New<ExternalOneByteString>(map, OLD_SPACE);
+ Handle<ExternalOneByteString> external_string(
+ ExternalOneByteString::cast(New(map, TENURED)), isolate());
external_string->set_length(static_cast<int>(length));
external_string->set_hash_field(String::kEmptyHashField);
external_string->set_resource(resource);
@@ -983,12 +1189,23 @@ Handle<JSStringIterator> Factory::NewJSStringIterator(Handle<String> string) {
}
Handle<Symbol> Factory::NewSymbol() {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateSymbol(),
- Symbol);
-}
+ // Statically ensure that it is safe to allocate symbols in paged spaces.
+ STATIC_ASSERT(Symbol::kSize <= kMaxRegularHeapObjectSize);
+
+ HeapObject* result =
+ AllocateRawWithImmortalMap(Symbol::kSize, TENURED, *symbol_map());
+
+ // Generate a random hash value.
+ int hash = isolate()->GenerateIdentityHash(Name::kHashBitMask);
+ Handle<Symbol> symbol(Symbol::cast(result), isolate());
+ symbol->set_hash_field(Name::kIsNotArrayIndexMask |
+ (hash << Name::kHashShift));
+ symbol->set_name(*undefined_value());
+ symbol->set_flags(0);
+ DCHECK(!symbol->is_private());
+ return symbol;
+}
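A standalone sketch (not part of this patch) of the hash-field packing done in NewSymbol above: a random identity hash is masked, shifted past the flag bits, and tagged as not-an-array-index. The shift, mask, and width constants below are placeholders, not the real Name layout.

#include <cstdint>
#include <random>

constexpr uint32_t kHashShiftSketch = 2;            // assumed flag-bit count
constexpr uint32_t kIsNotArrayIndexMaskSketch = 1;  // assumed flag bit
constexpr uint32_t kHashBitMaskSketch = (1u << 30) - 1;

uint32_t MakeSymbolHashField(std::mt19937& rng) {
  const uint32_t hash = rng() & kHashBitMaskSketch;  // GenerateIdentityHash stand-in
  return kIsNotArrayIndexMaskSketch | (hash << kHashShiftSketch);
}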
Handle<Symbol> Factory::NewPrivateSymbol() {
Handle<Symbol> symbol = NewSymbol();
@@ -1003,10 +1220,8 @@ Handle<Symbol> Factory::NewPrivateFieldSymbol() {
}
Handle<Context> Factory::NewNativeContext() {
- Handle<FixedArray> array =
- NewFixedArray(Context::NATIVE_CONTEXT_SLOTS, TENURED);
- array->set_map_no_write_barrier(*native_context_map());
- Handle<Context> context = Handle<Context>::cast(array);
+ Handle<Context> context = NewFixedArrayWithMap<Context>(
+ Heap::kNativeContextMapRootIndex, Context::NATIVE_CONTEXT_SLOTS, TENURED);
context->set_native_context(*context);
context->set_errors_thrown(Smi::kZero);
context->set_math_random_index(Smi::kZero);
@@ -1017,14 +1232,11 @@ Handle<Context> Factory::NewNativeContext() {
return context;
}
-
Handle<Context> Factory::NewScriptContext(Handle<JSFunction> function,
Handle<ScopeInfo> scope_info) {
DCHECK_EQ(scope_info->scope_type(), SCRIPT_SCOPE);
- Handle<FixedArray> array =
- NewFixedArray(scope_info->ContextLength(), TENURED);
- array->set_map_no_write_barrier(*script_context_map());
- Handle<Context> context = Handle<Context>::cast(array);
+ Handle<Context> context = NewFixedArrayWithMap<Context>(
+ Heap::kScriptContextMapRootIndex, scope_info->ContextLength(), TENURED);
context->set_closure(*function);
context->set_previous(function->context());
context->set_extension(*scope_info);
@@ -1033,12 +1245,11 @@ Handle<Context> Factory::NewScriptContext(Handle<JSFunction> function,
return context;
}
-
Handle<ScriptContextTable> Factory::NewScriptContextTable() {
- Handle<FixedArray> array = NewFixedArray(1);
- array->set_map_no_write_barrier(*script_context_table_map());
Handle<ScriptContextTable> context_table =
- Handle<ScriptContextTable>::cast(array);
+ NewFixedArrayWithMap<ScriptContextTable>(
+ Heap::kScriptContextTableMapRootIndex,
+ ScriptContextTable::kMinLength);
context_table->set_used(0);
return context_table;
}
@@ -1047,10 +1258,8 @@ Handle<Context> Factory::NewModuleContext(Handle<Module> module,
Handle<JSFunction> function,
Handle<ScopeInfo> scope_info) {
DCHECK_EQ(scope_info->scope_type(), MODULE_SCOPE);
- Handle<FixedArray> array =
- NewFixedArray(scope_info->ContextLength(), TENURED);
- array->set_map_no_write_barrier(*module_context_map());
- Handle<Context> context = Handle<Context>::cast(array);
+ Handle<Context> context = NewFixedArrayWithMap<Context>(
+ Heap::kModuleContextMapRootIndex, scope_info->ContextLength(), TENURED);
context->set_closure(*function);
context->set_previous(function->context());
context->set_extension(*module);
@@ -1064,20 +1273,18 @@ Handle<Context> Factory::NewFunctionContext(int length,
ScopeType scope_type) {
DCHECK(function->shared()->scope_info()->scope_type() == scope_type);
DCHECK(length >= Context::MIN_CONTEXT_SLOTS);
- Handle<FixedArray> array = NewFixedArray(length);
- Handle<Map> map;
+ Heap::RootListIndex mapRootIndex;
switch (scope_type) {
case EVAL_SCOPE:
- map = eval_context_map();
+ mapRootIndex = Heap::kEvalContextMapRootIndex;
break;
case FUNCTION_SCOPE:
- map = function_context_map();
+ mapRootIndex = Heap::kFunctionContextMapRootIndex;
break;
default:
UNREACHABLE();
}
- array->set_map_no_write_barrier(*map);
- Handle<Context> context = Handle<Context>::cast(array);
+ Handle<Context> context = NewFixedArrayWithMap<Context>(mapRootIndex, length);
context->set_closure(*function);
context->set_previous(function->context());
context->set_extension(*the_hole_value());
@@ -1092,9 +1299,8 @@ Handle<Context> Factory::NewCatchContext(Handle<JSFunction> function,
Handle<Object> thrown_object) {
STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
Handle<ContextExtension> extension = NewContextExtension(scope_info, name);
- Handle<FixedArray> array = NewFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
- array->set_map_no_write_barrier(*catch_context_map());
- Handle<Context> context = Handle<Context>::cast(array);
+ Handle<Context> context = NewFixedArrayWithMap<Context>(
+ Heap::kCatchContextMapRootIndex, Context::MIN_CONTEXT_SLOTS + 1);
context->set_closure(*function);
context->set_previous(*previous);
context->set_extension(*extension);
@@ -1113,9 +1319,8 @@ Handle<Context> Factory::NewDebugEvaluateContext(Handle<Context> previous,
Handle<ContextExtension> context_extension = NewContextExtension(
scope_info, extension.is_null() ? Handle<Object>::cast(undefined_value())
: Handle<Object>::cast(extension));
- Handle<FixedArray> array = NewFixedArray(Context::MIN_CONTEXT_SLOTS + 2);
- array->set_map_no_write_barrier(*debug_evaluate_context_map());
- Handle<Context> c = Handle<Context>::cast(array);
+ Handle<Context> c = NewFixedArrayWithMap<Context>(
+ Heap::kDebugEvaluateContextMapRootIndex, Context::MIN_CONTEXT_SLOTS + 2);
c->set_closure(wrapped.is_null() ? previous->closure() : wrapped->closure());
c->set_previous(*previous);
c->set_native_context(previous->native_context());
@@ -1131,9 +1336,8 @@ Handle<Context> Factory::NewWithContext(Handle<JSFunction> function,
Handle<JSReceiver> extension) {
Handle<ContextExtension> context_extension =
NewContextExtension(scope_info, extension);
- Handle<FixedArray> array = NewFixedArray(Context::MIN_CONTEXT_SLOTS);
- array->set_map_no_write_barrier(*with_context_map());
- Handle<Context> context = Handle<Context>::cast(array);
+ Handle<Context> context = NewFixedArrayWithMap<Context>(
+ Heap::kWithContextMapRootIndex, Context::MIN_CONTEXT_SLOTS);
context->set_closure(*function);
context->set_previous(*previous);
context->set_extension(*context_extension);
@@ -1141,14 +1345,12 @@ Handle<Context> Factory::NewWithContext(Handle<JSFunction> function,
return context;
}
-
Handle<Context> Factory::NewBlockContext(Handle<JSFunction> function,
Handle<Context> previous,
Handle<ScopeInfo> scope_info) {
DCHECK_EQ(scope_info->scope_type(), BLOCK_SCOPE);
- Handle<FixedArray> array = NewFixedArray(scope_info->ContextLength());
- array->set_map_no_write_barrier(*block_context_map());
- Handle<Context> context = Handle<Context>::cast(array);
+ Handle<Context> context = NewFixedArrayWithMap<Context>(
+ Heap::kBlockContextMapRootIndex, scope_info->ContextLength());
context->set_closure(*function);
context->set_previous(*previous);
context->set_extension(*scope_info);
@@ -1157,8 +1359,22 @@ Handle<Context> Factory::NewBlockContext(Handle<JSFunction> function,
}
Handle<Struct> Factory::NewStruct(InstanceType type, PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(
- isolate(), isolate()->heap()->AllocateStruct(type, pretenure), Struct);
+ Map* map;
+ switch (type) {
+#define MAKE_CASE(NAME, Name, name) \
+ case NAME##_TYPE: \
+ map = *name##_map(); \
+ break;
+ STRUCT_LIST(MAKE_CASE)
+#undef MAKE_CASE
+ default:
+ UNREACHABLE();
+ }
+ int size = map->instance_size();
+ HeapObject* result = AllocateRawWithImmortalMap(size, pretenure, map);
+ Handle<Struct> str(Struct::cast(result), isolate());
+ str->InitializeBody(size);
+ return str;
}
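A small self-contained sketch (not part of this patch) of the X-macro dispatch pattern NewStruct uses with STRUCT_LIST above, shown with a made-up two-entry list; each list entry expands into one switch case.

#define SKETCH_STRUCT_LIST(V) \
  V(TUPLE2, Tuple2, tuple2)   \
  V(TUPLE3, Tuple3, tuple3)

enum SketchInstanceType { TUPLE2_TYPE, TUPLE3_TYPE };

// Expands SKETCH_STRUCT_LIST once per case, the same way NewStruct maps an
// instance type to its map accessor.
const char* MapNameForType(SketchInstanceType type) {
  switch (type) {
#define MAKE_CASE(NAME, Name, name) \
  case NAME##_TYPE:                 \
    return #name "_map";
    SKETCH_STRUCT_LIST(MAKE_CASE)
#undef MAKE_CASE
  }
  return nullptr;
}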
Handle<AliasedArgumentsEntry> Factory::NewAliasedArgumentsEntry(
@@ -1169,7 +1385,6 @@ Handle<AliasedArgumentsEntry> Factory::NewAliasedArgumentsEntry(
return entry;
}
-
Handle<AccessorInfo> Factory::NewAccessorInfo() {
Handle<AccessorInfo> info =
Handle<AccessorInfo>::cast(NewStruct(ACCESSOR_INFO_TYPE, TENURED));
@@ -1180,7 +1395,6 @@ Handle<AccessorInfo> Factory::NewAccessorInfo() {
return info;
}
-
Handle<Script> Factory::NewScript(Handle<String> source) {
// Create and initialize script object.
Heap* heap = isolate()->heap();
@@ -1196,10 +1410,11 @@ Handle<Script> Factory::NewScript(Handle<String> source) {
script->set_line_ends(heap->undefined_value());
script->set_eval_from_shared_or_wrapped_arguments(heap->undefined_value());
script->set_eval_from_position(0);
- script->set_shared_function_infos(*empty_fixed_array(), SKIP_WRITE_BARRIER);
+ script->set_shared_function_infos(*empty_weak_fixed_array(),
+ SKIP_WRITE_BARRIER);
script->set_flags(0);
script->set_host_defined_options(*empty_fixed_array());
- heap->set_script_list(*WeakFixedArray::Add(script_list(), script));
+ heap->set_script_list(*FixedArrayOfWeakCells::Add(script_list(), script));
return script;
}
@@ -1237,109 +1452,180 @@ Handle<PromiseResolveThenableJobTask> Factory::NewPromiseResolveThenableJobTask(
}
Handle<Foreign> Factory::NewForeign(Address addr, PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateForeign(addr, pretenure),
- Foreign);
+ // Statically ensure that it is safe to allocate foreigns in paged spaces.
+ STATIC_ASSERT(Foreign::kSize <= kMaxRegularHeapObjectSize);
+ Map* map = *foreign_map();
+ HeapObject* result =
+ AllocateRawWithImmortalMap(map->instance_size(), pretenure, map);
+ Handle<Foreign> foreign(Foreign::cast(result), isolate());
+ foreign->set_foreign_address(addr);
+ return foreign;
}
-
Handle<ByteArray> Factory::NewByteArray(int length, PretenureFlag pretenure) {
DCHECK_LE(0, length);
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateByteArray(length, pretenure),
- ByteArray);
+ if (length > ByteArray::kMaxLength) {
+ isolate()->heap()->FatalProcessOutOfMemory("invalid array length");
+ }
+ int size = ByteArray::SizeFor(length);
+ HeapObject* result =
+ AllocateRawWithImmortalMap(size, pretenure, *byte_array_map());
+ Handle<ByteArray> array(ByteArray::cast(result), isolate());
+ array->set_length(length);
+ array->clear_padding();
+ return array;
}
-
Handle<BytecodeArray> Factory::NewBytecodeArray(
int length, const byte* raw_bytecodes, int frame_size, int parameter_count,
Handle<FixedArray> constant_pool) {
DCHECK_LE(0, length);
- CALL_HEAP_FUNCTION(isolate(), isolate()->heap()->AllocateBytecodeArray(
- length, raw_bytecodes, frame_size,
- parameter_count, *constant_pool),
- BytecodeArray);
+ if (length > BytecodeArray::kMaxLength) {
+ isolate()->heap()->FatalProcessOutOfMemory("invalid array length");
+ }
+ // Bytecode array is pretenured, so constant pool array should be too.
+ DCHECK(!isolate()->heap()->InNewSpace(*constant_pool));
+
+ int size = BytecodeArray::SizeFor(length);
+ HeapObject* result =
+ AllocateRawWithImmortalMap(size, TENURED, *bytecode_array_map());
+ Handle<BytecodeArray> instance(BytecodeArray::cast(result), isolate());
+ instance->set_length(length);
+ instance->set_frame_size(frame_size);
+ instance->set_parameter_count(parameter_count);
+ instance->set_incoming_new_target_or_generator_register(
+ interpreter::Register::invalid_value());
+ instance->set_interrupt_budget(interpreter::Interpreter::InterruptBudget());
+ instance->set_osr_loop_nesting_level(0);
+ instance->set_bytecode_age(BytecodeArray::kNoAgeBytecodeAge);
+ instance->set_constant_pool(*constant_pool);
+ instance->set_handler_table(*empty_byte_array());
+ instance->set_source_position_table(*empty_byte_array());
+ CopyBytes(instance->GetFirstBytecodeAddress(), raw_bytecodes, length);
+ instance->clear_padding();
+
+ return instance;
}
-
Handle<FixedTypedArrayBase> Factory::NewFixedTypedArrayWithExternalPointer(
int length, ExternalArrayType array_type, void* external_pointer,
PretenureFlag pretenure) {
DCHECK(0 <= length && length <= Smi::kMaxValue);
- CALL_HEAP_FUNCTION(
- isolate(), isolate()->heap()->AllocateFixedTypedArrayWithExternalPointer(
- length, array_type, external_pointer, pretenure),
- FixedTypedArrayBase);
+ int size = FixedTypedArrayBase::kHeaderSize;
+ HeapObject* result = AllocateRawWithImmortalMap(
+ size, pretenure, isolate()->heap()->MapForFixedTypedArray(array_type));
+ Handle<FixedTypedArrayBase> elements(FixedTypedArrayBase::cast(result),
+ isolate());
+ elements->set_base_pointer(Smi::kZero, SKIP_WRITE_BARRIER);
+ elements->set_external_pointer(external_pointer, SKIP_WRITE_BARRIER);
+ elements->set_length(length);
+ return elements;
}
-
Handle<FixedTypedArrayBase> Factory::NewFixedTypedArray(
- int length, ExternalArrayType array_type, bool initialize,
- PretenureFlag pretenure) {
+ size_t length, size_t byte_length, ExternalArrayType array_type,
+ bool initialize, PretenureFlag pretenure) {
DCHECK(0 <= length && length <= Smi::kMaxValue);
- CALL_HEAP_FUNCTION(isolate(), isolate()->heap()->AllocateFixedTypedArray(
- length, array_type, initialize, pretenure),
- FixedTypedArrayBase);
+ CHECK(byte_length <= kMaxInt - FixedTypedArrayBase::kDataOffset);
+ size_t size =
+ OBJECT_POINTER_ALIGN(byte_length + FixedTypedArrayBase::kDataOffset);
+ Map* map = isolate()->heap()->MapForFixedTypedArray(array_type);
+ AllocationAlignment alignment =
+ array_type == kExternalFloat64Array ? kDoubleAligned : kWordAligned;
+ HeapObject* object = AllocateRawWithImmortalMap(static_cast<int>(size),
+ pretenure, map, alignment);
+
+ Handle<FixedTypedArrayBase> elements(FixedTypedArrayBase::cast(object),
+ isolate());
+ elements->set_base_pointer(*elements, SKIP_WRITE_BARRIER);
+ elements->set_external_pointer(
+ ExternalReference::fixed_typed_array_base_data_offset(isolate())
+ .address(),
+ SKIP_WRITE_BARRIER);
+ elements->set_length(static_cast<int>(length));
+ if (initialize) memset(elements->DataPtr(), 0, elements->DataSize());
+ return elements;
}
Handle<Cell> Factory::NewCell(Handle<Object> value) {
AllowDeferredHandleDereference convert_to_cell;
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateCell(*value),
- Cell);
+ STATIC_ASSERT(Cell::kSize <= kMaxRegularHeapObjectSize);
+ HeapObject* result =
+ AllocateRawWithImmortalMap(Cell::kSize, TENURED, *cell_map());
+ Handle<Cell> cell(Cell::cast(result), isolate());
+ cell->set_value(*value);
+ return cell;
}
Handle<FeedbackCell> Factory::NewNoClosuresCell(Handle<HeapObject> value) {
AllowDeferredHandleDereference convert_to_cell;
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateFeedbackCell(
- isolate()->heap()->no_closures_cell_map(), *value),
- FeedbackCell);
+ HeapObject* result = AllocateRawWithImmortalMap(FeedbackCell::kSize, TENURED,
+ *no_closures_cell_map());
+ Handle<FeedbackCell> cell(FeedbackCell::cast(result), isolate());
+ cell->set_value(*value);
+ return cell;
}
Handle<FeedbackCell> Factory::NewOneClosureCell(Handle<HeapObject> value) {
AllowDeferredHandleDereference convert_to_cell;
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateFeedbackCell(
- isolate()->heap()->one_closure_cell_map(), *value),
- FeedbackCell);
+ HeapObject* result = AllocateRawWithImmortalMap(FeedbackCell::kSize, TENURED,
+ *one_closure_cell_map());
+ Handle<FeedbackCell> cell(FeedbackCell::cast(result), isolate());
+ cell->set_value(*value);
+ return cell;
}
Handle<FeedbackCell> Factory::NewManyClosuresCell(Handle<HeapObject> value) {
AllowDeferredHandleDereference convert_to_cell;
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateFeedbackCell(
- isolate()->heap()->many_closures_cell_map(), *value),
- FeedbackCell);
+ HeapObject* result = AllocateRawWithImmortalMap(FeedbackCell::kSize, TENURED,
+ *many_closures_cell_map());
+ Handle<FeedbackCell> cell(FeedbackCell::cast(result), isolate());
+ cell->set_value(*value);
+ return cell;
}
Handle<PropertyCell> Factory::NewPropertyCell(Handle<Name> name) {
- CALL_HEAP_FUNCTION(isolate(), isolate()->heap()->AllocatePropertyCell(*name),
- PropertyCell);
+ DCHECK(name->IsUniqueName());
+ STATIC_ASSERT(PropertyCell::kSize <= kMaxRegularHeapObjectSize);
+ HeapObject* result = AllocateRawWithImmortalMap(PropertyCell::kSize, TENURED,
+ *global_property_cell_map());
+ Handle<PropertyCell> cell(PropertyCell::cast(result), isolate());
+ cell->set_dependent_code(DependentCode::cast(*empty_fixed_array()),
+ SKIP_WRITE_BARRIER);
+ cell->set_property_details(PropertyDetails(Smi::kZero));
+ cell->set_name(*name);
+ cell->set_value(*the_hole_value());
+ return cell;
}
-
Handle<WeakCell> Factory::NewWeakCell(Handle<HeapObject> value) {
// It is safe to dereference the value because we are embedding it
// in cell and not inspecting its fields.
AllowDeferredHandleDereference convert_to_cell;
- CALL_HEAP_FUNCTION(isolate(), isolate()->heap()->AllocateWeakCell(*value),
- WeakCell);
+ STATIC_ASSERT(WeakCell::kSize <= kMaxRegularHeapObjectSize);
+ HeapObject* result =
+ AllocateRawWithImmortalMap(WeakCell::kSize, TENURED, *weak_cell_map());
+ Handle<WeakCell> cell(WeakCell::cast(result), isolate());
+ cell->initialize(*value);
+ return cell;
}
-
Handle<TransitionArray> Factory::NewTransitionArray(int capacity) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateTransitionArray(capacity),
- TransitionArray);
+ Handle<TransitionArray> array = NewFixedArrayWithMap<TransitionArray>(
+ Heap::kTransitionArrayMapRootIndex, capacity, TENURED);
+ // Transition arrays are tenured. When black allocation is on we have to
+ // add the transition array to the list of encountered_transition_arrays.
+ Heap* heap = isolate()->heap();
+ if (heap->incremental_marking()->black_allocation()) {
+ heap->mark_compact_collector()->AddTransitionArray(*array);
+ }
+ return array;
}
-
Handle<AllocationSite> Factory::NewAllocationSite() {
Handle<Map> map = allocation_site_map();
- Handle<AllocationSite> site = New<AllocationSite>(map, OLD_SPACE);
+ Handle<AllocationSite> site(AllocationSite::cast(New(map, TENURED)),
+ isolate());
site->Initialize();
// Link the site
@@ -1351,94 +1637,289 @@ Handle<AllocationSite> Factory::NewAllocationSite() {
Handle<Map> Factory::NewMap(InstanceType type, int instance_size,
ElementsKind elements_kind,
int inobject_properties) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateMap(type, instance_size, elements_kind,
- inobject_properties),
- Map);
+ STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
+ DCHECK_IMPLIES(type >= FIRST_JS_OBJECT_TYPE &&
+ !Map::CanHaveFastTransitionableElementsKind(type),
+ IsDictionaryElementsKind(elements_kind) ||
+ IsTerminalElementsKind(elements_kind));
+ HeapObject* result =
+ isolate()->heap()->AllocateRawWithRetry(Map::kSize, MAP_SPACE);
+ result->set_map_after_allocation(*meta_map(), SKIP_WRITE_BARRIER);
+ return handle(InitializeMap(Map::cast(result), type, instance_size,
+ elements_kind, inobject_properties),
+ isolate());
+}
+
+Map* Factory::InitializeMap(Map* map, InstanceType type, int instance_size,
+ ElementsKind elements_kind,
+ int inobject_properties) {
+ map->set_instance_type(type);
+ map->set_prototype(*null_value(), SKIP_WRITE_BARRIER);
+ map->set_constructor_or_backpointer(*null_value(), SKIP_WRITE_BARRIER);
+ map->set_instance_size(instance_size);
+ if (map->IsJSObjectMap()) {
+ map->SetInObjectPropertiesStartInWords(instance_size / kPointerSize -
+ inobject_properties);
+ DCHECK_EQ(map->GetInObjectProperties(), inobject_properties);
+ map->set_prototype_validity_cell(*invalid_prototype_validity_cell());
+ } else {
+ DCHECK_EQ(inobject_properties, 0);
+ map->set_inobject_properties_start_or_constructor_function_index(0);
+ map->set_prototype_validity_cell(Smi::FromInt(Map::kPrototypeChainValid));
+ }
+ map->set_dependent_code(DependentCode::cast(*empty_fixed_array()),
+ SKIP_WRITE_BARRIER);
+ map->set_weak_cell_cache(Smi::kZero);
+ map->set_raw_transitions(MaybeObject::FromSmi(Smi::kZero));
+ map->SetInObjectUnusedPropertyFields(inobject_properties);
+ map->set_instance_descriptors(*empty_descriptor_array());
+ if (FLAG_unbox_double_fields) {
+ map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
+ }
+ // Must be called only after |instance_type|, |instance_size| and
+ // |layout_descriptor| are set.
+ map->set_visitor_id(Map::GetVisitorId(map));
+ map->set_bit_field(0);
+ map->set_bit_field2(Map::IsExtensibleBit::kMask);
+ DCHECK(!map->is_in_retained_map_list());
+ int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
+ Map::OwnsDescriptorsBit::encode(true) |
+ Map::ConstructionCounterBits::encode(Map::kNoSlackTracking);
+ map->set_bit_field3(bit_field3);
+ map->set_elements_kind(elements_kind);
+ map->set_new_target_is_base(true);
+ isolate()->counters()->maps_created()->Increment();
+ if (FLAG_trace_maps) LOG(isolate(), MapCreate(map));
+ return map;
}
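A caller-side sketch of the NewMap() signature introduced above. The isolate/factory locals are assumed context (not part of this change), and the instance size must already leave room for the requested in-object properties, which InitializeMap() checks with DCHECK_EQ:

    // Hypothetical call site, for illustration only.
    Factory* factory = isolate->factory();
    Handle<Map> map = factory->NewMap(
        JS_OBJECT_TYPE,
        JSObject::kHeaderSize + 2 * kPointerSize,  // room for 2 in-object fields
        HOLEY_ELEMENTS,
        2 /* inobject_properties */);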
+Handle<JSObject> Factory::CopyJSObject(Handle<JSObject> source) {
+ return CopyJSObjectWithAllocationSite(source, Handle<AllocationSite>());
+}
+
+Handle<JSObject> Factory::CopyJSObjectWithAllocationSite(
+ Handle<JSObject> source, Handle<AllocationSite> site) {
+ Handle<Map> map(source->map(), isolate());
+
+ // We can only clone regexps, normal objects, api objects, errors or arrays.
+ // Copying anything else will break invariants.
+ CHECK(map->instance_type() == JS_REGEXP_TYPE ||
+ map->instance_type() == JS_OBJECT_TYPE ||
+ map->instance_type() == JS_ERROR_TYPE ||
+ map->instance_type() == JS_ARRAY_TYPE ||
+ map->instance_type() == JS_API_OBJECT_TYPE ||
+ map->instance_type() == WASM_GLOBAL_TYPE ||
+ map->instance_type() == WASM_INSTANCE_TYPE ||
+ map->instance_type() == WASM_MEMORY_TYPE ||
+ map->instance_type() == WASM_MODULE_TYPE ||
+ map->instance_type() == WASM_TABLE_TYPE ||
+ map->instance_type() == JS_SPECIAL_API_OBJECT_TYPE);
+ DCHECK(site.is_null() || AllocationSite::CanTrack(map->instance_type()));
+
+ int object_size = map->instance_size();
+ int adjusted_object_size =
+ site.is_null() ? object_size : object_size + AllocationMemento::kSize;
+ HeapObject* raw_clone =
+ isolate()->heap()->AllocateRawWithRetry(adjusted_object_size, NEW_SPACE);
+
+ SLOW_DCHECK(isolate()->heap()->InNewSpace(raw_clone));
+ // Since we know the clone is allocated in new space, we can copy
+ // the contents without worrying about updating the write barrier.
+ Heap::CopyBlock(raw_clone->address(), source->address(), object_size);
+ Handle<JSObject> clone(JSObject::cast(raw_clone), isolate());
+
+ if (!site.is_null()) {
+ AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
+ reinterpret_cast<Address>(raw_clone) + object_size);
+ InitializeAllocationMemento(alloc_memento, *site);
+ }
+
+ SLOW_DCHECK(clone->GetElementsKind() == source->GetElementsKind());
+ FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
+ // Update elements if necessary.
+ if (elements->length() > 0) {
+ FixedArrayBase* elem = nullptr;
+ if (elements->map() == *fixed_cow_array_map()) {
+ elem = elements;
+ } else if (source->HasDoubleElements()) {
+ elem = *CopyFixedDoubleArray(
+ handle(FixedDoubleArray::cast(elements), isolate()));
+ } else {
+ elem = *CopyFixedArray(handle(FixedArray::cast(elements), isolate()));
+ }
+ clone->set_elements(elem);
+ }
-Handle<JSObject> Factory::CopyJSObject(Handle<JSObject> object) {
- CALL_HEAP_FUNCTION(
- isolate(), isolate()->heap()->CopyJSObject(*object, nullptr), JSObject);
+ // Update properties if necessary.
+ if (source->HasFastProperties()) {
+ PropertyArray* properties = source->property_array();
+ if (properties->length() > 0) {
+ // TODO(gsathya): Do not copy hash code.
+ Handle<PropertyArray> prop = CopyArrayWithMap(
+ handle(properties, isolate()), handle(properties->map(), isolate()));
+ clone->set_raw_properties_or_hash(*prop);
+ }
+ } else {
+ Handle<FixedArray> properties(
+ FixedArray::cast(source->property_dictionary()), isolate());
+ Handle<FixedArray> prop = CopyFixedArray(properties);
+ clone->set_raw_properties_or_hash(*prop);
+ }
+ return clone;
}
+namespace {
+template <typename T>
+void initialize_length(T* array, int length) {
+ array->set_length(length);
+}
-Handle<JSObject> Factory::CopyJSObjectWithAllocationSite(
- Handle<JSObject> object,
- Handle<AllocationSite> site) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->CopyJSObject(
- *object, site.is_null() ? nullptr : *site),
- JSObject);
+template <>
+void initialize_length<PropertyArray>(PropertyArray* array, int length) {
+ array->initialize_length(length);
+}
+
+} // namespace
+
+template <typename T>
+Handle<T> Factory::CopyArrayWithMap(Handle<T> src, Handle<Map> map) {
+ int len = src->length();
+ HeapObject* obj = AllocateRawFixedArray(len, NOT_TENURED);
+ obj->set_map_after_allocation(*map, SKIP_WRITE_BARRIER);
+
+ T* result = T::cast(obj);
+ DisallowHeapAllocation no_gc;
+ WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
+
+ if (mode == SKIP_WRITE_BARRIER) {
+ // Eliminate the write barrier if possible.
+ Heap::CopyBlock(obj->address() + kPointerSize,
+ src->address() + kPointerSize,
+ T::SizeFor(len) - kPointerSize);
+ } else {
+ // Slow case: Just copy the content one-by-one.
+ initialize_length(result, len);
+ for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
+ }
+ return Handle<T>(result, isolate());
+}
+
+template <typename T>
+Handle<T> Factory::CopyArrayAndGrow(Handle<T> src, int grow_by,
+ PretenureFlag pretenure) {
+ DCHECK_LT(0, grow_by);
+ DCHECK_LE(grow_by, kMaxInt - src->length());
+ int old_len = src->length();
+ int new_len = old_len + grow_by;
+ HeapObject* obj = AllocateRawFixedArray(new_len, pretenure);
+ obj->set_map_after_allocation(src->map(), SKIP_WRITE_BARRIER);
+
+ T* result = T::cast(obj);
+ initialize_length(result, new_len);
+
+ // Copy the content.
+ DisallowHeapAllocation no_gc;
+ WriteBarrierMode mode = obj->GetWriteBarrierMode(no_gc);
+ for (int i = 0; i < old_len; i++) result->set(i, src->get(i), mode);
+ MemsetPointer(result->data_start() + old_len, *undefined_value(), grow_by);
+ return Handle<T>(result, isolate());
}
Handle<FixedArray> Factory::CopyFixedArrayWithMap(Handle<FixedArray> array,
Handle<Map> map) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->CopyFixedArrayWithMap(*array, *map),
- FixedArray);
+ return CopyArrayWithMap(array, map);
}
Handle<FixedArray> Factory::CopyFixedArrayAndGrow(Handle<FixedArray> array,
int grow_by,
PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->CopyArrayAndGrow(*array, grow_by, pretenure),
- FixedArray);
+ return CopyArrayAndGrow(array, grow_by, pretenure);
}
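A minimal usage sketch of the grow path above (a Factory* named factory is assumed to be in scope); the added tail slots come back initialized to undefined via the MemsetPointer call in CopyArrayAndGrow():

    // Illustration only; not taken from this change.
    Handle<FixedArray> arr = factory->NewFixedArray(2);
    arr->set(0, Smi::FromInt(1));
    arr->set(1, Smi::FromInt(2));
    Handle<FixedArray> grown = factory->CopyFixedArrayAndGrow(arr, 3);
    // grown->length() == 5; slots 2..4 hold undefined_value.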
Handle<PropertyArray> Factory::CopyPropertyArrayAndGrow(
Handle<PropertyArray> array, int grow_by, PretenureFlag pretenure) {
- DCHECK_LE(0, grow_by);
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->CopyArrayAndGrow(*array, grow_by, pretenure),
- PropertyArray);
+ return CopyArrayAndGrow(array, grow_by, pretenure);
}
Handle<FixedArray> Factory::CopyFixedArrayUpTo(Handle<FixedArray> array,
int new_len,
PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(isolate(), isolate()->heap()->CopyFixedArrayUpTo(
- *array, new_len, pretenure),
- FixedArray);
+ DCHECK_LE(0, new_len);
+ DCHECK_LE(new_len, array->length());
+ if (new_len == 0) return empty_fixed_array();
+
+ HeapObject* obj = AllocateRawFixedArray(new_len, pretenure);
+ obj->set_map_after_allocation(*fixed_array_map(), SKIP_WRITE_BARRIER);
+ Handle<FixedArray> result(FixedArray::cast(obj), isolate());
+ result->set_length(new_len);
+
+ // Copy the content.
+ DisallowHeapAllocation no_gc;
+ WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
+ for (int i = 0; i < new_len; i++) result->set(i, array->get(i), mode);
+ return result;
}
Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->CopyFixedArray(*array),
- FixedArray);
+ if (array->length() == 0) return array;
+ return CopyArrayWithMap(array, handle(array->map(), isolate()));
}
-
Handle<FixedArray> Factory::CopyAndTenureFixedCOWArray(
Handle<FixedArray> array) {
DCHECK(isolate()->heap()->InNewSpace(*array));
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->CopyAndTenureFixedCOWArray(*array),
- FixedArray);
-}
+ Handle<FixedArray> result =
+ CopyFixedArrayUpTo(array, array->length(), TENURED);
+ // TODO(mvstanton): The map is set twice because of protection against calling
+ // set() on a COW FixedArray. Issue v8:3221 was created to track this; once it
+ // is resolved we might be able to remove this whole method.
+ result->set_map_after_allocation(*fixed_cow_array_map(), SKIP_WRITE_BARRIER);
+ return result;
+}
Handle<FixedDoubleArray> Factory::CopyFixedDoubleArray(
Handle<FixedDoubleArray> array) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->CopyFixedDoubleArray(*array),
- FixedDoubleArray);
+ int len = array->length();
+ if (len == 0) return array;
+ Handle<FixedDoubleArray> result =
+ Handle<FixedDoubleArray>::cast(NewFixedDoubleArray(len, NOT_TENURED));
+ Heap::CopyBlock(
+ result->address() + FixedDoubleArray::kLengthOffset,
+ array->address() + FixedDoubleArray::kLengthOffset,
+ FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
+ return result;
}
Handle<FeedbackVector> Factory::CopyFeedbackVector(
Handle<FeedbackVector> array) {
- CALL_HEAP_FUNCTION(isolate(), isolate()->heap()->CopyFeedbackVector(*array),
- FeedbackVector);
+ int len = array->length();
+ HeapObject* obj = AllocateRawWithImmortalMap(
+ FeedbackVector::SizeFor(len), NOT_TENURED, *feedback_vector_map());
+ Handle<FeedbackVector> result(FeedbackVector::cast(obj), isolate());
+
+ DisallowHeapAllocation no_gc;
+ WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
+
+ // Eliminate the write barrier if possible.
+ if (mode == SKIP_WRITE_BARRIER) {
+ Heap::CopyBlock(result->address() + kPointerSize,
+ array->address() + kPointerSize,
+ FeedbackVector::SizeFor(len) - kPointerSize);
+ } else {
+ // Slow case: Just copy the content one-by-one.
+ result->set_shared_function_info(array->shared_function_info());
+ result->set_optimized_code_weak_or_smi(array->optimized_code_weak_or_smi());
+ result->set_invocation_count(array->invocation_count());
+ result->set_profiler_ticks(array->profiler_ticks());
+ result->set_deopt_count(array->deopt_count());
+ for (int i = 0; i < len; i++) result->set(i, array->get(i), mode);
+ }
+ return result;
}
-Handle<Object> Factory::NewNumber(double value,
- PretenureFlag pretenure) {
- // Materialize as a SMI if possible
+Handle<Object> Factory::NewNumber(double value, PretenureFlag pretenure) {
+ // Materialize as a SMI if possible.
int32_t int_value;
if (DoubleToSmiInteger(value, &int_value)) {
return handle(Smi::FromInt(int_value), isolate());
@@ -1448,7 +1929,6 @@ Handle<Object> Factory::NewNumber(double value,
return NewHeapNumber(value, IMMUTABLE, pretenure);
}
-
Handle<Object> Factory::NewNumberFromInt(int32_t value,
PretenureFlag pretenure) {
if (Smi::IsValid(value)) return handle(Smi::FromInt(value), isolate());
@@ -1456,7 +1936,6 @@ Handle<Object> Factory::NewNumberFromInt(int32_t value,
return NewHeapNumber(FastI2D(value), IMMUTABLE, pretenure);
}
-
Handle<Object> Factory::NewNumberFromUint(uint32_t value,
PretenureFlag pretenure) {
int32_t int32v = static_cast<int32_t>(value);
@@ -1468,16 +1947,21 @@ Handle<Object> Factory::NewNumberFromUint(uint32_t value,
Handle<HeapNumber> Factory::NewHeapNumber(MutableMode mode,
PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateHeapNumber(mode, pretenure),
- HeapNumber);
+ STATIC_ASSERT(HeapNumber::kSize <= kMaxRegularHeapObjectSize);
+ Map* map = mode == MUTABLE ? *mutable_heap_number_map() : *heap_number_map();
+ HeapObject* result = AllocateRawWithImmortalMap(HeapNumber::kSize, pretenure,
+ map, kDoubleUnaligned);
+ return handle(HeapNumber::cast(result), isolate());
}
Handle<FreshlyAllocatedBigInt> Factory::NewBigInt(int length,
PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateBigInt(length, pretenure),
- FreshlyAllocatedBigInt);
+ if (length < 0 || length > BigInt::kMaxLength) {
+ isolate()->heap()->FatalProcessOutOfMemory("invalid BigInt length");
+ }
+ HeapObject* result = AllocateRawWithImmortalMap(BigInt::SizeFor(length),
+ pretenure, *bigint_map());
+ return handle(FreshlyAllocatedBigInt::cast(result), isolate());
}
Handle<Object> Factory::NewError(Handle<JSFunction> constructor,
@@ -1509,7 +1993,6 @@ Handle<Object> Factory::NewError(Handle<JSFunction> constructor,
return scope.CloseAndEscape(result);
}
-
Handle<Object> Factory::NewError(Handle<JSFunction> constructor,
Handle<String> message) {
// Construct a new error object. If an exception is thrown, use the exception
@@ -1561,15 +2044,14 @@ Handle<JSFunction> Factory::NewFunction(Handle<Map> map,
Handle<SharedFunctionInfo> info,
Handle<Object> context_or_undefined,
PretenureFlag pretenure) {
- AllocationSpace space = pretenure == TENURED ? OLD_SPACE : NEW_SPACE;
- Handle<JSFunction> function = New<JSFunction>(map, space);
+ Handle<JSFunction> function(JSFunction::cast(New(map, pretenure)), isolate());
DCHECK(context_or_undefined->IsContext() ||
context_or_undefined->IsUndefined(isolate()));
function->initialize_properties();
function->initialize_elements();
function->set_shared(*info);
- function->set_code(info->code());
+ function->set_code(info->GetCode());
function->set_context(*context_or_undefined);
function->set_feedback_cell(*many_closures_cell());
int header_size;
@@ -1579,7 +2061,7 @@ Handle<JSFunction> Factory::NewFunction(Handle<Map> map,
} else {
header_size = JSFunction::kSizeWithoutPrototype;
}
- isolate()->heap()->InitializeJSObjectBody(*function, *map, header_size);
+ InitializeJSObjectBody(function, map, header_size);
return function;
}
@@ -1597,9 +2079,8 @@ Handle<JSFunction> Factory::NewFunction(const NewFunctionArgs& args) {
// Create the SharedFunctionInfo.
Handle<Context> context(isolate()->native_context());
Handle<Map> map = args.GetMap(isolate());
- Handle<SharedFunctionInfo> info =
- NewSharedFunctionInfo(args.name_, args.maybe_code_, map->is_constructor(),
- kNormalFunction, args.maybe_builtin_id_);
+ Handle<SharedFunctionInfo> info = NewSharedFunctionInfo(
+ args.name_, args.maybe_code_, args.maybe_builtin_id_, kNormalFunction);
// Proper language mode in shared function info will be set later.
DCHECK(is_sloppy(info->language_mode()));
@@ -1608,7 +2089,6 @@ Handle<JSFunction> Factory::NewFunction(const NewFunctionArgs& args) {
#ifdef DEBUG
if (isolate()->bootstrapper()->IsActive()) {
Handle<Code> code;
- bool has_code = args.maybe_code_.ToHandle(&code);
DCHECK(
// During bootstrapping some of these maps could be not created yet.
(*map == context->get(Context::STRICT_FUNCTION_MAP_INDEX)) ||
@@ -1619,8 +2099,8 @@ Handle<JSFunction> Factory::NewFunction(const NewFunctionArgs& args) {
Context::STRICT_FUNCTION_WITH_READONLY_PROTOTYPE_MAP_INDEX)) ||
// Check if it's a creation of an empty or Proxy function during
// bootstrapping.
- (has_code && (code->builtin_index() == Builtins::kEmptyFunction ||
- code->builtin_index() == Builtins::kProxyConstructor)));
+ (args.maybe_builtin_id_ == Builtins::kEmptyFunction ||
+ args.maybe_builtin_id_ == Builtins::kProxyConstructor));
} else {
DCHECK(
(*map == *isolate()->sloppy_function_map()) ||
@@ -1703,10 +2183,8 @@ Handle<JSObject> Factory::NewFunctionPrototype(Handle<JSFunction> function) {
return prototype;
}
-
Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
- Handle<SharedFunctionInfo> info,
- Handle<Context> context,
+ Handle<SharedFunctionInfo> info, Handle<Context> context,
PretenureFlag pretenure) {
Handle<Map> initial_map(
Map::cast(context->native_context()->get(info->function_map_index())));
@@ -1773,16 +2251,13 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
}
Handle<ScopeInfo> Factory::NewScopeInfo(int length) {
- Handle<FixedArray> array = NewFixedArray(length, TENURED);
- array->set_map_no_write_barrier(*scope_info_map());
- Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::cast(array);
- return scope_info;
+ return NewFixedArrayWithMap<ScopeInfo>(Heap::kScopeInfoMapRootIndex, length,
+ TENURED);
}
Handle<ModuleInfo> Factory::NewModuleInfo() {
- Handle<FixedArray> array = NewFixedArray(ModuleInfo::kLength, TENURED);
- array->set_map_no_write_barrier(*module_info_map());
- return Handle<ModuleInfo>::cast(array);
+ return NewFixedArrayWithMap<ModuleInfo>(Heap::kModuleInfoMapRootIndex,
+ ModuleInfo::kLength, TENURED);
}
Handle<PreParsedScopeData> Factory::NewPreParsedScopeData() {
@@ -1801,8 +2276,9 @@ Handle<JSObject> Factory::NewExternal(void* value) {
}
Handle<CodeDataContainer> Factory::NewCodeDataContainer(int flags) {
- Handle<CodeDataContainer> data_container =
- New<CodeDataContainer>(code_data_container_map(), OLD_SPACE);
+ Handle<CodeDataContainer> data_container(
+ CodeDataContainer::cast(New(code_data_container_map(), TENURED)),
+ isolate());
data_container->set_next_code_link(*undefined_value(), SKIP_WRITE_BARRIER);
data_container->set_kind_specific_flags(flags);
data_container->clear_padding();
@@ -1826,41 +2302,200 @@ Handle<Code> Factory::NewCode(
maybe_deopt_data.is_null() ? DeoptimizationData::Empty(isolate())
: maybe_deopt_data.ToHandleChecked();
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateCode(
- desc, kind, self_ref, builtin_index, *reloc_info,
- *data_container, *source_position_table, *deopt_data,
- movability, stub_key, is_turbofanned, stack_slots,
- safepoint_table_offset, handler_table_offset),
- Code);
+ bool has_unwinding_info = desc.unwinding_info != nullptr;
+ DCHECK((has_unwinding_info && desc.unwinding_info_size > 0) ||
+ (!has_unwinding_info && desc.unwinding_info_size == 0));
+
+ // Compute size.
+ int body_size = desc.instr_size;
+ int unwinding_info_size_field_size = kInt64Size;
+ if (has_unwinding_info) {
+ body_size = RoundUp(body_size, kInt64Size) + desc.unwinding_info_size +
+ unwinding_info_size_field_size;
+ }
+ int object_size = Code::SizeFor(RoundUp(body_size, kObjectAlignment));
+ DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
+
+ Heap* heap = isolate()->heap();
+ CodePageCollectionMemoryModificationScope code_allocation(heap);
+ HeapObject* result = heap->AllocateRawWithRetry(object_size, CODE_SPACE);
+
+ if (movability == kImmovable) {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(result->address());
+ // Code objects which should stay at a fixed address are allocated either
+ // in the first page of code space, in large object space, or (during
+ // snapshot creation) the containing page is marked as immovable.
+ if (!Heap::IsImmovable(result)) {
+ if (isolate()->serializer_enabled() ||
+ heap->code_space_->FirstPage()->Contains(result->address())) {
+ chunk->MarkNeverEvacuate();
+ } else {
+ // Discard the first code allocation, which was on a page where it could
+ // be moved.
+ heap->CreateFillerObjectAt(result->address(), object_size,
+ ClearRecordedSlots::kNo);
+ result = heap->AllocateRawCodeInLargeObjectSpace(object_size);
+ heap->UnprotectAndRegisterMemoryChunk(result);
+ heap->ZapCodeObject(result->address(), object_size);
+ heap->OnAllocationEvent(result, object_size);
+ }
+ }
+ }
+
+ result->set_map_after_allocation(*code_map(), SKIP_WRITE_BARRIER);
+ Handle<Code> code(Code::cast(result), isolate());
+ DCHECK(IsAligned(bit_cast<intptr_t>(code->address()), kCodeAlignment));
+ DCHECK(!heap->memory_allocator()->code_range()->valid() ||
+ heap->memory_allocator()->code_range()->contains(code->address()) ||
+ object_size <= heap->code_space()->AreaSize());
+
+ // The code object has not been fully initialized yet. We rely on the
+ // fact that no allocation will happen from this point on.
+ DisallowHeapAllocation no_gc;
+ code->set_raw_instruction_size(desc.instr_size);
+ code->set_relocation_info(*reloc_info);
+ code->initialize_flags(kind, has_unwinding_info, is_turbofanned, stack_slots);
+ code->set_safepoint_table_offset(safepoint_table_offset);
+ code->set_handler_table_offset(handler_table_offset);
+ code->set_code_data_container(*data_container);
+ code->set_deoptimization_data(*deopt_data);
+ code->set_stub_key(stub_key);
+ code->set_source_position_table(*source_position_table);
+ code->set_constant_pool_offset(desc.instr_size - desc.constant_pool_size);
+ code->set_builtin_index(builtin_index);
+
+ // Allow self references to the created code object by patching the handle to
+ // point to the newly allocated Code object.
+ if (!self_ref.is_null()) *(self_ref.location()) = *code;
+
+ // Migrate generated code.
+ // The generated code can contain Object** values (typically from handles)
+ // that are dereferenced during the copy to point directly to the actual heap
+ // objects. These pointers can include references to the code object itself,
+ // through the self_reference parameter.
+ code->CopyFrom(desc);
+
+ code->clear_padding();
+
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) code->ObjectVerify();
+#endif
+ DCHECK(IsAligned(bit_cast<intptr_t>(code->address()), kCodeAlignment));
+ DCHECK(!isolate()->heap()->memory_allocator()->code_range()->valid() ||
+ isolate()->heap()->memory_allocator()->code_range()->contains(
+ code->address()) ||
+ object_size <= isolate()->heap()->code_space()->AreaSize());
+ return code;
}
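To make the size computation above concrete, a worked example under assumed numbers: with desc.instr_size == 100 and desc.unwinding_info_size == 24, body_size first becomes RoundUp(100, kInt64Size) == 104, then 104 + 24 + 8 == 136 after appending the unwinding info and its 64-bit size field; the final object_size is Code::SizeFor(RoundUp(136, kObjectAlignment)).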
Handle<Code> Factory::NewCodeForDeserialization(uint32_t size) {
- CALL_HEAP_FUNCTION(isolate(), isolate()->heap()->AllocateCode(size, kMovable),
- Code);
+ DCHECK(IsAligned(static_cast<intptr_t>(size), kCodeAlignment));
+ Heap* heap = isolate()->heap();
+ HeapObject* result = heap->AllocateRawWithRetry(size, CODE_SPACE);
+ // Unprotect the memory chunk of the object if it was not unprotected
+ // already.
+ heap->UnprotectAndRegisterMemoryChunk(result);
+ heap->ZapCodeObject(result->address(), size);
+ result->set_map_after_allocation(*code_map(), SKIP_WRITE_BARRIER);
+ DCHECK(IsAligned(bit_cast<intptr_t>(result->address()), kCodeAlignment));
+ DCHECK(!heap->memory_allocator()->code_range()->valid() ||
+ heap->memory_allocator()->code_range()->contains(result->address()) ||
+ static_cast<int>(size) <= heap->code_space()->AreaSize());
+ return handle(Code::cast(result), isolate());
+}
+
+#ifdef V8_EMBEDDED_BUILTINS
+Handle<Code> Factory::NewOffHeapTrampolineFor(Handle<Code> code,
+ Address off_heap_entry) {
+ DCHECK(isolate()->serializer_enabled());
+ DCHECK_NOT_NULL(isolate()->embedded_blob());
+ DCHECK_NE(0, isolate()->embedded_blob_size());
+ DCHECK(Builtins::IsEmbeddedBuiltin(*code));
+
+ Handle<Code> result =
+ Builtins::GenerateOffHeapTrampolineFor(isolate(), off_heap_entry);
+
+ // The trampoline code object must inherit specific flags from the original
+ // builtin (e.g. the safepoint-table offset). We set them manually here.
+
+ const int stack_slots = code->has_safepoint_info() ? code->stack_slots() : 0;
+ result->initialize_flags(code->kind(), code->has_unwinding_info(),
+ code->is_turbofanned(), stack_slots);
+ result->set_builtin_index(code->builtin_index());
+ result->set_handler_table_offset(code->handler_table_offset());
+ result->code_data_container()->set_kind_specific_flags(
+ code->code_data_container()->kind_specific_flags());
+ result->set_constant_pool_offset(code->constant_pool_offset());
+ if (code->has_safepoint_info()) {
+ result->set_safepoint_table_offset(code->safepoint_table_offset());
+ }
+
+ return result;
}
+#endif
Handle<Code> Factory::CopyCode(Handle<Code> code) {
Handle<CodeDataContainer> data_container =
NewCodeDataContainer(code->code_data_container()->kind_specific_flags());
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->CopyCode(*code, *data_container), Code);
-}
+ Heap* heap = isolate()->heap();
+ int obj_size = code->Size();
+ HeapObject* result = heap->AllocateRawWithRetry(obj_size, CODE_SPACE);
+
+ // Copy code object.
+ Address old_addr = code->address();
+ Address new_addr = result->address();
+ Heap::CopyBlock(new_addr, old_addr, obj_size);
+ Handle<Code> new_code(Code::cast(result), isolate());
+
+ // Set the {CodeDataContainer}, it cannot be shared.
+ new_code->set_code_data_container(*data_container);
+
+ new_code->Relocate(new_addr - old_addr);
+ // We have to iterate over the object and process its pointers when black
+ // allocation is on.
+ heap->incremental_marking()->ProcessBlackAllocatedObject(*new_code);
+ // Record all references to embedded objects in the new code object.
+ heap->RecordWritesIntoCode(*new_code);
+
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) new_code->ObjectVerify();
+#endif
+ DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment));
+ DCHECK(
+ !heap->memory_allocator()->code_range()->valid() ||
+ heap->memory_allocator()->code_range()->contains(new_code->address()) ||
+ obj_size <= heap->code_space()->AreaSize());
+ return new_code;
+}
Handle<BytecodeArray> Factory::CopyBytecodeArray(
Handle<BytecodeArray> bytecode_array) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->CopyBytecodeArray(*bytecode_array),
- BytecodeArray);
+ int size = BytecodeArray::SizeFor(bytecode_array->length());
+ HeapObject* result =
+ AllocateRawWithImmortalMap(size, TENURED, *bytecode_array_map());
+
+ Handle<BytecodeArray> copy(BytecodeArray::cast(result), isolate());
+ copy->set_length(bytecode_array->length());
+ copy->set_frame_size(bytecode_array->frame_size());
+ copy->set_parameter_count(bytecode_array->parameter_count());
+ copy->set_incoming_new_target_or_generator_register(
+ bytecode_array->incoming_new_target_or_generator_register());
+ copy->set_constant_pool(bytecode_array->constant_pool());
+ copy->set_handler_table(bytecode_array->handler_table());
+ copy->set_source_position_table(bytecode_array->source_position_table());
+ copy->set_interrupt_budget(bytecode_array->interrupt_budget());
+ copy->set_osr_loop_nesting_level(bytecode_array->osr_loop_nesting_level());
+ copy->set_bytecode_age(bytecode_array->bytecode_age());
+ bytecode_array->CopyBytecodesTo(*copy);
+ return copy;
}
Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
PretenureFlag pretenure) {
JSFunction::EnsureHasInitialMap(constructor);
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateJSObject(*constructor, pretenure), JSObject);
+ Handle<Map> map(constructor->initial_map());
+ return NewJSObjectFromMap(map, pretenure);
}
Handle<JSObject> Factory::NewJSObjectWithNullProto(PretenureFlag pretenure) {
@@ -1916,8 +2551,9 @@ Handle<JSGlobalObject> Factory::NewJSGlobalObject(
}
// Allocate the global object and initialize it with the backing store.
- Handle<JSGlobalObject> global = New<JSGlobalObject>(map, OLD_SPACE);
- isolate()->heap()->InitializeJSObjectFromMap(*global, *dictionary, *map);
+ Handle<JSGlobalObject> global(JSGlobalObject::cast(New(map, TENURED)),
+ isolate());
+ InitializeJSObjectFromMap(global, dictionary, map);
// Create a new map for the global object.
Handle<Map> new_map = Map::CopyDropDescriptors(map);
@@ -1933,17 +2569,69 @@ Handle<JSGlobalObject> Factory::NewJSGlobalObject(
return global;
}
+void Factory::InitializeJSObjectFromMap(Handle<JSObject> obj,
+ Handle<Object> properties,
+ Handle<Map> map) {
+ obj->set_raw_properties_or_hash(*properties);
+ obj->initialize_elements();
+ // TODO(1240798): Initialize the object's body using valid initial values
+ // according to the object's initial map. For example, if the map's
+ // instance type is JS_ARRAY_TYPE, the length field should be initialized
+ // to a number (e.g. Smi::kZero) and the elements initialized to a
+ // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
+ // verification code has to cope with (temporarily) invalid objects. See,
+ // for example, JSArray::JSArrayVerify.
+ InitializeJSObjectBody(obj, map, JSObject::kHeaderSize);
+}
+
+void Factory::InitializeJSObjectBody(Handle<JSObject> obj, Handle<Map> map,
+ int start_offset) {
+ if (start_offset == map->instance_size()) return;
+ DCHECK_LT(start_offset, map->instance_size());
+
+ // We cannot always fill with one_pointer_filler_map because objects
+ // created from API functions expect their embedder fields to be initialized
+ // with undefined_value.
+ // Pre-allocated fields need to be initialized with undefined_value as well
+ // so that object accesses before the constructor completes (e.g. in the
+ // debugger) will not cause a crash.
+
+ // In case of Array subclassing the |map| could already be transitioned
+ // to different elements kind from the initial map on which we track slack.
+ bool in_progress = map->IsInobjectSlackTrackingInProgress();
+ Object* filler;
+ if (in_progress) {
+ filler = *one_pointer_filler_map();
+ } else {
+ filler = *undefined_value();
+ }
+ obj->InitializeBody(*map, start_offset, *undefined_value(), filler);
+ if (in_progress) {
+ map->FindRootMap()->InobjectSlackTrackingStep();
+ }
+}
Handle<JSObject> Factory::NewJSObjectFromMap(
- Handle<Map> map,
- PretenureFlag pretenure,
+ Handle<Map> map, PretenureFlag pretenure,
Handle<AllocationSite> allocation_site) {
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateJSObjectFromMap(
- *map, pretenure,
- allocation_site.is_null() ? nullptr : *allocation_site),
- JSObject);
+ // JSFunctions should be allocated using AllocateFunction to be
+ // properly initialized.
+ DCHECK(map->instance_type() != JS_FUNCTION_TYPE);
+
+ // Both types of global objects should be allocated using
+ // AllocateGlobalObject to be properly initialized.
+ DCHECK(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
+
+ HeapObject* obj =
+ AllocateRawWithAllocationSite(map, pretenure, allocation_site);
+ Handle<JSObject> js_obj(JSObject::cast(obj), isolate());
+
+ InitializeJSObjectFromMap(js_obj, empty_fixed_array(), map);
+
+ DCHECK(js_obj->HasFastElements() || js_obj->HasFixedTypedArrayElements() ||
+ js_obj->HasFastStringWrapperElements() ||
+ js_obj->HasFastArgumentsElements());
+ return js_obj;
}
Handle<JSObject> Factory::NewSlowJSObjectFromMap(Handle<Map> map, int capacity,
@@ -1989,10 +2677,7 @@ Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements,
return array;
}
-
-void Factory::NewJSArrayStorage(Handle<JSArray> array,
- int length,
- int capacity,
+void Factory::NewJSArrayStorage(Handle<JSArray> array, int length, int capacity,
ArrayStorageAllocationMode mode) {
DCHECK(capacity >= length);
@@ -2058,10 +2743,7 @@ Handle<JSGeneratorObject> Factory::NewJSGeneratorObject(
DCHECK(map->instance_type() == JS_GENERATOR_OBJECT_TYPE ||
map->instance_type() == JS_ASYNC_GENERATOR_OBJECT_TYPE);
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateJSObjectFromMap(*map),
- JSGeneratorObject);
+ return Handle<JSGeneratorObject>::cast(NewJSObjectFromMap(map));
}
Handle<Module> Factory::NewModule(Handle<SharedFunctionInfo> code) {
@@ -2101,19 +2783,8 @@ Handle<JSArrayBuffer> Factory::NewJSArrayBuffer(SharedFlag shared,
shared == SharedFlag::kShared
? isolate()->native_context()->shared_array_buffer_fun()
: isolate()->native_context()->array_buffer_fun());
- CALL_HEAP_FUNCTION(isolate(), isolate()->heap()->AllocateJSObject(
- *array_buffer_fun, pretenure),
- JSArrayBuffer);
-}
-
-
-Handle<JSDataView> Factory::NewJSDataView() {
- Handle<JSFunction> data_view_fun(
- isolate()->native_context()->data_view_fun());
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateJSObject(*data_view_fun),
- JSDataView);
+ Handle<Map> map(array_buffer_fun->initial_map(), isolate());
+ return Handle<JSArrayBuffer>::cast(NewJSObjectFromMap(map, pretenure));
}
Handle<JSIteratorResult> Factory::NewJSIteratorResult(Handle<Object> value,
@@ -2144,7 +2815,6 @@ Handle<JSMap> Factory::NewJSMap() {
return js_map;
}
-
Handle<JSSet> Factory::NewJSSet() {
Handle<Map> map(isolate()->native_context()->js_set_map());
Handle<JSSet> js_set = Handle<JSSet>::cast(NewJSObjectFromMap(map));
@@ -2172,72 +2842,54 @@ Handle<JSSetIterator> Factory::NewJSSetIterator(Handle<Map> map,
return result;
}
-ExternalArrayType Factory::GetArrayTypeFromElementsKind(ElementsKind kind) {
+void Factory::TypeAndSizeForElementsKind(ElementsKind kind,
+ ExternalArrayType* array_type,
+ size_t* element_size) {
switch (kind) {
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
case TYPE##_ELEMENTS: \
- return kExternal##Type##Array;
+ *array_type = kExternal##Type##Array; \
+ *element_size = size; \
+ break;
TYPED_ARRAYS(TYPED_ARRAY_CASE)
- default:
- UNREACHABLE();
- }
#undef TYPED_ARRAY_CASE
-}
-size_t Factory::GetExternalArrayElementSize(ExternalArrayType type) {
- switch (type) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case kExternal##Type##Array: \
- return size;
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
default:
UNREACHABLE();
}
-#undef TYPED_ARRAY_CASE
}
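A sketch of the new out-parameter style that replaces the two separate lookup helpers (a Factory* named factory is assumed):

    ExternalArrayType array_type;
    size_t element_size;
    factory->TypeAndSizeForElementsKind(UINT8_ELEMENTS, &array_type,
                                        &element_size);
    // array_type == kExternalUint8Array, element_size == 1.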
namespace {
-ElementsKind GetExternalArrayElementsKind(ExternalArrayType type) {
- switch (type) {
+static void ForFixedTypedArray(ExternalArrayType array_type,
+ size_t* element_size,
+ ElementsKind* element_kind) {
+ switch (array_type) {
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
case kExternal##Type##Array: \
- return TYPE##_ELEMENTS;
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
- }
- UNREACHABLE();
-#undef TYPED_ARRAY_CASE
-}
+ *element_size = size; \
+ *element_kind = TYPE##_ELEMENTS; \
+ return;
-size_t GetFixedTypedArraysElementSize(ElementsKind kind) {
- switch (kind) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case TYPE##_ELEMENTS: \
- return size;
TYPED_ARRAYS(TYPED_ARRAY_CASE)
- default:
- UNREACHABLE();
- }
#undef TYPED_ARRAY_CASE
+ }
+ UNREACHABLE();
}
-
JSFunction* GetTypedArrayFun(ExternalArrayType type, Isolate* isolate) {
Context* native_context = isolate->context()->native_context();
switch (type) {
-#define TYPED_ARRAY_FUN(Type, type, TYPE, ctype, size) \
- case kExternal##Type##Array: \
- return native_context->type##_array_fun();
+#define TYPED_ARRAY_FUN(Type, type, TYPE, ctype, size) \
+ case kExternal##Type##Array: \
+ return native_context->type##_array_fun();
TYPED_ARRAYS(TYPED_ARRAY_FUN)
#undef TYPED_ARRAY_FUN
-
- default:
- UNREACHABLE();
}
+ UNREACHABLE();
}
-
JSFunction* GetTypedArrayFun(ElementsKind elements_kind, Isolate* isolate) {
Context* native_context = isolate->context()->native_context();
switch (elements_kind) {
@@ -2253,7 +2905,6 @@ JSFunction* GetTypedArrayFun(ElementsKind elements_kind, Isolate* isolate) {
}
}
-
void SetupArrayBufferView(i::Isolate* isolate,
i::Handle<i::JSArrayBufferView> obj,
i::Handle<i::JSArrayBuffer> buffer,
@@ -2279,39 +2930,32 @@ void SetupArrayBufferView(i::Isolate* isolate,
obj->set_byte_length(*byte_length_object);
}
-
} // namespace
-
Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type,
PretenureFlag pretenure) {
- Handle<JSFunction> typed_array_fun_handle(GetTypedArrayFun(type, isolate()));
-
- CALL_HEAP_FUNCTION(isolate(), isolate()->heap()->AllocateJSObject(
- *typed_array_fun_handle, pretenure),
- JSTypedArray);
+ Handle<JSFunction> typed_array_fun(GetTypedArrayFun(type, isolate()));
+ Handle<Map> map(typed_array_fun->initial_map(), isolate());
+ return Handle<JSTypedArray>::cast(NewJSObjectFromMap(map, pretenure));
}
-
Handle<JSTypedArray> Factory::NewJSTypedArray(ElementsKind elements_kind,
PretenureFlag pretenure) {
- Handle<JSFunction> typed_array_fun_handle(
- GetTypedArrayFun(elements_kind, isolate()));
-
- CALL_HEAP_FUNCTION(isolate(), isolate()->heap()->AllocateJSObject(
- *typed_array_fun_handle, pretenure),
- JSTypedArray);
+ Handle<JSFunction> typed_array_fun(GetTypedArrayFun(elements_kind, isolate()),
+ isolate());
+ Handle<Map> map(typed_array_fun->initial_map(), isolate());
+ return Handle<JSTypedArray>::cast(NewJSObjectFromMap(map, pretenure));
}
-
Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type,
Handle<JSArrayBuffer> buffer,
size_t byte_offset, size_t length,
PretenureFlag pretenure) {
Handle<JSTypedArray> obj = NewJSTypedArray(type, pretenure);
- size_t element_size = GetExternalArrayElementSize(type);
- ElementsKind elements_kind = GetExternalArrayElementsKind(type);
+ size_t element_size;
+ ElementsKind elements_kind;
+ ForFixedTypedArray(type, &element_size, &elements_kind);
CHECK_EQ(byte_offset % element_size, 0);
@@ -2332,7 +2976,6 @@ Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type,
return obj;
}
-
Handle<JSTypedArray> Factory::NewJSTypedArray(ElementsKind elements_kind,
size_t number_of_elements,
PretenureFlag pretenure) {
@@ -2343,8 +2986,9 @@ Handle<JSTypedArray> Factory::NewJSTypedArray(ElementsKind elements_kind,
obj->SetEmbedderField(i, Smi::kZero);
}
- size_t element_size = GetFixedTypedArraysElementSize(elements_kind);
- ExternalArrayType array_type = GetArrayTypeFromElementsKind(elements_kind);
+ size_t element_size;
+ ExternalArrayType array_type;
+ TypeAndSizeForElementsKind(elements_kind, &array_type, &element_size);
CHECK(number_of_elements <=
(std::numeric_limits<size_t>::max() / element_size));
@@ -2365,21 +3009,21 @@ Handle<JSTypedArray> Factory::NewJSTypedArray(ElementsKind elements_kind,
SharedFlag::kNotShared);
obj->set_buffer(*buffer);
Handle<FixedTypedArrayBase> elements = NewFixedTypedArray(
- static_cast<int>(number_of_elements), array_type, true, pretenure);
+ number_of_elements, byte_length, array_type, true, pretenure);
obj->set_elements(*elements);
return obj;
}
-
Handle<JSDataView> Factory::NewJSDataView(Handle<JSArrayBuffer> buffer,
size_t byte_offset,
size_t byte_length) {
- Handle<JSDataView> obj = NewJSDataView();
+ Handle<Map> map(isolate()->native_context()->data_view_fun()->initial_map(),
+ isolate());
+ Handle<JSDataView> obj = Handle<JSDataView>::cast(NewJSObjectFromMap(map));
SetupArrayBufferView(isolate(), obj, buffer, byte_offset, byte_length);
return obj;
}
-
MaybeHandle<JSBoundFunction> Factory::NewJSBoundFunction(
Handle<JSReceiver> target_function, Handle<Object> bound_this,
Vector<Handle<Object>> bound_args) {
@@ -2429,7 +3073,6 @@ MaybeHandle<JSBoundFunction> Factory::NewJSBoundFunction(
return result;
}
-
// ES6 section 9.5.15 ProxyCreate (target, handler)
Handle<JSProxy> Factory::NewJSProxy(Handle<JSReceiver> target,
Handle<JSReceiver> handler) {
@@ -2445,7 +3088,7 @@ Handle<JSProxy> Factory::NewJSProxy(Handle<JSReceiver> target,
map = Handle<Map>(isolate()->proxy_map());
}
DCHECK(map->prototype()->IsNull(isolate()));
- Handle<JSProxy> result = New<JSProxy>(map, NEW_SPACE);
+ Handle<JSProxy> result(JSProxy::cast(New(map, NOT_TENURED)), isolate());
result->initialize_properties();
result->set_target(*target);
result->set_handler(*handler);
@@ -2459,12 +3102,9 @@ Handle<JSGlobalProxy> Factory::NewUninitializedJSGlobalProxy(int size) {
// Maintain invariant expected from any JSGlobalProxy.
map->set_is_access_check_needed(true);
map->set_may_have_interesting_symbols(true);
- CALL_HEAP_FUNCTION(
- isolate(), isolate()->heap()->AllocateJSObjectFromMap(*map, NOT_TENURED),
- JSGlobalProxy);
+ return Handle<JSGlobalProxy>::cast(NewJSObjectFromMap(map, NOT_TENURED));
}
-
void Factory::ReinitializeJSGlobalProxy(Handle<JSGlobalProxy> object,
Handle<JSFunction> constructor) {
DCHECK(constructor->has_initial_map());
@@ -2494,30 +3134,18 @@ void Factory::ReinitializeJSGlobalProxy(Handle<JSGlobalProxy> object,
// Reset the map for the object.
object->synchronized_set_map(*map);
- Heap* heap = isolate()->heap();
// Reinitialize the object from the constructor map.
- heap->InitializeJSObjectFromMap(*object, *raw_properties_or_hash, *map);
-}
-
-Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
- MaybeHandle<String> name, FunctionKind kind, Handle<Code> code,
- Handle<ScopeInfo> scope_info) {
- Handle<SharedFunctionInfo> shared =
- NewSharedFunctionInfo(name, code, IsConstructable(kind), kind);
- shared->set_scope_info(*scope_info);
- shared->set_outer_scope_info(*the_hole_value());
- return shared;
+ InitializeJSObjectFromMap(object, raw_properties_or_hash, map);
}
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForLiteral(
- FunctionLiteral* literal, Handle<Script> script) {
- Handle<Code> code = BUILTIN_CODE(isolate(), CompileLazy);
- Handle<ScopeInfo> scope_info(ScopeInfo::Empty(isolate()));
- Handle<SharedFunctionInfo> result =
- NewSharedFunctionInfo(literal->name(), literal->kind(), code, scope_info);
- SharedFunctionInfo::InitFromFunctionLiteral(result, literal);
- SharedFunctionInfo::SetScript(result, script, false);
- return result;
+ FunctionLiteral* literal, Handle<Script> script, bool is_toplevel) {
+ FunctionKind kind = literal->kind();
+ Handle<SharedFunctionInfo> shared = NewSharedFunctionInfoForBuiltin(
+ literal->name(), Builtins::kCompileLazy, kind);
+ SharedFunctionInfo::InitFromFunctionLiteral(shared, literal, is_toplevel);
+ SharedFunctionInfo::SetScript(shared, script, false);
+ return shared;
}
Handle<JSMessageObject> Factory::NewJSMessageObject(
@@ -2525,7 +3153,8 @@ Handle<JSMessageObject> Factory::NewJSMessageObject(
int start_position, int end_position, Handle<Object> script,
Handle<Object> stack_frames) {
Handle<Map> map = message_object_map();
- Handle<JSMessageObject> message_obj = New<JSMessageObject>(map, NEW_SPACE);
+ Handle<JSMessageObject> message_obj(
+ JSMessageObject::cast(New(map, NOT_TENURED)), isolate());
message_obj->set_raw_properties_or_hash(*empty_fixed_array(),
SKIP_WRITE_BARRIER);
message_obj->initialize_elements();
@@ -2540,9 +3169,24 @@ Handle<JSMessageObject> Factory::NewJSMessageObject(
return message_obj;
}
+Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForApiFunction(
+ MaybeHandle<String> maybe_name,
+ Handle<FunctionTemplateInfo> function_template_info, FunctionKind kind) {
+ Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(
+ maybe_name, function_template_info, Builtins::kNoBuiltinId, kind);
+ return shared;
+}
+
+Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForBuiltin(
+ MaybeHandle<String> maybe_name, int builtin_index, FunctionKind kind) {
+ Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(
+ maybe_name, MaybeHandle<Code>(), builtin_index, kind);
+ return shared;
+}
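A rough sketch of calling the new builtin entry point directly (the string name and the factory local are illustrative, not from this change); NewSharedFunctionInfoForLiteral() above now routes through exactly this variant with Builtins::kCompileLazy:

    Handle<SharedFunctionInfo> sfi = factory->NewSharedFunctionInfoForBuiltin(
        factory->NewStringFromAsciiChecked("f"), Builtins::kCompileLazy,
        kNormalFunction);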
+
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
- MaybeHandle<String> maybe_name, MaybeHandle<Code> maybe_code,
- bool is_constructor, FunctionKind kind, int maybe_builtin_index) {
+ MaybeHandle<String> maybe_name, MaybeHandle<HeapObject> maybe_function_data,
+ int maybe_builtin_index, FunctionKind kind) {
// Function names are assumed to be flat elsewhere. Must flatten before
// allocating SharedFunctionInfo to avoid GC seeing the uninitialized SFI.
Handle<String> shared_name;
@@ -2552,64 +3196,73 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
}
Handle<Map> map = shared_function_info_map();
- Handle<SharedFunctionInfo> share = New<SharedFunctionInfo>(map, OLD_SPACE);
-
- // Set pointer fields.
- share->set_raw_name(has_shared_name
- ? *shared_name
- : SharedFunctionInfo::kNoSharedNameSentinel);
- Handle<Code> code;
- if (!maybe_code.ToHandle(&code)) {
- code = BUILTIN_CODE(isolate(), Illegal);
- }
- Object* function_data = (Builtins::IsBuiltinId(maybe_builtin_index) &&
- Builtins::IsLazy(maybe_builtin_index))
- ? Smi::FromInt(maybe_builtin_index)
- : Object::cast(*undefined_value());
- share->set_function_data(function_data, SKIP_WRITE_BARRIER);
- share->set_code(*code);
- share->set_scope_info(ScopeInfo::Empty(isolate()));
- share->set_outer_scope_info(*the_hole_value());
- DCHECK(!Builtins::IsLazy(Builtins::kConstructedNonConstructable));
- Handle<Code> construct_stub =
- is_constructor ? isolate()->builtins()->JSConstructStubGeneric()
- : BUILTIN_CODE(isolate(), ConstructedNonConstructable);
- share->SetConstructStub(*construct_stub);
- share->set_script(*undefined_value(), SKIP_WRITE_BARRIER);
- share->set_debug_info(Smi::kZero, SKIP_WRITE_BARRIER);
- share->set_function_identifier(*undefined_value(), SKIP_WRITE_BARRIER);
- Handle<FeedbackMetadata> feedback_metadata = FeedbackMetadata::New(isolate());
- share->set_feedback_metadata(*feedback_metadata, SKIP_WRITE_BARRIER);
- share->set_function_literal_id(FunctionLiteral::kIdTypeInvalid);
+ Handle<SharedFunctionInfo> share(SharedFunctionInfo::cast(New(map, TENURED)),
+ isolate());
+ {
+ DisallowHeapAllocation no_allocation;
+
+ // Set pointer fields.
+ share->set_name_or_scope_info(
+ has_shared_name ? *shared_name
+ : SharedFunctionInfo::kNoSharedNameSentinel);
+ Handle<HeapObject> function_data;
+ if (maybe_function_data.ToHandle(&function_data)) {
+ // If we pass function_data then we shouldn't pass a builtin index, and
+ // the function_data should not be code with a builtin.
+ DCHECK(!Builtins::IsBuiltinId(maybe_builtin_index));
+ DCHECK_IMPLIES(function_data->IsCode(),
+ !Code::cast(*function_data)->is_builtin());
+ share->set_function_data(*function_data);
+ } else if (Builtins::IsBuiltinId(maybe_builtin_index)) {
+ DCHECK_NE(maybe_builtin_index, Builtins::kDeserializeLazy);
+ share->set_builtin_id(maybe_builtin_index);
+ } else {
+ share->set_builtin_id(Builtins::kIllegal);
+ }
+ // Generally functions won't have feedback, unless they have been created
+ // from a FunctionLiteral. Those can just reset this field to keep the
+ // SharedFunctionInfo in a consistent state.
+ if (maybe_builtin_index == Builtins::kCompileLazy) {
+ share->set_raw_outer_scope_info_or_feedback_metadata(*the_hole_value(),
+ SKIP_WRITE_BARRIER);
+ } else {
+ share->set_raw_outer_scope_info_or_feedback_metadata(
+ *empty_feedback_metadata(), SKIP_WRITE_BARRIER);
+ }
+ share->set_script(*undefined_value(), SKIP_WRITE_BARRIER);
+ share->set_debug_info(Smi::kZero, SKIP_WRITE_BARRIER);
+ share->set_function_identifier(*undefined_value(), SKIP_WRITE_BARRIER);
+ share->set_function_literal_id(FunctionLiteral::kIdTypeInvalid);
#if V8_SFI_HAS_UNIQUE_ID
- share->set_unique_id(isolate()->GetNextUniqueSharedFunctionInfoId());
+ share->set_unique_id(isolate()->GetNextUniqueSharedFunctionInfoId());
#endif
- // Set integer fields (smi or int, depending on the architecture).
- share->set_length(0);
- share->set_internal_formal_parameter_count(0);
- share->set_expected_nof_properties(0);
- share->set_start_position_and_type(0);
- share->set_end_position(0);
- share->set_function_token_position(0);
- // All compiler hints default to false or 0.
- share->set_compiler_hints(0);
- share->set_kind(kind);
-
- share->clear_padding();
+ // Set integer fields (smi or int, depending on the architecture).
+ share->set_length(0);
+ share->set_internal_formal_parameter_count(0);
+ share->set_expected_nof_properties(0);
+ share->set_raw_start_position_and_type(0);
+ share->set_raw_end_position(0);
+ share->set_function_token_position(0);
+ // All flags default to false or 0.
+ share->set_flags(0);
+ share->CalculateConstructAsBuiltin();
+ share->set_kind(kind);
+ share->clear_padding();
+ }
// Link into the list.
Handle<Object> new_noscript_list =
- WeakFixedArray::Add(noscript_shared_function_infos(), share);
+ FixedArrayOfWeakCells::Add(noscript_shared_function_infos(), share);
isolate()->heap()->set_noscript_shared_function_infos(*new_noscript_list);
+ DCHECK_EQ(SharedFunctionInfo::kNoDebuggingId, share->debugging_id());
#ifdef VERIFY_HEAP
share->SharedFunctionInfoVerify();
#endif
return share;
}
-
static inline int NumberCacheHash(Handle<FixedArray> cache,
Handle<Object> number) {
int mask = (cache->length() >> 1) - 1;
@@ -2621,7 +3274,6 @@ static inline int NumberCacheHash(Handle<FixedArray> cache,
}
}
-
Handle<Object> Factory::GetNumberStringCache(Handle<Object> number) {
DisallowHeapAllocation no_gc;
int hash = NumberCacheHash(number_string_cache(), number);
@@ -2634,7 +3286,6 @@ Handle<Object> Factory::GetNumberStringCache(Handle<Object> number) {
return undefined_value();
}
-
void Factory::SetNumberStringCache(Handle<Object> number,
Handle<String> string) {
int hash = NumberCacheHash(number_string_cache(), number);
@@ -2650,7 +3301,6 @@ void Factory::SetNumberStringCache(Handle<Object> number,
number_string_cache()->set(hash * 2 + 1, *string);
}
-
Handle<String> Factory::NumberToString(Handle<Object> number,
bool check_number_string_cache) {
isolate()->counters()->number_to_string_runtime()->Increment();
@@ -2838,7 +3488,7 @@ Handle<LoadHandler> Factory::NewLoadHandler(int data_count) {
UNREACHABLE();
break;
}
- return New<LoadHandler>(map, OLD_SPACE);
+ return handle(LoadHandler::cast(New(map, TENURED)), isolate());
}
Handle<StoreHandler> Factory::NewStoreHandler(int data_count) {
@@ -2860,13 +3510,11 @@ Handle<StoreHandler> Factory::NewStoreHandler(int data_count) {
UNREACHABLE();
break;
}
- return New<StoreHandler>(map, OLD_SPACE);
+ return handle(StoreHandler::cast(New(map, TENURED)), isolate());
}
-void Factory::SetRegExpAtomData(Handle<JSRegExp> regexp,
- JSRegExp::Type type,
- Handle<String> source,
- JSRegExp::Flags flags,
+void Factory::SetRegExpAtomData(Handle<JSRegExp> regexp, JSRegExp::Type type,
+ Handle<String> source, JSRegExp::Flags flags,
Handle<Object> data) {
Handle<FixedArray> store = NewFixedArray(JSRegExp::kAtomDataSize);
@@ -2877,12 +3525,9 @@ void Factory::SetRegExpAtomData(Handle<JSRegExp> regexp,
regexp->set_data(*store);
}
-
void Factory::SetRegExpIrregexpData(Handle<JSRegExp> regexp,
- JSRegExp::Type type,
- Handle<String> source,
- JSRegExp::Flags flags,
- int capture_count) {
+ JSRegExp::Type type, Handle<String> source,
+ JSRegExp::Flags flags, int capture_count) {
Handle<FixedArray> store = NewFixedArray(JSRegExp::kIrregexpDataSize);
Smi* uninitialized = Smi::FromInt(JSRegExp::kUninitializedValue);
store->set(JSRegExp::kTagIndex, Smi::FromInt(type));
@@ -2891,8 +3536,7 @@ void Factory::SetRegExpIrregexpData(Handle<JSRegExp> regexp,
store->set(JSRegExp::kIrregexpLatin1CodeIndex, uninitialized);
store->set(JSRegExp::kIrregexpUC16CodeIndex, uninitialized);
store->set(JSRegExp::kIrregexpMaxRegisterCountIndex, Smi::kZero);
- store->set(JSRegExp::kIrregexpCaptureCountIndex,
- Smi::FromInt(capture_count));
+ store->set(JSRegExp::kIrregexpCaptureCountIndex, Smi::FromInt(capture_count));
store->set(JSRegExp::kIrregexpCaptureNameMapIndex, uninitialized);
regexp->set_data(*store);
}
@@ -2922,7 +3566,6 @@ Handle<Object> Factory::GlobalConstantFor(Handle<Name> name) {
return Handle<Object>::null();
}
-
Handle<Object> Factory::ToBoolean(bool value) {
return value ? true_value() : false_value();
}
@@ -3127,10 +3770,14 @@ Handle<Map> Factory::CreateClassFunctionMap(Handle<JSFunction> empty_function) {
}
Handle<JSPromise> Factory::NewJSPromiseWithoutHook(PretenureFlag pretenure) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateJSPromise(
- *isolate()->promise_function(), pretenure),
- JSPromise);
+ Handle<JSPromise> promise = Handle<JSPromise>::cast(
+ NewJSObject(isolate()->promise_function(), pretenure));
+ promise->set_reactions_or_result(Smi::kZero);
+ promise->set_flags(0);
+ for (int i = 0; i < v8::Promise::kEmbedderFieldCount; i++) {
+ promise->SetEmbedderField(i, Smi::kZero);
+ }
+ return promise;
}
Handle<JSPromise> Factory::NewJSPromise(PretenureFlag pretenure) {
@@ -3139,6 +3786,19 @@ Handle<JSPromise> Factory::NewJSPromise(PretenureFlag pretenure) {
return promise;
}
+Handle<CallHandlerInfo> Factory::NewCallHandlerInfo(bool has_no_side_effect) {
+ Handle<Map> map = has_no_side_effect
+ ? side_effect_free_call_handler_info_map()
+ : side_effect_call_handler_info_map();
+ Handle<CallHandlerInfo> info(CallHandlerInfo::cast(New(map, TENURED)),
+ isolate());
+ Object* undefined_value = isolate()->heap()->undefined_value();
+ info->set_callback(undefined_value);
+ info->set_js_callback(undefined_value);
+ info->set_data(undefined_value);
+ return info;
+}
+
// static
NewFunctionArgs NewFunctionArgs::ForWasm(Handle<String> name, Handle<Code> code,
Handle<Map> map) {
@@ -3154,14 +3814,12 @@ NewFunctionArgs NewFunctionArgs::ForWasm(Handle<String> name, Handle<Code> code,
// static
NewFunctionArgs NewFunctionArgs::ForBuiltin(Handle<String> name,
- Handle<Code> code, Handle<Map> map,
- int builtin_id) {
+ Handle<Map> map, int builtin_id) {
DCHECK(Builtins::IsBuiltinId(builtin_id));
NewFunctionArgs args;
args.name_ = name;
args.maybe_map_ = map;
- args.maybe_code_ = code;
args.maybe_builtin_id_ = builtin_id;
args.language_mode_ = LanguageMode::kStrict;
args.prototype_mutability_ = MUTABLE;
@@ -3177,6 +3835,7 @@ NewFunctionArgs NewFunctionArgs::ForFunctionWithoutCode(
NewFunctionArgs args;
args.name_ = name;
args.maybe_map_ = map;
+ args.maybe_builtin_id_ = Builtins::kIllegal;
args.language_mode_ = language_mode;
args.prototype_mutability_ = MUTABLE;
@@ -3187,14 +3846,13 @@ NewFunctionArgs NewFunctionArgs::ForFunctionWithoutCode(
// static
NewFunctionArgs NewFunctionArgs::ForBuiltinWithPrototype(
- Handle<String> name, Handle<Code> code, Handle<Object> prototype,
- InstanceType type, int instance_size, int inobject_properties,
- int builtin_id, MutableMode prototype_mutability) {
+ Handle<String> name, Handle<Object> prototype, InstanceType type,
+ int instance_size, int inobject_properties, int builtin_id,
+ MutableMode prototype_mutability) {
DCHECK(Builtins::IsBuiltinId(builtin_id));
NewFunctionArgs args;
args.name_ = name;
- args.maybe_code_ = code;
args.type_ = type;
args.instance_size_ = instance_size;
args.inobject_properties_ = inobject_properties;
@@ -3212,13 +3870,11 @@ NewFunctionArgs NewFunctionArgs::ForBuiltinWithPrototype(
// static
NewFunctionArgs NewFunctionArgs::ForBuiltinWithoutPrototype(
- Handle<String> name, Handle<Code> code, int builtin_id,
- LanguageMode language_mode) {
+ Handle<String> name, int builtin_id, LanguageMode language_mode) {
DCHECK(Builtins::IsBuiltinId(builtin_id));
NewFunctionArgs args;
args.name_ = name;
- args.maybe_code_ = code;
args.maybe_builtin_id_ = builtin_id;
args.language_mode_ = language_mode;
args.prototype_mutability_ = MUTABLE;
diff --git a/deps/v8/src/factory.h b/deps/v8/src/heap/factory.h
index 966b0602fe..ead1d0d24c 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/heap/factory.h
@@ -2,48 +2,55 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_FACTORY_H_
-#define V8_FACTORY_H_
+#ifndef V8_HEAP_FACTORY_H_
+#define V8_HEAP_FACTORY_H_
-#include "src/feedback-vector.h"
+#include "src/builtins/builtins.h"
#include "src/globals.h"
-#include "src/ic/handler-configuration.h"
-#include "src/isolate.h"
+#include "src/handles.h"
+#include "src/heap/heap.h"
#include "src/messages.h"
-#include "src/objects/data-handler.h"
-#include "src/objects/descriptor-array.h"
+#include "src/objects/code.h"
#include "src/objects/dictionary.h"
+#include "src/objects/hash-table.h"
#include "src/objects/js-array.h"
#include "src/objects/js-regexp.h"
-#include "src/objects/scope-info.h"
#include "src/objects/string.h"
-#include "src/string-hasher.h"
namespace v8 {
namespace internal {
// Forward declarations.
class AliasedArgumentsEntry;
-class BreakPointInfo;
-class BreakPoint;
class BoilerplateDescription;
+class BreakPoint;
+class BreakPointInfo;
class CallableTask;
class CallbackTask;
class ConstantElementsPair;
class CoverageInfo;
class DebugInfo;
+class EnumCache;
class FreshlyAllocatedBigInt;
+class Isolate;
class JSMap;
class JSMapIterator;
class JSModuleNamespace;
class JSSet;
class JSSetIterator;
class JSWeakMap;
+class LoadHandler;
+class ModuleInfo;
class NewFunctionArgs;
-struct SourceRange;
class PreParsedScopeData;
class PromiseResolveThenableJobTask;
+class RegExpMatchInfo;
+class ScriptContextTable;
+class StoreHandler;
class TemplateObjectDescription;
+struct SourceRange;
+template <typename T>
+class ZoneVector;
enum FunctionMode {
kWithNameBit = 1 << 0,
@@ -82,12 +89,20 @@ class V8_EXPORT_PRIVATE Factory final {
// Allocates a fixed array-like object with given map and initialized with
// undefined values.
- Handle<FixedArray> NewFixedArrayWithMap(Heap::RootListIndex map_root_index,
- int length, PretenureFlag pretenure);
+ template <typename T = FixedArray>
+ Handle<T> NewFixedArrayWithMap(Heap::RootListIndex map_root_index, int length,
+ PretenureFlag pretenure = NOT_TENURED);
// Allocates a fixed array initialized with undefined values.
Handle<FixedArray> NewFixedArray(int length,
PretenureFlag pretenure = NOT_TENURED);
+
+ // Allocates a fixed array which may contain in-place weak references. The
+ // array is initialized with undefined values.
+ Handle<WeakFixedArray> NewWeakFixedArray(
+ int length, PretenureFlag pretenure = NOT_TENURED);
+
+ // Allocates a property array initialized with undefined values.
Handle<PropertyArray> NewPropertyArray(int length,
PretenureFlag pretenure = NOT_TENURED);
// Tries allocating a fixed array initialized with undefined values.
@@ -95,7 +110,7 @@ class V8_EXPORT_PRIVATE Factory final {
// The caller has to manually signal an
// v8::internal::Heap::FatalProcessOutOfMemory typically by calling
// NewFixedArray as a fallback.
- MUST_USE_RESULT
+ V8_WARN_UNUSED_RESULT
MaybeHandle<FixedArray> TryNewFixedArray(
int length, PretenureFlag pretenure = NOT_TENURED);
@@ -120,16 +135,17 @@ class V8_EXPORT_PRIVATE Factory final {
bool has_seen_proto);
// Allocate a new uninitialized fixed double array.
- // The function returns a pre-allocated empty fixed array for capacity = 0,
+ // The function returns a pre-allocated empty fixed array for length = 0,
// so the return type must be the general fixed array class.
Handle<FixedArrayBase> NewFixedDoubleArray(
- int size,
- PretenureFlag pretenure = NOT_TENURED);
+ int length, PretenureFlag pretenure = NOT_TENURED);
// Allocate a new fixed double array with hole values.
Handle<FixedArrayBase> NewFixedDoubleArrayWithHoles(
- int size,
- PretenureFlag pretenure = NOT_TENURED);
+ int size, PretenureFlag pretenure = NOT_TENURED);
+
+ // Allocates a FeedbackMetadata object and zeroes the data section.
+ Handle<FeedbackMetadata> NewFeedbackMetadata(int slot_count);
Handle<FrameArray> NewFrameArray(int number_of_frames,
PretenureFlag pretenure = NOT_TENURED);
@@ -138,10 +154,10 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<OrderedHashMap> NewOrderedHashMap();
Handle<SmallOrderedHashSet> NewSmallOrderedHashSet(
- int size = SmallOrderedHashSet::kMinCapacity,
+ int capacity = SmallOrderedHashSet::kMinCapacity,
PretenureFlag pretenure = NOT_TENURED);
Handle<SmallOrderedHashMap> NewSmallOrderedHashMap(
- int size = SmallOrderedHashMap::kMinCapacity,
+ int capacity = SmallOrderedHashMap::kMinCapacity,
PretenureFlag pretenure = NOT_TENURED);
// Create a new PrototypeInfo struct.
@@ -182,12 +198,12 @@ class V8_EXPORT_PRIVATE Factory final {
}
Handle<String> InternalizeOneByteString(Vector<const uint8_t> str);
- Handle<String> InternalizeOneByteString(
- Handle<SeqOneByteString>, int from, int length);
+ Handle<String> InternalizeOneByteString(Handle<SeqOneByteString>, int from,
+ int length);
Handle<String> InternalizeTwoByteString(Vector<const uc16> str);
- template<class StringTableKey>
+ template <class StringTableKey>
Handle<String> InternalizeStringWithKey(StringTableKey* key);
// Internalized strings are created in the old generation (data space).
@@ -215,7 +231,7 @@ class V8_EXPORT_PRIVATE Factory final {
// will be converted to Latin1, otherwise it will be left as two-byte.
//
// One-byte strings are pretenured when used as keys in the SourceCodeCache.
- MUST_USE_RESULT MaybeHandle<String> NewStringFromOneByte(
+ V8_WARN_UNUSED_RESULT MaybeHandle<String> NewStringFromOneByte(
Vector<const uint8_t> str, PretenureFlag pretenure = NOT_TENURED);
template <size_t N>
@@ -227,25 +243,24 @@ class V8_EXPORT_PRIVATE Factory final {
}
inline Handle<String> NewStringFromAsciiChecked(
- const char* str,
- PretenureFlag pretenure = NOT_TENURED) {
- return NewStringFromOneByte(
- OneByteVector(str), pretenure).ToHandleChecked();
+ const char* str, PretenureFlag pretenure = NOT_TENURED) {
+ return NewStringFromOneByte(OneByteVector(str), pretenure)
+ .ToHandleChecked();
}
// UTF8 strings are pretenured when used for regexp literal patterns and
// flags in the parser.
- MUST_USE_RESULT MaybeHandle<String> NewStringFromUtf8(
+ V8_WARN_UNUSED_RESULT MaybeHandle<String> NewStringFromUtf8(
Vector<const char> str, PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT MaybeHandle<String> NewStringFromUtf8SubString(
+ V8_WARN_UNUSED_RESULT MaybeHandle<String> NewStringFromUtf8SubString(
Handle<SeqOneByteString> str, int begin, int end,
PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT MaybeHandle<String> NewStringFromTwoByte(
+ V8_WARN_UNUSED_RESULT MaybeHandle<String> NewStringFromTwoByte(
Vector<const uc16> str, PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT MaybeHandle<String> NewStringFromTwoByte(
+ V8_WARN_UNUSED_RESULT MaybeHandle<String> NewStringFromTwoByte(
const ZoneVector<uc16>* str, PretenureFlag pretenure = NOT_TENURED);
Handle<JSStringIterator> NewJSStringIterator(Handle<String> string);
@@ -270,7 +285,7 @@ class V8_EXPORT_PRIVATE Factory final {
// Compute the matching internalized string map for a string if possible.
// Empty handle is returned if string is in new space or not flattened.
- MUST_USE_RESULT MaybeHandle<Map> InternalizedStringMapForString(
+ V8_WARN_UNUSED_RESULT MaybeHandle<Map> InternalizedStringMapForString(
Handle<String> string);
// Creates an internalized copy of an external string. |string| must be
@@ -281,33 +296,29 @@ class V8_EXPORT_PRIVATE Factory final {
// Allocates and partially initializes a one-byte or two-byte String. The
// characters of the string are uninitialized. Currently used in regexp code
// only, where they are pretenured.
- MUST_USE_RESULT MaybeHandle<SeqOneByteString> NewRawOneByteString(
- int length,
- PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT MaybeHandle<SeqTwoByteString> NewRawTwoByteString(
- int length,
- PretenureFlag pretenure = NOT_TENURED);
+ V8_WARN_UNUSED_RESULT MaybeHandle<SeqOneByteString> NewRawOneByteString(
+ int length, PretenureFlag pretenure = NOT_TENURED);
+ V8_WARN_UNUSED_RESULT MaybeHandle<SeqTwoByteString> NewRawTwoByteString(
+ int length, PretenureFlag pretenure = NOT_TENURED);
// Creates a single character string where the character has given code.
// A cache is used for Latin1 codes.
Handle<String> LookupSingleCharacterStringFromCode(uint32_t code);
// Create a new cons string object which consists of a pair of strings.
- MUST_USE_RESULT MaybeHandle<String> NewConsString(Handle<String> left,
- Handle<String> right);
+ V8_WARN_UNUSED_RESULT MaybeHandle<String> NewConsString(Handle<String> left,
+ Handle<String> right);
- MUST_USE_RESULT Handle<String> NewConsString(Handle<String> left,
- Handle<String> right, int length,
- bool one_byte);
+ V8_WARN_UNUSED_RESULT Handle<String> NewConsString(Handle<String> left,
+ Handle<String> right,
+ int length, bool one_byte);
// Create or lookup a single character string made up of a utf16 surrogate
// pair.
Handle<String> NewSurrogatePairString(uint16_t lead, uint16_t trail);
// Create a new string object which holds a proper substring of a string.
- Handle<String> NewProperSubString(Handle<String> str,
- int begin,
- int end);
+ Handle<String> NewProperSubString(Handle<String> str, int begin, int end);
// Create a new string object which holds a substring of a string.
inline Handle<String> NewSubString(Handle<String> str, int begin, int end);
@@ -317,16 +328,16 @@ class V8_EXPORT_PRIVATE Factory final {
// not make sense to have a UTF-8 factory function for external strings,
// because we cannot change the underlying buffer. Note that these strings
// are backed by a string resource that resides outside the V8 heap.
- MUST_USE_RESULT MaybeHandle<String> NewExternalStringFromOneByte(
+ V8_WARN_UNUSED_RESULT MaybeHandle<String> NewExternalStringFromOneByte(
const ExternalOneByteString::Resource* resource);
- MUST_USE_RESULT MaybeHandle<String> NewExternalStringFromTwoByte(
+ V8_WARN_UNUSED_RESULT MaybeHandle<String> NewExternalStringFromTwoByte(
const ExternalTwoByteString::Resource* resource);
// Create a new external string object for one-byte encoded native script.
// It does not cache the resource data pointer.
Handle<ExternalOneByteString> NewNativeSourceString(
const ExternalOneByteString::Resource* resource);
- // Create a symbol.
+ // Create a symbol in old space.
Handle<Symbol> NewSymbol();
Handle<Symbol> NewPrivateSymbol();
Handle<Symbol> NewPrivateFieldSymbol();
@@ -417,8 +428,8 @@ class V8_EXPORT_PRIVATE Factory final {
PretenureFlag pretenure = NOT_TENURED);
Handle<FixedTypedArrayBase> NewFixedTypedArray(
- int length, ExternalArrayType array_type, bool initialize,
- PretenureFlag pretenure = NOT_TENURED);
+ size_t length, size_t byte_length, ExternalArrayType array_type,
+ bool initialize, PretenureFlag pretenure = NOT_TENURED);
Handle<Cell> NewCell(Handle<Object> value);
@@ -432,21 +443,30 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<TransitionArray> NewTransitionArray(int capacity);
- // Allocate a tenured AllocationSite. It's payload is null.
+ // Allocate a tenured AllocationSite. Its payload is null.
Handle<AllocationSite> NewAllocationSite();
+ // Allocates and initializes a new Map.
Handle<Map> NewMap(InstanceType type, int instance_size,
ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
int inobject_properties = 0);
-
- Handle<HeapObject> NewFillerObject(int size,
- bool double_align,
+ // Initializes the fields of a newly created Map. Exposed for tests and
+ // heap setup; other code should just call NewMap which takes care of it.
+ Map* InitializeMap(Map* map, InstanceType type, int instance_size,
+ ElementsKind elements_kind, int inobject_properties);
+
+ // Allocate a block of memory in the given space (filled with a filler).
+ // Used as a fall-back for generated code when the space is full.
+ Handle<HeapObject> NewFillerObject(int size, bool double_align,
AllocationSpace space);
Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
+ // Returns a deep copy of the JavaScript object.
+ // Properties and elements are copied too.
Handle<JSObject> CopyJSObject(Handle<JSObject> object);
-
+ // Same as above, but also takes an AllocationSite to be appended in an
+ // AllocationMemento.
Handle<JSObject> CopyJSObjectWithAllocationSite(Handle<JSObject> object,
Handle<AllocationSite> site);
@@ -470,20 +490,18 @@ class V8_EXPORT_PRIVATE Factory final {
// of it in old space.
Handle<FixedArray> CopyAndTenureFixedCOWArray(Handle<FixedArray> array);
- Handle<FixedDoubleArray> CopyFixedDoubleArray(
- Handle<FixedDoubleArray> array);
+ Handle<FixedDoubleArray> CopyFixedDoubleArray(Handle<FixedDoubleArray> array);
Handle<FeedbackVector> CopyFeedbackVector(Handle<FeedbackVector> array);
// Numbers (e.g. literals) are pretenured by the parser.
// The return value may be a smi or a heap number.
- Handle<Object> NewNumber(double value,
- PretenureFlag pretenure = NOT_TENURED);
+ Handle<Object> NewNumber(double value, PretenureFlag pretenure = NOT_TENURED);
Handle<Object> NewNumberFromInt(int32_t value,
PretenureFlag pretenure = NOT_TENURED);
Handle<Object> NewNumberFromUint(uint32_t value,
- PretenureFlag pretenure = NOT_TENURED);
+ PretenureFlag pretenure = NOT_TENURED);
inline Handle<Object> NewNumberFromSize(
size_t value, PretenureFlag pretenure = NOT_TENURED);
inline Handle<Object> NewNumberFromInt64(
@@ -509,6 +527,8 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<JSObject> NewArgumentsObject(Handle<JSFunction> callee, int length);
+ // Allocates and initializes a new JavaScript object based on a
+ // constructor.
// JS objects are pretenured when allocated by the bootstrapper and
// runtime.
Handle<JSObject> NewJSObject(Handle<JSFunction> constructor,
@@ -520,11 +540,13 @@ class V8_EXPORT_PRIVATE Factory final {
// Global objects are pretenured and initialized based on a constructor.
Handle<JSGlobalObject> NewJSGlobalObject(Handle<JSFunction> constructor);
+ // Allocates and initializes a new JavaScript object based on a map.
+ // Passing an allocation site means that a memento will be created that
+ // points to the site.
// JS objects are pretenured when allocated by the bootstrapper and
// runtime.
Handle<JSObject> NewJSObjectFromMap(
- Handle<Map> map,
- PretenureFlag pretenure = NOT_TENURED,
+ Handle<Map> map, PretenureFlag pretenure = NOT_TENURED,
Handle<AllocationSite> allocation_site = Handle<AllocationSite>::null());
Handle<JSObject> NewSlowJSObjectFromMap(
Handle<Map> map,
@@ -561,9 +583,7 @@ class V8_EXPORT_PRIVATE Factory final {
PretenureFlag pretenure = NOT_TENURED);
void NewJSArrayStorage(
- Handle<JSArray> array,
- int length,
- int capacity,
+ Handle<JSArray> array, int length, int capacity,
ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS);
Handle<JSWeakMap> NewJSWeakMap();
@@ -578,8 +598,9 @@ class V8_EXPORT_PRIVATE Factory final {
SharedFlag shared = SharedFlag::kNotShared,
PretenureFlag pretenure = NOT_TENURED);
- ExternalArrayType GetArrayTypeFromElementsKind(ElementsKind kind);
- size_t GetExternalArrayElementSize(ExternalArrayType type);
+ static void TypeAndSizeForElementsKind(ElementsKind kind,
+ ExternalArrayType* array_type,
+ size_t* element_size);
Handle<JSTypedArray> NewJSTypedArray(ExternalArrayType type,
PretenureFlag pretenure = NOT_TENURED);
@@ -598,7 +619,6 @@ class V8_EXPORT_PRIVATE Factory final {
size_t number_of_elements,
PretenureFlag pretenure = NOT_TENURED);
- Handle<JSDataView> NewJSDataView();
Handle<JSDataView> NewJSDataView(Handle<JSArrayBuffer> buffer,
size_t byte_offset, size_t byte_length);
@@ -670,7 +690,6 @@ class V8_EXPORT_PRIVATE Factory final {
// Create a serialized scope info.
Handle<ScopeInfo> NewScopeInfo(int length);
- Handle<ModuleInfoEntry> NewModuleInfoEntry();
Handle<ModuleInfo> NewModuleInfo();
Handle<PreParsedScopeData> NewPreParsedScopeData();
@@ -681,6 +700,8 @@ class V8_EXPORT_PRIVATE Factory final {
// Creates a new CodeDataContainer for a Code object.
Handle<CodeDataContainer> NewCodeDataContainer(int flags);
+ // Allocates a new code object (fully initialized). All header fields of the
+ // returned object are immutable and the code object is write protected.
// The reference to the Code object is stored in self_reference.
// This allows generated code to reference its own Code object
// by containing this handle.
@@ -698,8 +719,17 @@ class V8_EXPORT_PRIVATE Factory final {
// Allocates a new, empty code object for use by builtin deserialization. The
// given {size} argument specifies the size of the entire code object.
+ // Can only be used when code space is unprotected and requires manual
+ // initialization by the caller.
Handle<Code> NewCodeForDeserialization(uint32_t size);
+#ifdef V8_EMBEDDED_BUILTINS
+ // Allocates a new code object and initializes it as the trampoline to the
+ // given off-heap entry point.
+ Handle<Code> NewOffHeapTrampolineFor(Handle<Code> code,
+ Address off_heap_entry);
+#endif
+
Handle<Code> CopyCode(Handle<Code> code);
Handle<BytecodeArray> CopyBytecodeArray(Handle<BytecodeArray>);
@@ -739,8 +769,6 @@ class V8_EXPORT_PRIVATE Factory final {
inline Handle<String> Uint32ToString(uint32_t value);
- Handle<JSFunction> InstallMembers(Handle<JSFunction> function);
-
#define ROOT_ACCESSOR(type, name, camel_name) inline Handle<type> name();
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
@@ -773,16 +801,16 @@ class V8_EXPORT_PRIVATE Factory final {
#undef ACCESSOR_INFO_ACCESSOR
// Allocates a new SharedFunctionInfo object.
- Handle<SharedFunctionInfo> NewSharedFunctionInfo(
- MaybeHandle<String> name, FunctionKind kind, Handle<Code> code,
- Handle<ScopeInfo> scope_info);
- Handle<SharedFunctionInfo> NewSharedFunctionInfo(
- MaybeHandle<String> name, MaybeHandle<Code> code, bool is_constructor,
- FunctionKind kind = kNormalFunction,
- int maybe_builtin_index = Builtins::kNoBuiltinId);
+ Handle<SharedFunctionInfo> NewSharedFunctionInfoForApiFunction(
+ MaybeHandle<String> maybe_name,
+ Handle<FunctionTemplateInfo> function_template_info, FunctionKind kind);
+
+ Handle<SharedFunctionInfo> NewSharedFunctionInfoForBuiltin(
+ MaybeHandle<String> name, int builtin_index,
+ FunctionKind kind = kNormalFunction);
Handle<SharedFunctionInfo> NewSharedFunctionInfoForLiteral(
- FunctionLiteral* literal, Handle<Script> script);
+ FunctionLiteral* literal, Handle<Script> script, bool is_toplevel);
static bool IsFunctionModeWithPrototype(FunctionMode function_mode) {
return (function_mode & kWithPrototypeBits) != 0;
@@ -832,18 +860,14 @@ class V8_EXPORT_PRIVATE Factory final {
// Creates a new FixedArray that holds the data associated with the
// atom regexp and stores it in the regexp.
- void SetRegExpAtomData(Handle<JSRegExp> regexp,
- JSRegExp::Type type,
- Handle<String> source,
- JSRegExp::Flags flags,
+ void SetRegExpAtomData(Handle<JSRegExp> regexp, JSRegExp::Type type,
+ Handle<String> source, JSRegExp::Flags flags,
Handle<Object> match_pattern);
// Creates a new FixedArray that holds the data associated with the
// irregexp regexp and stores it in the regexp.
- void SetRegExpIrregexpData(Handle<JSRegExp> regexp,
- JSRegExp::Type type,
- Handle<String> source,
- JSRegExp::Flags flags,
+ void SetRegExpIrregexpData(Handle<JSRegExp> regexp, JSRegExp::Type type,
+ Handle<String> source, JSRegExp::Flags flags,
int capture_count);
// Returns the value for a known global constant (a property of the global
@@ -859,19 +883,48 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<JSPromise> NewJSPromise(PretenureFlag pretenure = NOT_TENURED);
+ Handle<CallHandlerInfo> NewCallHandlerInfo(bool has_no_side_effect = false);
+
+ HeapObject* NewForTest(Handle<Map> map, PretenureFlag pretenure) {
+ return New(map, pretenure);
+ }
+
private:
Isolate* isolate() { return reinterpret_cast<Isolate*>(this); }
+ HeapObject* AllocateRawWithImmortalMap(
+ int size, PretenureFlag pretenure, Map* map,
+ AllocationAlignment alignment = kWordAligned);
+ HeapObject* AllocateRawWithAllocationSite(
+ Handle<Map> map, PretenureFlag pretenure,
+ Handle<AllocationSite> allocation_site);
+
+ // Allocate memory for an uninitialized array (e.g., a FixedArray or similar).
+ HeapObject* AllocateRawArray(int size, PretenureFlag pretenure);
+ HeapObject* AllocateRawFixedArray(int length, PretenureFlag pretenure);
+ Handle<FixedArray> NewFixedArrayWithFiller(Heap::RootListIndex map_root_index,
+ int length, Object* filler,
+ PretenureFlag pretenure);
+
// Creates a heap object based on the map. The fields of the heap object are
- // not initialized by New<>() functions. It's the responsibility of the caller
- // to do that.
- template<typename T>
- Handle<T> New(Handle<Map> map, AllocationSpace space);
+ // not initialized; it's the responsibility of the caller to do that.
+ HeapObject* New(Handle<Map> map, PretenureFlag pretenure);
- template<typename T>
- Handle<T> New(Handle<Map> map,
- AllocationSpace space,
- Handle<AllocationSite> allocation_site);
+ template <typename T>
+ Handle<T> CopyArrayWithMap(Handle<T> src, Handle<Map> map);
+ template <typename T>
+ Handle<T> CopyArrayAndGrow(Handle<T> src, int grow_by,
+ PretenureFlag pretenure);
+
+ template <bool is_one_byte, typename T>
+ Handle<String> AllocateInternalizedStringImpl(T t, int chars,
+ uint32_t hash_field);
+
+ Handle<SeqOneByteString> AllocateRawOneByteInternalizedString(
+ int length, uint32_t hash_field);
+
+ Handle<String> AllocateTwoByteInternalizedString(Vector<const uc16> str,
+ uint32_t hash_field);
MaybeHandle<String> NewStringFromTwoByte(const uc16* string, int length,
PretenureFlag pretenure);
@@ -889,6 +942,20 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<JSPromise> NewJSPromiseWithoutHook(
PretenureFlag pretenure = NOT_TENURED);
+
+ Handle<SharedFunctionInfo> NewSharedFunctionInfo(
+ MaybeHandle<String> name, MaybeHandle<HeapObject> maybe_function_data,
+ int maybe_builtin_index, FunctionKind kind = kNormalFunction);
+
+ void InitializeAllocationMemento(AllocationMemento* memento,
+ AllocationSite* allocation_site);
+
+ // Initializes a JSObject based on its map.
+ void InitializeJSObjectFromMap(Handle<JSObject> obj,
+ Handle<Object> properties, Handle<Map> map);
+ // Initializes JSObject body starting at given offset.
+ void InitializeJSObjectBody(Handle<JSObject> obj, Handle<Map> map,
+ int start_offset);
};
// Utility class to simplify argument handling around JSFunction creation.
@@ -896,17 +963,16 @@ class NewFunctionArgs final {
public:
static NewFunctionArgs ForWasm(Handle<String> name, Handle<Code> code,
Handle<Map> map);
- static NewFunctionArgs ForBuiltin(Handle<String> name, Handle<Code> code,
- Handle<Map> map, int builtin_id);
+ static NewFunctionArgs ForBuiltin(Handle<String> name, Handle<Map> map,
+ int builtin_id);
static NewFunctionArgs ForFunctionWithoutCode(Handle<String> name,
Handle<Map> map,
LanguageMode language_mode);
static NewFunctionArgs ForBuiltinWithPrototype(
- Handle<String> name, Handle<Code> code, Handle<Object> prototype,
- InstanceType type, int instance_size, int inobject_properties,
- int builtin_id, MutableMode prototype_mutability);
+ Handle<String> name, Handle<Object> prototype, InstanceType type,
+ int instance_size, int inobject_properties, int builtin_id,
+ MutableMode prototype_mutability);
static NewFunctionArgs ForBuiltinWithoutPrototype(Handle<String> name,
- Handle<Code> code,
int builtin_id,
LanguageMode language_mode);
@@ -947,4 +1013,4 @@ class NewFunctionArgs final {
} // namespace internal
} // namespace v8
-#endif // V8_FACTORY_H_
+#endif // V8_HEAP_FACTORY_H_
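With the Handle<Code> parameters dropped above, builtin JSFunctions are now described by their builtin id alone, and the Code object is resolved from that id when the function is actually created. A hedged call-site sketch follows (the name and map handles, Builtins::kEmptyFunction, and the Factory::NewFunction(args) overload are assumed context, not part of this diff):

  // Hypothetical caller: only the builtin id is passed, no Code handle.
  NewFunctionArgs args =
      NewFunctionArgs::ForBuiltin(name, map, Builtins::kEmptyFunction);
  Handle<JSFunction> fn = isolate->factory()->NewFunction(args);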
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index 30abe44ca6..9900b343fd 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -152,7 +152,11 @@ GCTracer::GCTracer(Heap* heap)
new_space_allocation_in_bytes_since_gc_(0),
old_generation_allocation_in_bytes_since_gc_(0),
combined_mark_compact_speed_cache_(0.0),
- start_counter_(0) {
+ start_counter_(0),
+ average_mutator_duration_(0),
+ average_mark_compact_duration_(0),
+ current_mark_compact_mutator_utilization_(1.0),
+ previous_mark_compact_end_time_(0) {
// All accesses to incremental_marking_scope assume that incremental marking
// scopes come first.
STATIC_ASSERT(0 == Scope::FIRST_INCREMENTAL_SCOPE);
@@ -188,6 +192,10 @@ void GCTracer::ResetForTesting() {
recorded_context_disposal_times_.Reset();
recorded_survival_ratios_.Reset();
start_counter_ = 0;
+ average_mutator_duration_ = 0;
+ average_mark_compact_duration_ = 0;
+ current_mark_compact_mutator_utilization_ = 1.0;
+ previous_mark_compact_end_time_ = 0;
base::LockGuard<base::Mutex> guard(&background_counter_mutex_);
for (int i = 0; i < BackgroundScope::NUMBER_OF_SCOPES; i++) {
background_counter_[i].total_duration_ms = 0;
@@ -322,6 +330,9 @@ void GCTracer::Stop(GarbageCollector collector) {
current_.incremental_marking_scopes[i] = incremental_marking_scopes_[i];
current_.scopes[i] = incremental_marking_scopes_[i].duration;
}
+
+ RecordMutatorUtilization(
+ current_.end_time, duration + current_.incremental_marking_duration);
RecordIncrementalMarkingSpeed(current_.incremental_marking_bytes,
current_.incremental_marking_duration);
recorded_incremental_mark_compacts_.Push(
@@ -333,6 +344,8 @@ void GCTracer::Stop(GarbageCollector collector) {
case Event::MARK_COMPACTOR:
DCHECK_EQ(0u, current_.incremental_marking_bytes);
DCHECK_EQ(0, current_.incremental_marking_duration);
+ RecordMutatorUtilization(
+ current_.end_time, duration + current_.incremental_marking_duration);
recorded_mark_compacts_.Push(
MakeBytesAndDuration(current_.start_object_size, duration));
ResetIncrementalMarkingCounters();
@@ -469,7 +482,7 @@ void GCTracer::Print() const {
"[%d:%p] "
"%8.0f ms: "
"%s %.1f (%.1f) -> %.1f (%.1f) MB, "
- "%.1f / %.1f ms %s %s %s\n",
+ "%.1f / %.1f ms %s (average mu = %.3f, current mu = %.3f) %s %s\n",
base::OS::GetCurrentProcessId(),
reinterpret_cast<void*>(heap_->isolate()),
heap_->isolate()->time_millis_since_init(), current_.TypeName(false),
@@ -478,6 +491,8 @@ void GCTracer::Print() const {
static_cast<double>(current_.end_object_size) / MB,
static_cast<double>(current_.end_memory_size) / MB, duration,
TotalExternalTime(), incremental_buffer,
+ AverageMarkCompactMutatorUtilization(),
+ CurrentMarkCompactMutatorUtilization(),
Heap::GarbageCollectionReasonToString(current_.gc_reason),
current_.collector_reason != nullptr ? current_.collector_reason : "");
}
@@ -662,6 +677,7 @@ void GCTracer::PrintNVP() const {
"clear.weak_cells=%.1f "
"clear.weak_collections=%.1f "
"clear.weak_lists=%.1f "
+ "clear.weak_references=%.1f "
"epilogue=%.1f "
"evacuate=%.1f "
"evacuate.candidates=%.1f "
@@ -756,6 +772,7 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::MC_CLEAR_WEAK_CELLS],
current_.scopes[Scope::MC_CLEAR_WEAK_COLLECTIONS],
current_.scopes[Scope::MC_CLEAR_WEAK_LISTS],
+ current_.scopes[Scope::MC_CLEAR_WEAK_REFERENCES],
current_.scopes[Scope::MC_EPILOGUE],
current_.scopes[Scope::MC_EVACUATE],
current_.scopes[Scope::MC_EVACUATE_CANDIDATES],
@@ -869,6 +886,43 @@ void GCTracer::RecordIncrementalMarkingSpeed(size_t bytes, double duration) {
}
}
+void GCTracer::RecordMutatorUtilization(double mark_compact_end_time,
+ double mark_compact_duration) {
+ if (previous_mark_compact_end_time_ == 0) {
+ // The first event only contributes to previous_mark_compact_end_time_,
+ // because we cannot compute the mutator duration.
+ previous_mark_compact_end_time_ = mark_compact_end_time;
+ } else {
+ double total_duration =
+ mark_compact_end_time - previous_mark_compact_end_time_;
+ double mutator_duration = total_duration - mark_compact_duration;
+ if (average_mark_compact_duration_ == 0 && average_mutator_duration_ == 0) {
+ // This is the first event with mutator and mark-compact durations.
+ average_mark_compact_duration_ = mark_compact_duration;
+ average_mutator_duration_ = mutator_duration;
+ } else {
+ average_mark_compact_duration_ =
+ (average_mark_compact_duration_ + mark_compact_duration) / 2;
+ average_mutator_duration_ =
+ (average_mutator_duration_ + mutator_duration) / 2;
+ }
+ current_mark_compact_mutator_utilization_ =
+ total_duration ? mutator_duration / total_duration : 0;
+ previous_mark_compact_end_time_ = mark_compact_end_time;
+ }
+}
+
+double GCTracer::AverageMarkCompactMutatorUtilization() const {
+ double average_total_duration =
+ average_mark_compact_duration_ + average_mutator_duration_;
+ if (average_total_duration == 0) return 1.0;
+ return average_mutator_duration_ / average_total_duration;
+}
+
+double GCTracer::CurrentMarkCompactMutatorUtilization() const {
+ return current_mark_compact_mutator_utilization_;
+}
+
double GCTracer::IncrementalMarkingSpeedInBytesPerMillisecond() const {
const int kConservativeSpeedInBytesPerMillisecond = 128 * KB;
if (recorded_incremental_marking_speed_ != 0) {
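The bookkeeping added above folds each mark-compact event into running averages with weight 1/2 and defines mutator utilization (mu) as mutator time divided by the wall time between consecutive mark-compact ends. A minimal standalone sketch of the same arithmetic, with made-up durations (illustrative C++ only, not V8 code):

  #include <cstdio>

  int main() {
    double avg_mc = 0, avg_mutator = 0, prev_end = 0;
    bool have_first = false;
    auto record = [&](double end_time, double mc_duration) {
      if (!have_first) {  // the first event only seeds the previous end time
        prev_end = end_time;
        have_first = true;
        return;
      }
      double total = end_time - prev_end;
      double mutator = total - mc_duration;
      if (avg_mc == 0 && avg_mutator == 0) {  // first event with durations
        avg_mc = mc_duration;
        avg_mutator = mutator;
      } else {  // running average with weight 1/2
        avg_mc = (avg_mc + mc_duration) / 2;
        avg_mutator = (avg_mutator + mutator) / 2;
      }
      prev_end = end_time;
    };
    record(100, 0);   // seeds prev_end only
    record(200, 25);  // 100 ms window, 25 ms in GC -> current mu = 0.75
    record(400, 50);  // 200 ms window, 50 ms in GC -> averages 37.5 / 112.5
    std::printf("average mu = %.3f\n", avg_mutator / (avg_mc + avg_mutator));
    return 0;
  }

With those numbers the sketch prints average mu = 0.750, the same quantity that the extended trace line above now reports as "average mu".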
diff --git a/deps/v8/src/heap/gc-tracer.h b/deps/v8/src/heap/gc-tracer.h
index fb0f001e3d..f35fa50d45 100644
--- a/deps/v8/src/heap/gc-tracer.h
+++ b/deps/v8/src/heap/gc-tracer.h
@@ -300,6 +300,11 @@ class V8_EXPORT_PRIVATE GCTracer {
void NotifyIncrementalMarkingStart();
+ // Returns average mutator utilization with respect to mark-compact
+ // garbage collections. This ignores the scavenger.
+ double AverageMarkCompactMutatorUtilization() const;
+ double CurrentMarkCompactMutatorUtilization() const;
+
V8_INLINE void AddScopeSample(Scope::ScopeId scope, double duration) {
DCHECK(scope < Scope::NUMBER_OF_SCOPES);
if (scope >= Scope::FIRST_INCREMENTAL_SCOPE &&
@@ -328,6 +333,7 @@ class V8_EXPORT_PRIVATE GCTracer {
FRIEND_TEST(GCTracerTest, IncrementalMarkingDetails);
FRIEND_TEST(GCTracerTest, IncrementalScope);
FRIEND_TEST(GCTracerTest, IncrementalMarkingSpeed);
+ FRIEND_TEST(GCTracerTest, MutatorUtilization);
struct BackgroundCounter {
double total_duration_ms;
@@ -344,6 +350,8 @@ class V8_EXPORT_PRIVATE GCTracer {
void ResetForTesting();
void ResetIncrementalMarkingCounters();
void RecordIncrementalMarkingSpeed(size_t bytes, double duration);
+ void RecordMutatorUtilization(double mark_compactor_end_time,
+ double mark_compactor_duration);
// Print one detailed trace line in name=value format.
// TODO(ernstm): Move to Heap.
@@ -415,6 +423,12 @@ class V8_EXPORT_PRIVATE GCTracer {
// Counts how many tracers were started without stopping.
int start_counter_;
+ // Used for computing average mutator utilization.
+ double average_mutator_duration_;
+ double average_mark_compact_duration_;
+ double current_mark_compact_mutator_utilization_;
+ double previous_mark_compact_end_time_;
+
base::RingBuffer<BytesAndDuration> recorded_minor_gcs_total_;
base::RingBuffer<BytesAndDuration> recorded_minor_gcs_survived_;
base::RingBuffer<BytesAndDuration> recorded_compactions_;
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index 41af95fa44..230452d4d0 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -23,6 +23,8 @@
#include "src/log.h"
#include "src/msan.h"
#include "src/objects-inl.h"
+#include "src/objects/descriptor-array.h"
+#include "src/objects/literal-objects.h"
#include "src/objects/scope-info.h"
#include "src/objects/script-inl.h"
#include "src/profiler/heap-profiler.h"
@@ -126,125 +128,6 @@ size_t Heap::NewSpaceAllocationCounter() {
return new_space_allocation_counter_ + new_space()->AllocatedSinceLastGC();
}
-template <>
-bool inline Heap::IsOneByte(Vector<const char> str, int chars) {
- // TODO(dcarney): incorporate Latin-1 check when Latin-1 is supported?
- return chars == str.length();
-}
-
-
-template <>
-bool inline Heap::IsOneByte(String* str, int chars) {
- return str->IsOneByteRepresentation();
-}
-
-
-AllocationResult Heap::AllocateInternalizedStringFromUtf8(
- Vector<const char> str, int chars, uint32_t hash_field) {
- if (IsOneByte(str, chars)) {
- return AllocateOneByteInternalizedString(Vector<const uint8_t>::cast(str),
- hash_field);
- }
- return AllocateInternalizedStringImpl<false>(str, chars, hash_field);
-}
-
-
-template <typename T>
-AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
- uint32_t hash_field) {
- if (IsOneByte(t, chars)) {
- return AllocateInternalizedStringImpl<true>(t, chars, hash_field);
- }
- return AllocateInternalizedStringImpl<false>(t, chars, hash_field);
-}
-
-
-AllocationResult Heap::AllocateOneByteInternalizedString(
- Vector<const uint8_t> str, uint32_t hash_field) {
- CHECK_GE(String::kMaxLength, str.length());
- // The canonical empty_string is the only zero-length string we allow.
- DCHECK_IMPLIES(str.length() == 0, roots_[kempty_stringRootIndex] == nullptr);
- // Compute map and object size.
- Map* map = one_byte_internalized_string_map();
- int size = SeqOneByteString::SizeFor(str.length());
-
- // Allocate string.
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
- if (!allocation.To(&result)) return allocation;
- }
-
- // String maps are all immortal immovable objects.
- result->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
- // Set length and hash fields of the allocated string.
- String* answer = String::cast(result);
- answer->set_length(str.length());
- answer->set_hash_field(hash_field);
-
- DCHECK_EQ(size, answer->Size());
-
- // Fill in the characters.
- MemCopy(answer->address() + SeqOneByteString::kHeaderSize, str.start(),
- str.length());
-
- return answer;
-}
-
-
-AllocationResult Heap::AllocateTwoByteInternalizedString(Vector<const uc16> str,
- uint32_t hash_field) {
- CHECK_GE(String::kMaxLength, str.length());
- DCHECK_NE(0, str.length()); // Use Heap::empty_string() instead.
- // Compute map and object size.
- Map* map = internalized_string_map();
- int size = SeqTwoByteString::SizeFor(str.length());
-
- // Allocate string.
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
- if (!allocation.To(&result)) return allocation;
- }
-
- result->set_map_after_allocation(map);
- // Set length and hash fields of the allocated string.
- String* answer = String::cast(result);
- answer->set_length(str.length());
- answer->set_hash_field(hash_field);
-
- DCHECK_EQ(size, answer->Size());
-
- // Fill in the characters.
- MemCopy(answer->address() + SeqTwoByteString::kHeaderSize, str.start(),
- str.length() * kUC16Size);
-
- return answer;
-}
-
-AllocationResult Heap::CopyFixedArray(FixedArray* src) {
- if (src->length() == 0) return src;
- return CopyFixedArrayWithMap(src, src->map());
-}
-
-
-AllocationResult Heap::CopyFixedDoubleArray(FixedDoubleArray* src) {
- if (src->length() == 0) return src;
- return CopyFixedDoubleArrayWithMap(src, src->map());
-}
-
-AllocationResult Heap::AllocateFixedArrayWithMap(RootListIndex map_root_index,
- int length,
- PretenureFlag pretenure) {
- return AllocateFixedArrayWithFiller(map_root_index, length, pretenure,
- undefined_value());
-}
-
-AllocationResult Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
- return AllocateFixedArrayWithFiller(Heap::kFixedArrayMapRootIndex, length,
- pretenure, undefined_value());
-}
-
AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
AllocationAlignment alignment) {
DCHECK(AllowHandleAllocation::IsAllowed());
@@ -295,22 +178,33 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
allocation = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
} else if (MAP_SPACE == space) {
allocation = map_space_->AllocateRawUnaligned(size_in_bytes);
+ } else if (RO_SPACE == space) {
+#ifdef V8_USE_SNAPSHOT
+ DCHECK(isolate_->serializer_enabled());
+#endif
+ DCHECK(!large_object);
+ allocation = read_only_space_->AllocateRaw(size_in_bytes, alignment);
} else {
// NEW_SPACE is not allowed here.
UNREACHABLE();
}
+
if (allocation.To(&object)) {
+ if (space == CODE_SPACE) {
+ // Unprotect the memory chunk of the object if it was not unprotected
+ // already.
+ UnprotectAndRegisterMemoryChunk(object);
+ ZapCodeObject(object->address(), size_in_bytes);
+ }
OnAllocationEvent(object, size_in_bytes);
}
return allocation;
}
-
void Heap::OnAllocationEvent(HeapObject* object, int size_in_bytes) {
- HeapProfiler* profiler = isolate_->heap_profiler();
- if (profiler->is_tracking_allocations()) {
- profiler->AllocationEvent(object->address(), size_in_bytes);
+ for (auto& tracker : allocation_trackers_) {
+ tracker->AllocationEvent(object->address(), size_in_bytes);
}
if (FLAG_verify_predictable) {
@@ -342,6 +236,9 @@ void Heap::OnMoveEvent(HeapObject* target, HeapObject* source,
heap_profiler->ObjectMoveEvent(source->address(), target->address(),
size_in_bytes);
}
+ for (auto& tracker : allocation_trackers_) {
+ tracker->MoveEvent(source->address(), target->address(), size_in_bytes);
+ }
if (target->IsSharedFunctionInfo()) {
LOG_CODE_EVENT(isolate_, SharedFunctionInfoMoveEvent(source->address(),
target->address()));
@@ -410,27 +307,55 @@ void Heap::FinalizeExternalString(String* string) {
Address Heap::NewSpaceTop() { return new_space_->top(); }
bool Heap::InNewSpace(Object* object) {
+ DCHECK(!HasWeakHeapObjectTag(object));
+ return object->IsHeapObject() && InNewSpace(HeapObject::cast(object));
+}
+
+bool Heap::InNewSpace(MaybeObject* object) {
+ HeapObject* heap_object;
+ return object->ToStrongOrWeakHeapObject(&heap_object) &&
+ InNewSpace(heap_object);
+}
+
+bool Heap::InNewSpace(HeapObject* heap_object) {
// Inlined check from NewSpace::Contains.
- bool result =
- object->IsHeapObject() &&
- Page::FromAddress(HeapObject::cast(object)->address())->InNewSpace();
+ bool result = Page::FromAddress(heap_object->address())->InNewSpace();
DCHECK(!result || // Either not in new space
gc_state_ != NOT_IN_GC || // ... or in the middle of GC
- InToSpace(object)); // ... or in to-space (where we allocate).
+ InToSpace(heap_object)); // ... or in to-space (where we allocate).
return result;
}
bool Heap::InFromSpace(Object* object) {
- return object->IsHeapObject() &&
- MemoryChunk::FromAddress(HeapObject::cast(object)->address())
- ->IsFlagSet(Page::IN_FROM_SPACE);
+ DCHECK(!HasWeakHeapObjectTag(object));
+ return object->IsHeapObject() && InFromSpace(HeapObject::cast(object));
}
+bool Heap::InFromSpace(MaybeObject* object) {
+ HeapObject* heap_object;
+ return object->ToStrongOrWeakHeapObject(&heap_object) &&
+ InFromSpace(heap_object);
+}
+
+bool Heap::InFromSpace(HeapObject* heap_object) {
+ return MemoryChunk::FromAddress(heap_object->address())
+ ->IsFlagSet(Page::IN_FROM_SPACE);
+}
bool Heap::InToSpace(Object* object) {
- return object->IsHeapObject() &&
- MemoryChunk::FromAddress(HeapObject::cast(object)->address())
- ->IsFlagSet(Page::IN_TO_SPACE);
+ DCHECK(!HasWeakHeapObjectTag(object));
+ return object->IsHeapObject() && InToSpace(HeapObject::cast(object));
+}
+
+bool Heap::InToSpace(MaybeObject* object) {
+ HeapObject* heap_object;
+ return object->ToStrongOrWeakHeapObject(&heap_object) &&
+ InToSpace(heap_object);
+}
+
+bool Heap::InToSpace(HeapObject* heap_object) {
+ return MemoryChunk::FromAddress(heap_object->address())
+ ->IsFlagSet(Page::IN_TO_SPACE);
}
bool Heap::InOldSpace(Object* object) { return old_space_->Contains(object); }
@@ -451,6 +376,15 @@ bool Heap::ShouldBePromoted(Address old_address) {
}
void Heap::RecordWrite(Object* object, Object** slot, Object* value) {
+ DCHECK(!HasWeakHeapObjectTag(*slot));
+ DCHECK(!HasWeakHeapObjectTag(value));
+ if (!InNewSpace(value) || !object->IsHeapObject() || InNewSpace(object)) {
+ return;
+ }
+ store_buffer()->InsertEntry(reinterpret_cast<Address>(slot));
+}
+
+void Heap::RecordWrite(Object* object, MaybeObject** slot, MaybeObject* value) {
if (!InNewSpace(value) || !object->IsHeapObject() || InNewSpace(object)) {
return;
}
@@ -594,15 +528,22 @@ uint32_t Heap::HashSeed() {
int Heap::NextScriptId() {
int last_id = last_script_id()->value();
- if (last_id == Smi::kMaxValue) {
- last_id = 1;
- } else {
- last_id++;
- }
+ if (last_id == Smi::kMaxValue) last_id = v8::UnboundScript::kNoScriptId;
+ last_id++;
set_last_script_id(Smi::FromInt(last_id));
return last_id;
}
+int Heap::NextDebuggingId() {
+ int last_id = last_debugging_id()->value();
+ if (last_id == SharedFunctionInfo::DebuggingIdBits::kMax) {
+ last_id = SharedFunctionInfo::kNoDebuggingId;
+ }
+ last_id++;
+ set_last_debugging_id(Smi::FromInt(last_id));
+ return last_id;
+}
+
int Heap::GetNextTemplateSerialNumber() {
int next_serial_number = next_template_serial_number()->value() + 1;
set_next_template_serial_number(Smi::FromInt(next_serial_number));
@@ -649,6 +590,24 @@ CodeSpaceMemoryModificationScope::~CodeSpaceMemoryModificationScope() {
}
}
+CodePageCollectionMemoryModificationScope::
+ CodePageCollectionMemoryModificationScope(Heap* heap)
+ : heap_(heap) {
+ if (heap_->write_protect_code_memory() &&
+ !heap_->code_space_memory_modification_scope_depth()) {
+ heap_->EnableUnprotectedMemoryChunksRegistry();
+ }
+}
+
+CodePageCollectionMemoryModificationScope::
+ ~CodePageCollectionMemoryModificationScope() {
+ if (heap_->write_protect_code_memory() &&
+ !heap_->code_space_memory_modification_scope_depth()) {
+ heap_->ProtectUnprotectedMemoryChunks();
+ heap_->DisableUnprotectedMemoryChunksRegistry();
+ }
+}
+
CodePageMemoryModificationScope::CodePageMemoryModificationScope(
MemoryChunk* chunk)
: chunk_(chunk),
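The new CodePageCollectionMemoryModificationScope above is an RAII helper around the unprotected-chunk registry: while it is alive (and code-space write protection is enabled), code chunks registered through UnprotectAndRegisterMemoryChunk stay writable, and the destructor re-protects all of them in one pass. A hedged usage sketch, where heap is assumed context:

  {
    CodePageCollectionMemoryModificationScope modification_scope(heap);
    // ... allocate or patch code objects; their pages stay read+write ...
  }  // ProtectUnprotectedMemoryChunks() flips them back to read+execute here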
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 9a83c0d172..ab2399aad7 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -48,6 +48,7 @@
#include "src/instruction-stream.h"
#include "src/interpreter/interpreter.h"
#include "src/objects/data-handler.h"
+#include "src/objects/maybe-object.h"
#include "src/objects/shared-function-info.h"
#include "src/regexp/jsregexp.h"
#include "src/runtime-profiler.h"
@@ -55,7 +56,6 @@
#include "src/snapshot/serializer-common.h"
#include "src/snapshot/snapshot.h"
#include "src/tracing/trace-event.h"
-#include "src/trap-handler/trap-handler.h"
#include "src/unicode-decoder.h"
#include "src/unicode-inl.h"
#include "src/utils-inl.h"
@@ -161,8 +161,6 @@ Heap::Heap()
survived_last_scavenge_(0),
always_allocate_scope_count_(0),
memory_pressure_level_(MemoryPressureLevel::kNone),
- out_of_memory_callback_(nullptr),
- out_of_memory_callback_data_(nullptr),
contexts_disposed_(0),
number_of_disposed_maps_(0),
new_space_(nullptr),
@@ -170,6 +168,7 @@ Heap::Heap()
code_space_(nullptr),
map_space_(nullptr),
lo_space_(nullptr),
+ read_only_space_(nullptr),
write_protect_code_memory_(false),
code_space_memory_modification_scope_depth_(0),
gc_state_(NOT_IN_GC),
@@ -182,6 +181,7 @@ Heap::Heap()
max_marking_limit_reached_(0.0),
ms_count_(0),
gc_count_(0),
+ consecutive_ineffective_mark_compacts_(0),
mmap_region_base_(0),
remembered_unmapped_pages_index_(0),
old_generation_allocation_limit_(initial_old_generation_size_),
@@ -229,10 +229,10 @@ Heap::Heap()
heap_iterator_depth_(0),
local_embedder_heap_tracer_(nullptr),
fast_promotion_mode_(false),
- use_tasks_(true),
force_oom_(false),
delay_sweeper_tasks_for_testing_(false),
- pending_layout_change_object_(nullptr)
+ pending_layout_change_object_(nullptr),
+ unprotected_memory_chunks_registry_enabled_(false)
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
,
allocation_timeout_(0)
@@ -250,6 +250,12 @@ Heap::Heap()
RememberUnmappedPage(nullptr, false);
}
+size_t Heap::MaxReserved() {
+ const double kFactor = Page::kPageSize * 1.0 / Page::kAllocatableMemory;
+ return static_cast<size_t>(
+ (2 * max_semi_space_size_ + max_old_generation_size_) * kFactor);
+}
+
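The scale factor above compensates for per-page bookkeeping: Page::kAllocatableMemory is slightly smaller than Page::kPageSize, so the reserved maximum is the logical limits inflated by that ratio. As an illustrative calculation (the byte values are assumptions, not the real constants): with a 512 KB page of which 504 KB is allocatable, kFactor is about 1.016, and a 16 MB semi-space plus a 256 MB old generation reserves roughly (2 * 16 + 256) * 1.016, about 293 MB.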
size_t Heap::Capacity() {
if (!HasBeenSetUp()) return 0;
@@ -324,7 +330,8 @@ bool Heap::CanExpandOldGeneration(size_t size) {
bool Heap::HasBeenSetUp() {
return old_space_ != nullptr && code_space_ != nullptr &&
- map_space_ != nullptr && lo_space_ != nullptr;
+ map_space_ != nullptr && lo_space_ != nullptr &&
+ read_only_space_ != nullptr;
}
@@ -435,13 +442,27 @@ void Heap::ReportStatisticsAfterGC() {
}
}
+void Heap::AddHeapObjectAllocationTracker(
+ HeapObjectAllocationTracker* tracker) {
+ if (allocation_trackers_.empty()) DisableInlineAllocation();
+ allocation_trackers_.push_back(tracker);
+}
+
+void Heap::RemoveHeapObjectAllocationTracker(
+ HeapObjectAllocationTracker* tracker) {
+ allocation_trackers_.erase(std::remove(allocation_trackers_.begin(),
+ allocation_trackers_.end(), tracker),
+ allocation_trackers_.end());
+ if (allocation_trackers_.empty()) EnableInlineAllocation();
+}
+
void Heap::AddRetainingPathTarget(Handle<HeapObject> object,
RetainingPathOption option) {
if (!FLAG_track_retaining_path) {
- PrintF("Retaining path tracking requires --trace-retaining-path\n");
+ PrintF("Retaining path tracking requires --track-retaining-path\n");
} else {
int index = 0;
- Handle<WeakFixedArray> array = WeakFixedArray::Add(
+ Handle<FixedArrayOfWeakCells> array = FixedArrayOfWeakCells::Add(
handle(retaining_path_targets(), isolate()), object, &index);
set_retaining_path_targets(*array);
retaining_path_target_option_[index] = option;
@@ -450,8 +471,9 @@ void Heap::AddRetainingPathTarget(Handle<HeapObject> object,
bool Heap::IsRetainingPathTarget(HeapObject* object,
RetainingPathOption* option) {
- if (!retaining_path_targets()->IsWeakFixedArray()) return false;
- WeakFixedArray* targets = WeakFixedArray::cast(retaining_path_targets());
+ if (!retaining_path_targets()->IsFixedArrayOfWeakCells()) return false;
+ FixedArrayOfWeakCells* targets =
+ FixedArrayOfWeakCells::cast(retaining_path_targets());
int length = targets->Length();
for (int i = 0; i < length; i++) {
if (targets->Get(i) == object) {
@@ -616,6 +638,8 @@ const char* Heap::GetSpaceName(int idx) {
return "code_space";
case LO_SPACE:
return "large_object_space";
+ case RO_SPACE:
+ return "read_only_space";
default:
UNREACHABLE();
}
@@ -965,7 +989,7 @@ void Heap::GarbageCollectionEpilogue() {
void Heap::PreprocessStackTraces() {
- WeakFixedArray::Iterator iterator(weak_stack_trace_list());
+ FixedArrayOfWeakCells::Iterator iterator(weak_stack_trace_list());
FixedArray* elements;
while ((elements = iterator.Next<FixedArray>()) != nullptr) {
for (int j = 1; j < elements->length(); j += 4) {
@@ -1063,6 +1087,33 @@ void Heap::FinalizeIncrementalMarking(GarbageCollectionReason gc_reason) {
}
}
+HistogramTimer* Heap::GCTypePriorityTimer(GarbageCollector collector) {
+ if (IsYoungGenerationCollector(collector)) {
+ if (isolate_->IsIsolateInBackground()) {
+ return isolate_->counters()->gc_scavenger_background();
+ }
+ return isolate_->counters()->gc_scavenger_foreground();
+ } else {
+ if (!incremental_marking()->IsStopped()) {
+ if (ShouldReduceMemory()) {
+ if (isolate_->IsIsolateInBackground()) {
+ return isolate_->counters()->gc_finalize_reduce_memory_background();
+ }
+ return isolate_->counters()->gc_finalize_reduce_memory_foreground();
+ } else {
+ if (isolate_->IsIsolateInBackground()) {
+ return isolate_->counters()->gc_finalize_background();
+ }
+ return isolate_->counters()->gc_finalize_foreground();
+ }
+ } else {
+ if (isolate_->IsIsolateInBackground()) {
+ return isolate_->counters()->gc_compactor_background();
+ }
+ return isolate_->counters()->gc_compactor_foreground();
+ }
+ }
+}
HistogramTimer* Heap::GCTypeTimer(GarbageCollector collector) {
if (IsYoungGenerationCollector(collector)) {
@@ -1163,7 +1214,7 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
// hope that eventually there will be no weak callback invocations.
// Therefore stop recollecting after several attempts.
if (gc_reason == GarbageCollectionReason::kLastResort) {
- InvokeOutOfMemoryCallback();
+ InvokeNearHeapLimitCallback();
}
RuntimeCallTimerScope runtime_timer(
isolate(), RuntimeCallCounterId::kGC_Custom_AllAvailableGarbage);
@@ -1226,7 +1277,7 @@ void Heap::ReportExternalMemoryPressure() {
}
if (incremental_marking()->IsStopped()) {
if (incremental_marking()->CanBeActivated()) {
- StartIncrementalMarking(i::Heap::kNoGCFlags,
+ StartIncrementalMarking(GCFlagsForIncrementalMarking(),
GarbageCollectionReason::kExternalMemoryPressure,
kGCCallbackFlagsForExternalMemory);
} else {
@@ -1267,12 +1318,16 @@ void Heap::EnsureFillerObjectAtTop() {
bool Heap::CollectGarbage(AllocationSpace space,
GarbageCollectionReason gc_reason,
const v8::GCCallbackFlags gc_callback_flags) {
- // The VM is in the GC state until exiting this function.
- VMState<GC> state(isolate());
-
const char* collector_reason = nullptr;
GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
+ if (!CanExpandOldGeneration(new_space()->Capacity())) {
+ InvokeNearHeapLimitCallback();
+ }
+
+ // The VM is in the GC state until exiting this function.
+ VMState<GC> state(isolate());
+
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
// Reset the allocation timeout, but make sure to allow at least a few
// allocations after a collection. The reason for this is that we have a lot
@@ -1311,6 +1366,10 @@ bool Heap::CollectGarbage(AllocationSpace space,
HistogramTimerScope histogram_timer_scope(gc_type_timer);
TRACE_EVENT0("v8", gc_type_timer->name());
+ HistogramTimer* gc_type_priority_timer = GCTypePriorityTimer(collector);
+ HistogramTimerScope histogram_timer_priority_scope(
+ gc_type_priority_timer);
+
next_gc_likely_to_collect_more =
PerformGarbageCollection(collector, gc_callback_flags);
}
@@ -1357,7 +1416,8 @@ bool Heap::CollectGarbage(AllocationSpace space,
if (IsYoungGenerationCollector(collector) &&
!ShouldAbortIncrementalMarking()) {
StartIncrementalMarkingIfAllocationLimitIsReached(
- kNoGCFlags, kGCCallbackScheduleIdleGarbageCollection);
+ GCFlagsForIncrementalMarking(),
+ kGCCallbackScheduleIdleGarbageCollection);
}
return next_gc_likely_to_collect_more;
@@ -1445,6 +1505,7 @@ class StringTableVerifier : public ObjectVisitor {
void VisitPointers(HeapObject* host, Object** start, Object** end) override {
// Visit all HeapObject pointers in [start, end).
for (Object** p = start; p < end; p++) {
+ DCHECK(!HasWeakHeapObjectTag(*p));
if ((*p)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*p);
Isolate* isolate = object->GetIsolate();
@@ -1454,6 +1515,10 @@ class StringTableVerifier : public ObjectVisitor {
}
}
}
+ void VisitPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) override {
+ UNREACHABLE();
+ }
};
@@ -1469,8 +1534,8 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
static const int kThreshold = 20;
while (gc_performed && counter++ < kThreshold) {
gc_performed = false;
- for (int space = NEW_SPACE; space < SerializerDeserializer::kNumberOfSpaces;
- space++) {
+ for (int space = FIRST_SPACE;
+ space < SerializerDeserializer::kNumberOfSpaces; space++) {
Reservation* reservation = &reservations[space];
DCHECK_LE(1, reservation->size());
if (reservation->at(0).size == 0) continue;
@@ -1543,7 +1608,7 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
// so that we cannot allocate space to deserialize the initial heap.
if (!deserialization_complete_) {
V8::FatalProcessOutOfMemory(
- "insufficient memory to create an Isolate");
+ isolate(), "insufficient memory to create an Isolate");
}
if (space == NEW_SPACE) {
CollectGarbage(NEW_SPACE, GarbageCollectionReason::kDeserializer);
@@ -1572,7 +1637,7 @@ void Heap::EnsureFromSpaceIsCommitted() {
// Committing memory to from space failed.
// Memory is exhausted and we will die.
- V8::FatalProcessOutOfMemory("Committing semi space failed.");
+ FatalProcessOutOfMemory("Committing semi space failed.");
}
@@ -1707,6 +1772,8 @@ bool Heap::PerformGarbageCollection(
external_memory_at_last_mark_compact_ = external_memory_;
external_memory_limit_ = external_memory_ + kExternalAllocationSoftLimit;
SetOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
+ CheckIneffectiveMarkCompact(
+ old_gen_size, tracer()->AverageMarkCompactMutatorUtilization());
} else if (HasLowYoungGenerationAllocationRate() &&
old_generation_size_configured_) {
DampenOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
@@ -1785,6 +1852,7 @@ void Heap::MarkCompact() {
}
void Heap::MinorMarkCompact() {
+#ifdef ENABLE_MINOR_MC
DCHECK(FLAG_minor_mc);
PauseAllocationObserversScope pause_observers(this);
@@ -1802,6 +1870,9 @@ void Heap::MinorMarkCompact() {
LOG(isolate_, ResourceEvent("MinorMarkCompact", "end"));
SetGCState(NOT_IN_GC);
+#else
+ UNREACHABLE();
+#endif // ENABLE_MINOR_MC
}
void Heap::MarkCompactEpilogue() {
@@ -1920,7 +1991,8 @@ static bool IsLogging(Isolate* isolate) {
return FLAG_verify_predictable || isolate->logger()->is_logging() ||
isolate->is_profiling() ||
(isolate->heap_profiler() != nullptr &&
- isolate->heap_profiler()->is_tracking_object_moves());
+ isolate->heap_profiler()->is_tracking_object_moves()) ||
+ isolate->heap()->has_heap_object_allocation_tracker();
}
class PageScavengingItem final : public ItemParallelJob::Item {
@@ -1978,10 +2050,14 @@ int Heap::NumberOfScavengeTasks() {
if (!FLAG_parallel_scavenge) return 1;
const int num_scavenge_tasks =
static_cast<int>(new_space()->TotalCapacity()) / MB;
- static int num_cores =
- 1 + static_cast<int>(
- V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads());
- return Max(1, Min(Min(num_scavenge_tasks, kMaxScavengerTasks), num_cores));
+ static int num_cores = V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
+ int tasks =
+ Max(1, Min(Min(num_scavenge_tasks, kMaxScavengerTasks), num_cores));
+ if (!CanExpandOldGeneration(static_cast<size_t>(tasks * Page::kPageSize))) {
+ // Optimize for memory usage near the heap limit.
+ tasks = 1;
+ }
+ return tasks;
}
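A quick sanity check of the arithmetic above (the value of kMaxScavengerTasks is not shown in this hunk; assume it is at least 8): with a 16 MB new space and 7 worker threads reported by the platform, num_scavenge_tasks is 16 and num_cores is 8, so tasks starts at 8; if the old generation cannot grow by 8 more pages, the count is forced back to 1 to keep memory usage low near the heap limit.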
void Heap::Scavenge() {
@@ -2158,6 +2234,33 @@ void Heap::ComputeFastPromotionMode(double survival_rate) {
}
}
+void Heap::UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk) {
+ if (unprotected_memory_chunks_registry_enabled_) {
+ base::LockGuard<base::Mutex> guard(&unprotected_memory_chunks_mutex_);
+ if (unprotected_memory_chunks_.insert(chunk).second) {
+ chunk->SetReadAndWritable();
+ }
+ }
+}
+
+void Heap::UnprotectAndRegisterMemoryChunk(HeapObject* object) {
+ UnprotectAndRegisterMemoryChunk(MemoryChunk::FromAddress(object->address()));
+}
+
+void Heap::UnregisterUnprotectedMemoryChunk(MemoryChunk* chunk) {
+ unprotected_memory_chunks_.erase(chunk);
+}
+
+void Heap::ProtectUnprotectedMemoryChunks() {
+ DCHECK(unprotected_memory_chunks_registry_enabled_);
+ for (auto chunk = unprotected_memory_chunks_.begin();
+ chunk != unprotected_memory_chunks_.end(); chunk++) {
+ CHECK(memory_allocator()->IsMemoryChunkExecutable(*chunk));
+ (*chunk)->SetReadAndExecutable();
+ }
+ unprotected_memory_chunks_.clear();
+}
+
String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
Object** p) {
MapWord first_word = HeapObject::cast(*p)->map_word();
@@ -2447,225 +2550,6 @@ void Heap::ConfigureInitialOldGenerationSize() {
}
}
-AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
- int instance_size) {
- Object* result = nullptr;
- AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
- if (!allocation.To(&result)) return allocation;
- // Map::cast cannot be used due to uninitialized map field.
- Map* map = reinterpret_cast<Map*>(result);
- map->set_map_after_allocation(reinterpret_cast<Map*>(root(kMetaMapRootIndex)),
- SKIP_WRITE_BARRIER);
- map->set_instance_type(instance_type);
- map->set_instance_size(instance_size);
- // Initialize to only containing tagged fields.
- if (FLAG_unbox_double_fields) {
- map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
- }
- // GetVisitorId requires a properly initialized LayoutDescriptor.
- map->set_visitor_id(Map::GetVisitorId(map));
- map->set_inobject_properties_start_or_constructor_function_index(0);
- DCHECK(!map->IsJSObjectMap());
- map->SetInObjectUnusedPropertyFields(0);
- map->set_bit_field(0);
- map->set_bit_field2(0);
- int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
- Map::OwnsDescriptorsBit::encode(true) |
- Map::ConstructionCounterBits::encode(Map::kNoSlackTracking);
- map->set_bit_field3(bit_field3);
- map->set_weak_cell_cache(Smi::kZero);
- map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
- return map;
-}
-
-AllocationResult Heap::AllocateMap(InstanceType instance_type,
- int instance_size,
- ElementsKind elements_kind,
- int inobject_properties) {
- STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
- DCHECK_IMPLIES(instance_type >= FIRST_JS_OBJECT_TYPE &&
- !Map::CanHaveFastTransitionableElementsKind(instance_type),
- IsDictionaryElementsKind(elements_kind) ||
- IsTerminalElementsKind(elements_kind));
- HeapObject* result = nullptr;
- AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
- if (!allocation.To(&result)) return allocation;
-
- isolate()->counters()->maps_created()->Increment();
- result->set_map_after_allocation(meta_map(), SKIP_WRITE_BARRIER);
- Map* map = Map::cast(result);
- map->set_instance_type(instance_type);
- map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
- map->set_constructor_or_backpointer(null_value(), SKIP_WRITE_BARRIER);
- map->set_instance_size(instance_size);
- if (map->IsJSObjectMap()) {
- map->SetInObjectPropertiesStartInWords(instance_size / kPointerSize -
- inobject_properties);
- DCHECK_EQ(map->GetInObjectProperties(), inobject_properties);
- } else {
- DCHECK_EQ(inobject_properties, 0);
- map->set_inobject_properties_start_or_constructor_function_index(0);
- }
- map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
- SKIP_WRITE_BARRIER);
- map->set_weak_cell_cache(Smi::kZero);
- map->set_raw_transitions(Smi::kZero);
- map->SetInObjectUnusedPropertyFields(inobject_properties);
- map->set_instance_descriptors(empty_descriptor_array());
- if (FLAG_unbox_double_fields) {
- map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
- }
- // Must be called only after |instance_type|, |instance_size| and
- // |layout_descriptor| are set.
- map->set_visitor_id(Map::GetVisitorId(map));
- map->set_bit_field(0);
- map->set_bit_field2(Map::IsExtensibleBit::kMask);
- int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
- Map::OwnsDescriptorsBit::encode(true) |
- Map::ConstructionCounterBits::encode(Map::kNoSlackTracking);
- map->set_bit_field3(bit_field3);
- map->set_elements_kind(elements_kind);
- map->set_new_target_is_base(true);
- if (FLAG_trace_maps) LOG(isolate(), MapCreate(map));
- return map;
-}
-
-
-AllocationResult Heap::AllocateFillerObject(int size, bool double_align,
- AllocationSpace space) {
- HeapObject* obj = nullptr;
- {
- AllocationAlignment align = double_align ? kDoubleAligned : kWordAligned;
- AllocationResult allocation = AllocateRaw(size, space, align);
- if (!allocation.To(&obj)) return allocation;
- }
-#ifdef DEBUG
- MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
- DCHECK(chunk->owner()->identity() == space);
-#endif
- CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo);
- return obj;
-}
-
-
-AllocationResult Heap::AllocateHeapNumber(MutableMode mode,
- PretenureFlag pretenure) {
- // Statically ensure that it is safe to allocate heap numbers in paged
- // spaces.
- int size = HeapNumber::kSize;
- STATIC_ASSERT(HeapNumber::kSize <= kMaxRegularHeapObjectSize);
-
- AllocationSpace space = SelectSpace(pretenure);
-
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRaw(size, space, kDoubleUnaligned);
- if (!allocation.To(&result)) return allocation;
- }
-
- Map* map = mode == MUTABLE ? mutable_heap_number_map() : heap_number_map();
- HeapObject::cast(result)->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
- return result;
-}
-
-AllocationResult Heap::AllocateBigInt(int length, PretenureFlag pretenure) {
- if (length < 0 || length > BigInt::kMaxLength) {
- v8::internal::Heap::FatalProcessOutOfMemory("invalid BigInt length", true);
- }
- int size = BigInt::SizeFor(length);
- AllocationSpace space = SelectSpace(pretenure);
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRaw(size, space);
- if (!allocation.To(&result)) return allocation;
- }
- result->set_map_after_allocation(bigint_map(), SKIP_WRITE_BARRIER);
- return result;
-}
-
-AllocationResult Heap::AllocateCell(Object* value) {
- int size = Cell::kSize;
- STATIC_ASSERT(Cell::kSize <= kMaxRegularHeapObjectSize);
-
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
- if (!allocation.To(&result)) return allocation;
- }
- result->set_map_after_allocation(cell_map(), SKIP_WRITE_BARRIER);
- Cell::cast(result)->set_value(value);
- return result;
-}
-
-AllocationResult Heap::AllocateFeedbackCell(Map* map, HeapObject* value) {
- int size = FeedbackCell::kSize;
- STATIC_ASSERT(FeedbackCell::kSize <= kMaxRegularHeapObjectSize);
-
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
- if (!allocation.To(&result)) return allocation;
- }
- result->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
- FeedbackCell::cast(result)->set_value(value);
- return result;
-}
-
-AllocationResult Heap::AllocatePropertyCell(Name* name) {
- DCHECK(name->IsUniqueName());
- int size = PropertyCell::kSize;
- STATIC_ASSERT(PropertyCell::kSize <= kMaxRegularHeapObjectSize);
-
- HeapObject* result = nullptr;
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
- if (!allocation.To(&result)) return allocation;
-
- result->set_map_after_allocation(global_property_cell_map(),
- SKIP_WRITE_BARRIER);
- PropertyCell* cell = PropertyCell::cast(result);
- cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
- SKIP_WRITE_BARRIER);
- cell->set_property_details(PropertyDetails(Smi::kZero));
- cell->set_name(name);
- cell->set_value(the_hole_value());
- return result;
-}
-
-
-AllocationResult Heap::AllocateWeakCell(HeapObject* value) {
- int size = WeakCell::kSize;
- STATIC_ASSERT(WeakCell::kSize <= kMaxRegularHeapObjectSize);
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
- if (!allocation.To(&result)) return allocation;
- }
- result->set_map_after_allocation(weak_cell_map(), SKIP_WRITE_BARRIER);
- WeakCell::cast(result)->initialize(value);
- return result;
-}
-
-
-AllocationResult Heap::AllocateTransitionArray(int capacity) {
- DCHECK_LT(0, capacity);
- HeapObject* raw_array = nullptr;
- {
- AllocationResult allocation = AllocateRawFixedArray(capacity, TENURED);
- if (!allocation.To(&raw_array)) return allocation;
- }
- raw_array->set_map_after_allocation(transition_array_map(),
- SKIP_WRITE_BARRIER);
- TransitionArray* array = TransitionArray::cast(raw_array);
- array->set_length(capacity);
- MemsetPointer(array->data_start(), undefined_value(), capacity);
- // Transition arrays are tenured. When black allocation is on we have to
- // add the transition array to the list of encountered_transition_arrays.
- if (incremental_marking()->black_allocation()) {
- mark_compact_collector()->AddTransitionArray(array);
- }
- return array;
-}
-
void Heap::CreateJSEntryStub() {
JSEntryStub stub(isolate(), StackFrame::ENTRY);
set_js_entry_code(*stub.GetCode());
@@ -2723,8 +2607,6 @@ bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
case kMaterializedObjectsRootIndex:
case kMicrotaskQueueRootIndex:
case kDetachedContextsRootIndex:
- case kWeakObjectToCodeTableRootIndex:
- case kWeakNewSpaceObjectToCodeListRootIndex:
case kRetainedMapsRootIndex:
case kRetainingPathTargetsRootIndex:
case kFeedbackVectorsForProfilingToolsRootIndex:
@@ -2781,34 +2663,38 @@ void Heap::FlushNumberStringCache() {
}
}
+namespace {
-Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
- return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
-}
-
-
-Heap::RootListIndex Heap::RootIndexForFixedTypedArray(
- ExternalArrayType array_type) {
+Heap::RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type) {
switch (array_type) {
#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
case kExternal##Type##Array: \
- return kFixed##Type##ArrayMapRootIndex;
+ return Heap::kFixed##Type##ArrayMapRootIndex;
TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
#undef ARRAY_TYPE_TO_ROOT_INDEX
+ }
+ UNREACHABLE();
+}
+Heap::RootListIndex RootIndexForFixedTypedArray(ElementsKind elements_kind) {
+ switch (elements_kind) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case TYPE##_ELEMENTS: \
+ return Heap::kFixed##Type##ArrayMapRootIndex;
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
default:
UNREACHABLE();
+#undef TYPED_ARRAY_CASE
}
}
-
-Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray(
- ElementsKind elementsKind) {
- switch (elementsKind) {
+Heap::RootListIndex RootIndexForEmptyFixedTypedArray(
+ ElementsKind elements_kind) {
+ switch (elements_kind) {
#define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
case TYPE##_ELEMENTS: \
- return kEmptyFixed##Type##ArrayRootIndex;
+ return Heap::kEmptyFixed##Type##ArrayRootIndex;
TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
#undef ELEMENT_KIND_TO_ROOT_INDEX
@@ -2817,122 +2703,24 @@ Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray(
}
}
-FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(const Map* map) {
- return FixedTypedArrayBase::cast(
- roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]);
-}
-
-
-AllocationResult Heap::AllocateForeign(Address address,
- PretenureFlag pretenure) {
- // Statically ensure that it is safe to allocate foreigns in paged spaces.
- STATIC_ASSERT(Foreign::kSize <= kMaxRegularHeapObjectSize);
- AllocationSpace space = (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE;
- Foreign* result = nullptr;
- AllocationResult allocation = Allocate(foreign_map(), space);
- if (!allocation.To(&result)) return allocation;
- result->set_foreign_address(address);
- return result;
-}
-
-AllocationResult Heap::AllocateSmallOrderedHashSet(int capacity,
- PretenureFlag pretenure) {
- DCHECK_EQ(0, capacity % SmallOrderedHashSet::kLoadFactor);
- CHECK_GE(SmallOrderedHashSet::kMaxCapacity, capacity);
-
- int size = SmallOrderedHashSet::Size(capacity);
- AllocationSpace space = SelectSpace(pretenure);
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRaw(size, space);
- if (!allocation.To(&result)) return allocation;
- }
-
- result->set_map_after_allocation(small_ordered_hash_set_map(),
- SKIP_WRITE_BARRIER);
- Handle<SmallOrderedHashSet> table(SmallOrderedHashSet::cast(result));
- table->Initialize(isolate(), capacity);
- return result;
-}
-
-AllocationResult Heap::AllocateSmallOrderedHashMap(int capacity,
- PretenureFlag pretenure) {
- DCHECK_EQ(0, capacity % SmallOrderedHashMap::kLoadFactor);
- CHECK_GE(SmallOrderedHashMap::kMaxCapacity, capacity);
-
- int size = SmallOrderedHashMap::Size(capacity);
- AllocationSpace space = SelectSpace(pretenure);
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRaw(size, space);
- if (!allocation.To(&result)) return allocation;
- }
+} // namespace
- result->set_map_after_allocation(small_ordered_hash_map_map(),
- SKIP_WRITE_BARRIER);
- Handle<SmallOrderedHashMap> table(SmallOrderedHashMap::cast(result));
- table->Initialize(isolate(), capacity);
- return result;
+Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
+ return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
}
-AllocationResult Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
- if (length < 0 || length > ByteArray::kMaxLength) {
- v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
- }
- int size = ByteArray::SizeFor(length);
- AllocationSpace space = SelectSpace(pretenure);
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRaw(size, space);
- if (!allocation.To(&result)) return allocation;
- }
-
- result->set_map_after_allocation(byte_array_map(), SKIP_WRITE_BARRIER);
- ByteArray::cast(result)->set_length(length);
- ByteArray::cast(result)->clear_padding();
- return result;
+Map* Heap::MapForFixedTypedArray(ElementsKind elements_kind) {
+ return Map::cast(roots_[RootIndexForFixedTypedArray(elements_kind)]);
}
-
-AllocationResult Heap::AllocateBytecodeArray(int length,
- const byte* const raw_bytecodes,
- int frame_size,
- int parameter_count,
- FixedArray* constant_pool) {
- if (length < 0 || length > BytecodeArray::kMaxLength) {
- v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
- }
- // Bytecode array is pretenured, so the constant pool array should be too.
- DCHECK(!InNewSpace(constant_pool));
-
- int size = BytecodeArray::SizeFor(length);
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
- if (!allocation.To(&result)) return allocation;
- }
-
- result->set_map_after_allocation(bytecode_array_map(), SKIP_WRITE_BARRIER);
- BytecodeArray* instance = BytecodeArray::cast(result);
- instance->set_length(length);
- instance->set_frame_size(frame_size);
- instance->set_parameter_count(parameter_count);
- instance->set_incoming_new_target_or_generator_register(
- interpreter::Register::invalid_value());
- instance->set_interrupt_budget(interpreter::Interpreter::InterruptBudget());
- instance->set_osr_loop_nesting_level(0);
- instance->set_bytecode_age(BytecodeArray::kNoAgeBytecodeAge);
- instance->set_constant_pool(constant_pool);
- instance->set_handler_table(empty_byte_array());
- instance->set_source_position_table(empty_byte_array());
- CopyBytes(instance->GetFirstBytecodeAddress(), raw_bytecodes, length);
- instance->clear_padding();
-
- return result;
+FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(const Map* map) {
+ return FixedTypedArrayBase::cast(
+ roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]);
}
HeapObject* Heap::CreateFillerObjectAt(Address addr, int size,
- ClearRecordedSlots mode) {
+ ClearRecordedSlots clear_slots_mode,
+ ClearFreedMemoryMode clear_memory_mode) {
if (size == 0) return nullptr;
HeapObject* filler = HeapObject::FromAddress(addr);
if (size == kPointerSize) {
@@ -2943,14 +2731,22 @@ HeapObject* Heap::CreateFillerObjectAt(Address addr, int size,
filler->set_map_after_allocation(
reinterpret_cast<Map*>(root(kTwoPointerFillerMapRootIndex)),
SKIP_WRITE_BARRIER);
+ if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
+ Memory::Address_at(addr + kPointerSize) =
+ reinterpret_cast<Address>(kClearedFreeMemoryValue);
+ }
} else {
DCHECK_GT(size, 2 * kPointerSize);
filler->set_map_after_allocation(
reinterpret_cast<Map*>(root(kFreeSpaceMapRootIndex)),
SKIP_WRITE_BARRIER);
FreeSpace::cast(filler)->relaxed_write_size(size);
+ if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
+ memset(reinterpret_cast<void*>(addr + 2 * kPointerSize),
+ kClearedFreeMemoryValue, size - 2 * kPointerSize);
+ }
}
- if (mode == ClearRecordedSlots::kYes) {
+ if (clear_slots_mode == ClearRecordedSlots::kYes) {
ClearRecordedSlotRange(addr, addr + size);
}
@@ -2985,8 +2781,9 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
int elements_to_trim) {
CHECK_NOT_NULL(object);
DCHECK(CanMoveObjectStart(object));
- DCHECK(!object->IsFixedTypedArrayBase());
- DCHECK(!object->IsByteArray());
+ // Add a custom visitor to the concurrent marker if a new left-trimmable
+ // type is added.
+ DCHECK(object->IsFixedArray() || object->IsFixedDoubleArray());
const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
const int bytes_to_trim = elements_to_trim * element_size;
Map* map = object->map();
@@ -3097,1030 +2894,13 @@ void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
// avoid races with the sweeper thread.
object->synchronized_set_length(len - elements_to_trim);
- // Notify the heap profiler of change in object layout. The array may not be
- // moved during GC, and size has to be adjusted nevertheless.
- HeapProfiler* profiler = isolate()->heap_profiler();
- if (profiler->is_tracking_allocations()) {
- profiler->UpdateObjectSizeEvent(object->address(), object->Size());
- }
-}
-
-
-AllocationResult Heap::AllocateFixedTypedArrayWithExternalPointer(
- int length, ExternalArrayType array_type, void* external_pointer,
- PretenureFlag pretenure) {
- int size = FixedTypedArrayBase::kHeaderSize;
- AllocationSpace space = SelectSpace(pretenure);
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRaw(size, space);
- if (!allocation.To(&result)) return allocation;
- }
-
- result->set_map_after_allocation(MapForFixedTypedArray(array_type),
- SKIP_WRITE_BARRIER);
- FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(result);
- elements->set_base_pointer(Smi::kZero, SKIP_WRITE_BARRIER);
- elements->set_external_pointer(external_pointer, SKIP_WRITE_BARRIER);
- elements->set_length(length);
- return elements;
-}
-
-static void ForFixedTypedArray(ExternalArrayType array_type, int* element_size,
- ElementsKind* element_kind) {
- switch (array_type) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case kExternal##Type##Array: \
- *element_size = size; \
- *element_kind = TYPE##_ELEMENTS; \
- return;
-
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
-
- default:
- *element_size = 0; // Bogus
- *element_kind = UINT8_ELEMENTS; // Bogus
- UNREACHABLE();
- }
-}
-
-
-AllocationResult Heap::AllocateFixedTypedArray(int length,
- ExternalArrayType array_type,
- bool initialize,
- PretenureFlag pretenure) {
- int element_size;
- ElementsKind elements_kind;
- ForFixedTypedArray(array_type, &element_size, &elements_kind);
- int size = OBJECT_POINTER_ALIGN(length * element_size +
- FixedTypedArrayBase::kDataOffset);
- AllocationSpace space = SelectSpace(pretenure);
-
- HeapObject* object = nullptr;
- AllocationResult allocation = AllocateRaw(
- size, space,
- array_type == kExternalFloat64Array ? kDoubleAligned : kWordAligned);
- if (!allocation.To(&object)) return allocation;
-
- object->set_map_after_allocation(MapForFixedTypedArray(array_type),
- SKIP_WRITE_BARRIER);
- FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(object);
- elements->set_base_pointer(elements, SKIP_WRITE_BARRIER);
- elements->set_external_pointer(
- ExternalReference::fixed_typed_array_base_data_offset().address(),
- SKIP_WRITE_BARRIER);
- elements->set_length(length);
- if (initialize) memset(elements->DataPtr(), 0, elements->DataSize());
- return elements;
-}
-
-AllocationResult Heap::AllocateCode(int object_size, Movability movability) {
- DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
- AllocationResult allocation = AllocateRaw(object_size, CODE_SPACE);
-
- HeapObject* result = nullptr;
- if (!allocation.To(&result)) return allocation;
- if (movability == kImmovable) {
- Address address = result->address();
- MemoryChunk* chunk = MemoryChunk::FromAddress(address);
- // Code objects which should stay at a fixed address are allocated either
- // in the first page of code space or in large object space; during
- // snapshot creation, the containing page is marked as immovable instead.
- if (!Heap::IsImmovable(result)) {
- if (isolate()->serializer_enabled() ||
- code_space_->FirstPage()->Contains(address)) {
- chunk->MarkNeverEvacuate();
- } else {
- // Discard the first code allocation, which was on a page where it could
- // be moved.
- CreateFillerObjectAt(result->address(), object_size,
- ClearRecordedSlots::kNo);
- allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
- if (!allocation.To(&result)) return allocation;
- OnAllocationEvent(result, object_size);
- }
- }
- }
-
- result->set_map_after_allocation(code_map(), SKIP_WRITE_BARRIER);
- Code* code = Code::cast(result);
- DCHECK(IsAligned(bit_cast<intptr_t>(code->address()), kCodeAlignment));
- DCHECK(!memory_allocator()->code_range()->valid() ||
- memory_allocator()->code_range()->contains(code->address()) ||
- object_size <= code_space()->AreaSize());
- return code;
-}
-
-AllocationResult Heap::AllocateCode(
- const CodeDesc& desc, Code::Kind kind, Handle<Object> self_ref,
- int32_t builtin_index, ByteArray* reloc_info,
- CodeDataContainer* data_container, ByteArray* source_position_table,
- DeoptimizationData* deopt_data, Movability movability, uint32_t stub_key,
- bool is_turbofanned, int stack_slots, int safepoint_table_offset,
- int handler_table_offset) {
- bool has_unwinding_info = desc.unwinding_info != nullptr;
- DCHECK((has_unwinding_info && desc.unwinding_info_size > 0) ||
- (!has_unwinding_info && desc.unwinding_info_size == 0));
-
- // Compute size.
- int body_size = desc.instr_size;
- int unwinding_info_size_field_size = kInt64Size;
- if (has_unwinding_info) {
- body_size = RoundUp(body_size, kInt64Size) + desc.unwinding_info_size +
- unwinding_info_size_field_size;
- }
- int object_size = Code::SizeFor(RoundUp(body_size, kObjectAlignment));
-
- Code* code = nullptr;
- CodeSpaceMemoryModificationScope code_allocation(this);
- AllocationResult allocation = AllocateCode(object_size, movability);
- if (!allocation.To(&code)) return allocation;
-
- // The code object has not been fully initialized yet. We rely on the
- // fact that no allocation will happen from this point on.
- DisallowHeapAllocation no_gc;
- code->set_instruction_size(desc.instr_size);
- code->set_relocation_info(reloc_info);
- code->initialize_flags(kind, has_unwinding_info, is_turbofanned, stack_slots);
- code->set_safepoint_table_offset(safepoint_table_offset);
- code->set_handler_table_offset(handler_table_offset);
- code->set_code_data_container(data_container);
- code->set_has_tagged_params(true);
- code->set_deoptimization_data(deopt_data);
- code->set_stub_key(stub_key);
- code->set_source_position_table(source_position_table);
- code->set_protected_instructions(empty_fixed_array(), SKIP_WRITE_BARRIER);
- code->set_constant_pool_offset(desc.instr_size - desc.constant_pool_size);
- code->set_builtin_index(builtin_index);
- code->set_trap_handler_index(Smi::FromInt(-1));
-
- switch (code->kind()) {
- case Code::OPTIMIZED_FUNCTION:
- code->set_marked_for_deoptimization(false);
- break;
- case Code::JS_TO_WASM_FUNCTION:
- case Code::C_WASM_ENTRY:
- case Code::WASM_FUNCTION:
- code->set_has_tagged_params(false);
- break;
- default:
- break;
- }
-
- // Allow self-references to the created code object by patching the handle
- // to point to the newly allocated Code object.
- if (!self_ref.is_null()) *(self_ref.location()) = code;
-
- // Migrate generated code.
- // The generated code can contain Object** values (typically from handles)
- // that are dereferenced during the copy to point directly to the actual heap
- // objects. These pointers can include references to the code object itself,
- // through the self_reference parameter.
- code->CopyFrom(desc);
-
- code->clear_padding();
-
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) code->ObjectVerify();
-#endif
- DCHECK(IsAligned(bit_cast<intptr_t>(code->address()), kCodeAlignment));
- DCHECK(!memory_allocator()->code_range()->valid() ||
- memory_allocator()->code_range()->contains(code->address()) ||
- object_size <= code_space()->AreaSize());
- return code;
-}
-
-AllocationResult Heap::CopyCode(Code* code, CodeDataContainer* data_container) {
- AllocationResult allocation;
-
- HeapObject* result = nullptr;
- // Allocate an object the same size as the code object.
- int obj_size = code->Size();
- allocation = AllocateRaw(obj_size, CODE_SPACE);
- if (!allocation.To(&result)) return allocation;
-
- // Copy code object.
- Address old_addr = code->address();
- Address new_addr = result->address();
- CopyBlock(new_addr, old_addr, obj_size);
- Code* new_code = Code::cast(result);
-
- // Set the {CodeDataContainer}, it cannot be shared.
- new_code->set_code_data_container(data_container);
-
- // Clear the trap handler index since it can't be shared between code
- // objects. We have to do this before calling Relocate, because Relocate
- // would adjust the base pointer for the old code.
- new_code->set_trap_handler_index(Smi::FromInt(trap_handler::kInvalidIndex));
-
- // Relocate the copy.
- new_code->Relocate(new_addr - old_addr);
- // We have to iterate over the object and process its pointers when black
- // allocation is on.
- incremental_marking()->ProcessBlackAllocatedObject(new_code);
- // Record all references to embedded objects in the new code object.
- RecordWritesIntoCode(new_code);
-
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) new_code->ObjectVerify();
-#endif
- DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment));
- DCHECK(!memory_allocator()->code_range()->valid() ||
- memory_allocator()->code_range()->contains(new_code->address()) ||
- obj_size <= code_space()->AreaSize());
- return new_code;
-}
-
-AllocationResult Heap::CopyBytecodeArray(BytecodeArray* bytecode_array) {
- int size = BytecodeArray::SizeFor(bytecode_array->length());
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
- if (!allocation.To(&result)) return allocation;
- }
-
- result->set_map_after_allocation(bytecode_array_map(), SKIP_WRITE_BARRIER);
- BytecodeArray* copy = BytecodeArray::cast(result);
- copy->set_length(bytecode_array->length());
- copy->set_frame_size(bytecode_array->frame_size());
- copy->set_parameter_count(bytecode_array->parameter_count());
- copy->set_incoming_new_target_or_generator_register(
- bytecode_array->incoming_new_target_or_generator_register());
- copy->set_constant_pool(bytecode_array->constant_pool());
- copy->set_handler_table(bytecode_array->handler_table());
- copy->set_source_position_table(bytecode_array->source_position_table());
- copy->set_interrupt_budget(bytecode_array->interrupt_budget());
- copy->set_osr_loop_nesting_level(bytecode_array->osr_loop_nesting_level());
- copy->set_bytecode_age(bytecode_array->bytecode_age());
- bytecode_array->CopyBytecodesTo(copy);
- return copy;
-}
-
-void Heap::InitializeAllocationMemento(AllocationMemento* memento,
- AllocationSite* allocation_site) {
- memento->set_map_after_allocation(allocation_memento_map(),
- SKIP_WRITE_BARRIER);
- DCHECK(allocation_site->map() == allocation_site_map());
- memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
- if (FLAG_allocation_site_pretenuring) {
- allocation_site->IncrementMementoCreateCount();
- }
-}
-
-
-AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
- AllocationSite* allocation_site) {
- DCHECK(gc_state_ == NOT_IN_GC);
- DCHECK(map->instance_type() != MAP_TYPE);
- int size = map->instance_size();
- if (allocation_site != nullptr) {
- size += AllocationMemento::kSize;
- }
- HeapObject* result = nullptr;
- AllocationResult allocation = AllocateRaw(size, space);
- if (!allocation.To(&result)) return allocation;
- // New space objects are allocated white.
- WriteBarrierMode write_barrier_mode =
- space == NEW_SPACE ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
- result->set_map_after_allocation(map, write_barrier_mode);
- if (allocation_site != nullptr) {
- AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
- reinterpret_cast<Address>(result) + map->instance_size());
- InitializeAllocationMemento(alloc_memento, allocation_site);
- }
- return result;
-}
-
-AllocationResult Heap::AllocateJSPromise(JSFunction* constructor,
- PretenureFlag pretenure) {
- AllocationResult allocation = AllocateJSObject(constructor, pretenure);
- JSPromise* promise = nullptr;
- if (!allocation.To(&promise)) return allocation;
-
- // Setup JSPromise fields
- promise->set_reactions_or_result(Smi::kZero);
- promise->set_flags(0);
- for (int i = 0; i < v8::Promise::kEmbedderFieldCount; i++) {
- promise->SetEmbedderField(i, Smi::kZero);
- }
- return promise;
-}
-
-void Heap::InitializeJSObjectFromMap(JSObject* obj, Object* properties,
- Map* map) {
- obj->set_raw_properties_or_hash(properties);
- obj->initialize_elements();
- // TODO(1240798): Initialize the object's body using valid initial values
- // according to the object's initial map. For example, if the map's
- // instance type is JS_ARRAY_TYPE, the length field should be initialized
- // to a number (e.g. Smi::kZero) and the elements initialized to a
- // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
- // verification code has to cope with (temporarily) invalid objects. See,
- // for example, JSArray::JSArrayVerify.
- InitializeJSObjectBody(obj, map, JSObject::kHeaderSize);
-}
-
-
-void Heap::InitializeJSObjectBody(JSObject* obj, Map* map, int start_offset) {
- if (start_offset == map->instance_size()) return;
- DCHECK_LT(start_offset, map->instance_size());
-
- // We cannot always fill with one_pointer_filler_map because objects
- // created from API functions expect their embedder fields to be initialized
- // with undefined_value.
- // Pre-allocated fields need to be initialized with undefined_value as well
- // so that object accesses before the constructor completes (e.g. in the
- // debugger) will not cause a crash.
-
- // In case of Array subclassing, the |map| could already be transitioned to
- // a different elements kind from the initial map on which we track slack.
- bool in_progress = map->IsInobjectSlackTrackingInProgress();
- Object* filler;
- if (in_progress) {
- filler = one_pointer_filler_map();
- } else {
- filler = undefined_value();
- }
- obj->InitializeBody(map, start_offset, Heap::undefined_value(), filler);
- if (in_progress) {
- map->FindRootMap()->InobjectSlackTrackingStep();
- }
-}
-
-
-AllocationResult Heap::AllocateJSObjectFromMap(
- Map* map, PretenureFlag pretenure, AllocationSite* allocation_site) {
- // JSFunctions should be allocated using AllocateFunction to be
- // properly initialized.
- DCHECK(map->instance_type() != JS_FUNCTION_TYPE);
-
- // Both types of global objects should be allocated using
- // AllocateGlobalObject to be properly initialized.
- DCHECK(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
-
- // Allocate the backing storage for the properties.
- FixedArray* properties = empty_fixed_array();
-
- // Allocate the JSObject.
- AllocationSpace space = SelectSpace(pretenure);
- JSObject* js_obj = nullptr;
- AllocationResult allocation = Allocate(map, space, allocation_site);
- if (!allocation.To(&js_obj)) return allocation;
-
- // Initialize the JSObject.
- InitializeJSObjectFromMap(js_obj, properties, map);
- DCHECK(js_obj->HasFastElements() || js_obj->HasFixedTypedArrayElements() ||
- js_obj->HasFastStringWrapperElements() ||
- js_obj->HasFastArgumentsElements());
- return js_obj;
-}
-
-
-AllocationResult Heap::AllocateJSObject(JSFunction* constructor,
- PretenureFlag pretenure,
- AllocationSite* allocation_site) {
- DCHECK(constructor->has_initial_map());
-
- // Allocate the object based on the constructor's initial map.
- AllocationResult allocation = AllocateJSObjectFromMap(
- constructor->initial_map(), pretenure, allocation_site);
-#ifdef DEBUG
- // Make sure result is NOT a global object if valid.
- HeapObject* obj = nullptr;
- DCHECK(!allocation.To(&obj) || !obj->IsJSGlobalObject());
-#endif
- return allocation;
-}
-
-
-AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
- // Make the clone.
- Map* map = source->map();
-
- // We can only clone regexps, normal objects, api objects, errors or arrays.
- // Copying anything else will break invariants.
- CHECK(map->instance_type() == JS_REGEXP_TYPE ||
- map->instance_type() == JS_OBJECT_TYPE ||
- map->instance_type() == JS_ERROR_TYPE ||
- map->instance_type() == JS_ARRAY_TYPE ||
- map->instance_type() == JS_API_OBJECT_TYPE ||
- map->instance_type() == WASM_INSTANCE_TYPE ||
- map->instance_type() == WASM_MEMORY_TYPE ||
- map->instance_type() == WASM_MODULE_TYPE ||
- map->instance_type() == WASM_TABLE_TYPE ||
- map->instance_type() == JS_SPECIAL_API_OBJECT_TYPE);
-
- int object_size = map->instance_size();
- HeapObject* clone = nullptr;
-
- DCHECK(site == nullptr || AllocationSite::CanTrack(map->instance_type()));
-
- int adjusted_object_size =
- site != nullptr ? object_size + AllocationMemento::kSize : object_size;
- AllocationResult allocation = AllocateRaw(adjusted_object_size, NEW_SPACE);
- if (!allocation.To(&clone)) return allocation;
-
- SLOW_DCHECK(InNewSpace(clone));
- // Since we know the clone is allocated in new space, we can copy
- // the contents without worrying about updating the write barrier.
- CopyBlock(clone->address(), source->address(), object_size);
-
- if (site != nullptr) {
- AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
- reinterpret_cast<Address>(clone) + object_size);
- InitializeAllocationMemento(alloc_memento, site);
- }
-
- SLOW_DCHECK(JSObject::cast(clone)->GetElementsKind() ==
- source->GetElementsKind());
- FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
- // Update elements if necessary.
- if (elements->length() > 0) {
- FixedArrayBase* elem = nullptr;
- {
- AllocationResult allocation;
- if (elements->map() == fixed_cow_array_map()) {
- allocation = FixedArray::cast(elements);
- } else if (source->HasDoubleElements()) {
- allocation = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
- } else {
- allocation = CopyFixedArray(FixedArray::cast(elements));
- }
- if (!allocation.To(&elem)) return allocation;
- }
- JSObject::cast(clone)->set_elements(elem, SKIP_WRITE_BARRIER);
- }
-
- // Update properties if necessary.
- if (source->HasFastProperties()) {
- if (source->property_array()->length() > 0) {
- PropertyArray* properties = source->property_array();
- PropertyArray* prop = nullptr;
- {
- // TODO(gsathya): Do not copy hash code.
- AllocationResult allocation = CopyPropertyArray(properties);
- if (!allocation.To(&prop)) return allocation;
- }
- JSObject::cast(clone)->set_raw_properties_or_hash(prop,
- SKIP_WRITE_BARRIER);
- }
- } else {
- FixedArray* properties = FixedArray::cast(source->property_dictionary());
- FixedArray* prop = nullptr;
- {
- AllocationResult allocation = CopyFixedArray(properties);
- if (!allocation.To(&prop)) return allocation;
- }
- JSObject::cast(clone)->set_raw_properties_or_hash(prop, SKIP_WRITE_BARRIER);
- }
- // Return the new clone.
- return clone;
-}
-
-
-static inline void WriteOneByteData(Vector<const char> vector, uint8_t* chars,
- int len) {
- // Only works for one byte strings.
- DCHECK(vector.length() == len);
- MemCopy(chars, vector.start(), len);
-}
-
-static inline void WriteTwoByteData(Vector<const char> vector, uint16_t* chars,
- int len) {
- unibrow::Utf8Iterator it = unibrow::Utf8Iterator(vector);
- while (!it.Done()) {
- DCHECK_GT(len, 0);
- len -= 1;
-
- uint16_t c = *it;
- ++it;
- DCHECK_NE(unibrow::Utf8::kBadChar, c);
- *chars++ = c;
- }
- DCHECK_EQ(len, 0);
-}
-
-
-static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
- DCHECK(s->length() == len);
- String::WriteToFlat(s, chars, 0, len);
-}
-
-
-static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
- DCHECK(s->length() == len);
- String::WriteToFlat(s, chars, 0, len);
-}
-
-
-template <bool is_one_byte, typename T>
-AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
- uint32_t hash_field) {
- DCHECK_LE(0, chars);
- // Compute map and object size.
- int size;
- Map* map;
-
- DCHECK_LE(0, chars);
- DCHECK_GE(String::kMaxLength, chars);
- if (is_one_byte) {
- map = one_byte_internalized_string_map();
- size = SeqOneByteString::SizeFor(chars);
- } else {
- map = internalized_string_map();
- size = SeqTwoByteString::SizeFor(chars);
- }
-
- // Allocate string.
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
- if (!allocation.To(&result)) return allocation;
- }
-
- result->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
- // Set length and hash fields of the allocated string.
- String* answer = String::cast(result);
- answer->set_length(chars);
- answer->set_hash_field(hash_field);
-
- DCHECK_EQ(size, answer->Size());
-
- if (is_one_byte) {
- WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
- } else {
- WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
- }
- return answer;
-}
-
-
-// Need explicit instantiations.
-template AllocationResult Heap::AllocateInternalizedStringImpl<true>(String*,
- int,
- uint32_t);
-template AllocationResult Heap::AllocateInternalizedStringImpl<false>(String*,
- int,
- uint32_t);
-template AllocationResult Heap::AllocateInternalizedStringImpl<false>(
- Vector<const char>, int, uint32_t);
-
-
-AllocationResult Heap::AllocateRawOneByteString(int length,
- PretenureFlag pretenure) {
- DCHECK_LE(0, length);
- DCHECK_GE(String::kMaxLength, length);
- int size = SeqOneByteString::SizeFor(length);
- DCHECK_GE(SeqOneByteString::kMaxSize, size);
- AllocationSpace space = SelectSpace(pretenure);
-
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRaw(size, space);
- if (!allocation.To(&result)) return allocation;
- }
-
- // Partially initialize the object.
- result->set_map_after_allocation(one_byte_string_map(), SKIP_WRITE_BARRIER);
- String::cast(result)->set_length(length);
- String::cast(result)->set_hash_field(String::kEmptyHashField);
- DCHECK_EQ(size, HeapObject::cast(result)->Size());
-
- return result;
-}
-
-
-AllocationResult Heap::AllocateRawTwoByteString(int length,
- PretenureFlag pretenure) {
- DCHECK_LE(0, length);
- DCHECK_GE(String::kMaxLength, length);
- int size = SeqTwoByteString::SizeFor(length);
- DCHECK_GE(SeqTwoByteString::kMaxSize, size);
- AllocationSpace space = SelectSpace(pretenure);
-
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRaw(size, space);
- if (!allocation.To(&result)) return allocation;
- }
-
- // Partially initialize the object.
- result->set_map_after_allocation(string_map(), SKIP_WRITE_BARRIER);
- String::cast(result)->set_length(length);
- String::cast(result)->set_hash_field(String::kEmptyHashField);
- DCHECK_EQ(size, HeapObject::cast(result)->Size());
- return result;
-}
-
-
-AllocationResult Heap::AllocateEmptyFixedArray() {
- int size = FixedArray::SizeFor(0);
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
- if (!allocation.To(&result)) return allocation;
- }
- // Initialize the object.
- result->set_map_after_allocation(fixed_array_map(), SKIP_WRITE_BARRIER);
- FixedArray::cast(result)->set_length(0);
- return result;
-}
-
-AllocationResult Heap::AllocateEmptyScopeInfo() {
- int size = FixedArray::SizeFor(0);
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
- if (!allocation.To(&result)) return allocation;
- }
- // Initialize the object.
- result->set_map_after_allocation(scope_info_map(), SKIP_WRITE_BARRIER);
- FixedArray::cast(result)->set_length(0);
- return result;
-}
-
-AllocationResult Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
- if (!InNewSpace(src)) {
- return src;
- }
-
- int len = src->length();
- HeapObject* obj = nullptr;
- {
- AllocationResult allocation = AllocateRawFixedArray(len, TENURED);
- if (!allocation.To(&obj)) return allocation;
- }
- obj->set_map_after_allocation(fixed_array_map(), SKIP_WRITE_BARRIER);
- FixedArray* result = FixedArray::cast(obj);
- result->set_length(len);
-
- // Copy the content.
- DisallowHeapAllocation no_gc;
- WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
- for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
-
- // TODO(mvstanton): The map is set twice because of protection against
- // calling set() on a COW FixedArray. Issue v8:3221 was created to track
- // this; once it is resolved we might be able to remove this whole method.
- HeapObject::cast(obj)->set_map_after_allocation(fixed_cow_array_map(),
- SKIP_WRITE_BARRIER);
- return result;
-}
-
-
-AllocationResult Heap::AllocateEmptyFixedTypedArray(
- ExternalArrayType array_type) {
- return AllocateFixedTypedArray(0, array_type, false, TENURED);
-}
-
-namespace {
-template <typename T>
-void initialize_length(T* array, int length) {
- array->set_length(length);
-}
-
-template <>
-void initialize_length<PropertyArray>(PropertyArray* array, int length) {
- array->initialize_length(length);
-}
-
-} // namespace
-
-template <typename T>
-AllocationResult Heap::CopyArrayAndGrow(T* src, int grow_by,
- PretenureFlag pretenure) {
- int old_len = src->length();
- int new_len = old_len + grow_by;
- DCHECK(new_len >= old_len);
- HeapObject* obj = nullptr;
- {
- AllocationResult allocation = AllocateRawFixedArray(new_len, pretenure);
- if (!allocation.To(&obj)) return allocation;
- }
-
- obj->set_map_after_allocation(src->map(), SKIP_WRITE_BARRIER);
- T* result = T::cast(obj);
- initialize_length(result, new_len);
-
- // Copy the content.
- DisallowHeapAllocation no_gc;
- WriteBarrierMode mode = obj->GetWriteBarrierMode(no_gc);
- for (int i = 0; i < old_len; i++) result->set(i, src->get(i), mode);
- MemsetPointer(result->data_start() + old_len, undefined_value(), grow_by);
- return result;
-}
-
-template AllocationResult Heap::CopyArrayAndGrow(FixedArray* src, int grow_by,
- PretenureFlag pretenure);
-template AllocationResult Heap::CopyArrayAndGrow(PropertyArray* src,
- int grow_by,
- PretenureFlag pretenure);
-
-AllocationResult Heap::CopyFixedArrayUpTo(FixedArray* src, int new_len,
- PretenureFlag pretenure) {
- if (new_len == 0) return empty_fixed_array();
-
- DCHECK_LE(new_len, src->length());
-
- HeapObject* obj = nullptr;
- {
- AllocationResult allocation = AllocateRawFixedArray(new_len, pretenure);
- if (!allocation.To(&obj)) return allocation;
- }
- obj->set_map_after_allocation(fixed_array_map(), SKIP_WRITE_BARRIER);
-
- FixedArray* result = FixedArray::cast(obj);
- result->set_length(new_len);
-
- // Copy the content.
- DisallowHeapAllocation no_gc;
- WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
- for (int i = 0; i < new_len; i++) result->set(i, src->get(i), mode);
- return result;
-}
-
-template <typename T>
-AllocationResult Heap::CopyArrayWithMap(T* src, Map* map) {
- int len = src->length();
- HeapObject* obj = nullptr;
- {
- AllocationResult allocation = AllocateRawFixedArray(len, NOT_TENURED);
- if (!allocation.To(&obj)) return allocation;
- }
- obj->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
-
- T* result = T::cast(obj);
- DisallowHeapAllocation no_gc;
- WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
-
- // Eliminate the write barrier if possible.
- if (mode == SKIP_WRITE_BARRIER) {
- CopyBlock(obj->address() + kPointerSize, src->address() + kPointerSize,
- T::SizeFor(len) - kPointerSize);
- return obj;
- }
-
- // Slow case: Just copy the content one-by-one.
- initialize_length(result, len);
- for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
- return result;
-}
-
-template AllocationResult Heap::CopyArrayWithMap(FixedArray* src, Map* map);
-template AllocationResult Heap::CopyArrayWithMap(PropertyArray* src, Map* map);
-
-AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
- return CopyArrayWithMap(src, map);
-}
-
-AllocationResult Heap::CopyPropertyArray(PropertyArray* src) {
- return CopyArrayWithMap(src, property_array_map());
-}
-
-AllocationResult Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
- Map* map) {
- int len = src->length();
- HeapObject* obj = nullptr;
- {
- AllocationResult allocation = AllocateRawFixedDoubleArray(len, NOT_TENURED);
- if (!allocation.To(&obj)) return allocation;
- }
- obj->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
- CopyBlock(obj->address() + FixedDoubleArray::kLengthOffset,
- src->address() + FixedDoubleArray::kLengthOffset,
- FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
- return obj;
-}
-
-AllocationResult Heap::CopyFeedbackVector(FeedbackVector* src) {
- int len = src->length();
- HeapObject* obj = nullptr;
- {
- AllocationResult allocation = AllocateRawFeedbackVector(len, NOT_TENURED);
- if (!allocation.To(&obj)) return allocation;
- }
- obj->set_map_after_allocation(feedback_vector_map(), SKIP_WRITE_BARRIER);
-
- FeedbackVector* result = FeedbackVector::cast(obj);
-
- DisallowHeapAllocation no_gc;
- WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
-
- // Eliminate the write barrier if possible.
- if (mode == SKIP_WRITE_BARRIER) {
- CopyBlock(result->address() + kPointerSize,
- src->address() + kPointerSize,
- FeedbackVector::SizeFor(len) - kPointerSize);
- return result;
- }
-
- // Slow case: Just copy the content one-by-one.
- result->set_shared_function_info(src->shared_function_info());
- result->set_optimized_code_cell(src->optimized_code_cell());
- result->set_invocation_count(src->invocation_count());
- result->set_profiler_ticks(src->profiler_ticks());
- result->set_deopt_count(src->deopt_count());
- for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
- return result;
-}
-
-AllocationResult Heap::AllocateRawFixedArray(int length,
- PretenureFlag pretenure) {
- if (length < 0 || length > FixedArray::kMaxLength) {
- v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
- }
- int size = FixedArray::SizeFor(length);
- AllocationSpace space = SelectSpace(pretenure);
-
- AllocationResult result = AllocateRaw(size, space);
- if (!result.IsRetry() && size > kMaxRegularHeapObjectSize &&
- FLAG_use_marking_progress_bar) {
- MemoryChunk* chunk =
- MemoryChunk::FromAddress(result.ToObjectChecked()->address());
- chunk->SetFlag<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR);
- }
- return result;
-}
-
-AllocationResult Heap::AllocateFixedArrayWithFiller(
- RootListIndex map_root_index, int length, PretenureFlag pretenure,
- Object* filler) {
- // The zero-length case must be handled by the caller, where the knowledge
- // about the map is.
- DCHECK_LT(0, length);
- DCHECK(!InNewSpace(filler));
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
- if (!allocation.To(&result)) return allocation;
- }
- DCHECK(RootIsImmortalImmovable(map_root_index));
- Map* map = Map::cast(root(map_root_index));
- result->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
- FixedArray* array = FixedArray::cast(result);
- array->set_length(length);
- MemsetPointer(array->data_start(), filler, length);
- return array;
-}
-
-AllocationResult Heap::AllocatePropertyArray(int length,
- PretenureFlag pretenure) {
- // Allow length = 0 for the empty_property_array singleton.
- DCHECK_LE(0, length);
- DCHECK_IMPLIES(length == 0, pretenure == TENURED);
-
- DCHECK(!InNewSpace(undefined_value()));
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
- if (!allocation.To(&result)) return allocation;
- }
-
- result->set_map_after_allocation(property_array_map(), SKIP_WRITE_BARRIER);
- PropertyArray* array = PropertyArray::cast(result);
- array->initialize_length(length);
- MemsetPointer(array->data_start(), undefined_value(), length);
- return result;
-}
-
-AllocationResult Heap::AllocateUninitializedFixedArray(
- int length, PretenureFlag pretenure) {
- if (length == 0) return empty_fixed_array();
-
- HeapObject* obj = nullptr;
- {
- AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
- if (!allocation.To(&obj)) return allocation;
+ // Notify the heap object allocation tracker of change in object layout. The
+ // array may not be moved during GC, and size has to be adjusted nevertheless.
+ for (auto& tracker : allocation_trackers_) {
+ tracker->UpdateObjectSizeEvent(object->address(), object->Size());
}
-
- obj->set_map_after_allocation(fixed_array_map(), SKIP_WRITE_BARRIER);
- FixedArray::cast(obj)->set_length(length);
- return obj;
}
-
-AllocationResult Heap::AllocateUninitializedFixedDoubleArray(
- int length, PretenureFlag pretenure) {
- if (length == 0) return empty_fixed_array();
-
- HeapObject* elements = nullptr;
- AllocationResult allocation = AllocateRawFixedDoubleArray(length, pretenure);
- if (!allocation.To(&elements)) return allocation;
-
- elements->set_map_after_allocation(fixed_double_array_map(),
- SKIP_WRITE_BARRIER);
- FixedDoubleArray::cast(elements)->set_length(length);
- return elements;
-}
-
-
-AllocationResult Heap::AllocateRawFixedDoubleArray(int length,
- PretenureFlag pretenure) {
- if (length < 0 || length > FixedDoubleArray::kMaxLength) {
- v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
- }
- int size = FixedDoubleArray::SizeFor(length);
- AllocationSpace space = SelectSpace(pretenure);
-
- HeapObject* object = nullptr;
- {
- AllocationResult allocation = AllocateRaw(size, space, kDoubleAligned);
- if (!allocation.To(&object)) return allocation;
- }
-
- return object;
-}
-
-AllocationResult Heap::AllocateRawFeedbackVector(int length,
- PretenureFlag pretenure) {
- DCHECK_LE(0, length);
-
- int size = FeedbackVector::SizeFor(length);
- AllocationSpace space = SelectSpace(pretenure);
-
- HeapObject* object = nullptr;
- {
- AllocationResult allocation = AllocateRaw(size, space);
- if (!allocation.To(&object)) return allocation;
- }
-
- return object;
-}
-
-AllocationResult Heap::AllocateFeedbackVector(SharedFunctionInfo* shared,
- PretenureFlag pretenure) {
- int length = shared->feedback_metadata()->slot_count();
-
- HeapObject* result = nullptr;
- {
- AllocationResult allocation = AllocateRawFeedbackVector(length, pretenure);
- if (!allocation.To(&result)) return allocation;
- }
-
- // Initialize the object's map.
- result->set_map_after_allocation(feedback_vector_map(), SKIP_WRITE_BARRIER);
- FeedbackVector* vector = FeedbackVector::cast(result);
- vector->set_shared_function_info(shared);
- vector->set_optimized_code_cell(Smi::FromEnum(
- FLAG_log_function_events ? OptimizationMarker::kLogFirstExecution
- : OptimizationMarker::kNone));
- vector->set_length(length);
- vector->set_invocation_count(0);
- vector->set_profiler_ticks(0);
- vector->set_deopt_count(0);
- // TODO(leszeks): Initialize based on the feedback metadata.
- MemsetPointer(vector->slots_start(), undefined_value(), length);
- return vector;
-}
-
-AllocationResult Heap::AllocateSymbol() {
- // Statically ensure that it is safe to allocate symbols in paged spaces.
- STATIC_ASSERT(Symbol::kSize <= kMaxRegularHeapObjectSize);
-
- HeapObject* result = nullptr;
- AllocationResult allocation = AllocateRaw(Symbol::kSize, OLD_SPACE);
- if (!allocation.To(&result)) return allocation;
-
- result->set_map_after_allocation(symbol_map(), SKIP_WRITE_BARRIER);
-
- // Generate a random hash value.
- int hash = isolate()->GenerateIdentityHash(Name::kHashBitMask);
-
- Symbol::cast(result)
- ->set_hash_field(Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
- Symbol::cast(result)->set_name(undefined_value());
- Symbol::cast(result)->set_flags(0);
-
- DCHECK(!Symbol::cast(result)->is_private());
- return result;
-}
-
-AllocationResult Heap::AllocateStruct(InstanceType type,
- PretenureFlag pretenure) {
- Map* map;
- switch (type) {
-#define MAKE_CASE(NAME, Name, name) \
- case NAME##_TYPE: \
- map = name##_map(); \
- break;
- STRUCT_LIST(MAKE_CASE)
-#undef MAKE_CASE
- default:
- UNREACHABLE();
- }
- int size = map->instance_size();
- Struct* result = nullptr;
- {
- AllocationSpace space = SelectSpace(pretenure);
- AllocationResult allocation = Allocate(map, space);
- if (!allocation.To(&result)) return allocation;
- }
- result->InitializeBody(size);
- return result;
-}
-
-
void Heap::MakeHeapIterable() {
mark_compact_collector()->EnsureSweepingCompleted();
}
@@ -4191,6 +2971,34 @@ bool Heap::HasLowAllocationRate() {
HasLowOldGenerationAllocationRate();
}
+bool Heap::IsIneffectiveMarkCompact(size_t old_generation_size,
+ double mutator_utilization) {
+ const double kHighHeapPercentage = 0.8;
+ const double kLowMutatorUtilization = 0.4;
+ return old_generation_size >=
+ kHighHeapPercentage * max_old_generation_size_ &&
+ mutator_utilization < kLowMutatorUtilization;
+}
+
+void Heap::CheckIneffectiveMarkCompact(size_t old_generation_size,
+ double mutator_utilization) {
+ const int kMaxConsecutiveIneffectiveMarkCompacts = 4;
+ if (!FLAG_detect_ineffective_gcs_near_heap_limit) return;
+ if (!IsIneffectiveMarkCompact(old_generation_size, mutator_utilization)) {
+ consecutive_ineffective_mark_compacts_ = 0;
+ return;
+ }
+ ++consecutive_ineffective_mark_compacts_;
+ if (consecutive_ineffective_mark_compacts_ ==
+ kMaxConsecutiveIneffectiveMarkCompacts) {
+ if (InvokeNearHeapLimitCallback()) {
+ // The callback increased the heap limit.
+ consecutive_ineffective_mark_compacts_ = 0;
+ return;
+ }
+ FatalProcessOutOfMemory("Ineffective mark-compacts near heap limit");
+ }
+}
bool Heap::HasHighFragmentation() {
size_t used = PromotedSpaceSizeOfObjects();
@@ -4207,8 +3015,9 @@ bool Heap::HasHighFragmentation(size_t used, size_t committed) {
}
bool Heap::ShouldOptimizeForMemoryUsage() {
+ const size_t kOldGenerationSlack = max_old_generation_size_ / 8;
return FLAG_optimize_for_size || isolate()->IsIsolateInBackground() ||
- HighMemoryPressure();
+ HighMemoryPressure() || !CanExpandOldGeneration(kOldGenerationSlack);
}
void Heap::ActivateMemoryReducerIfNeeded() {
@@ -4328,17 +3137,22 @@ void Heap::NotifyObjectLayoutChange(HeapObject* object, int size,
class SlotCollectingVisitor final : public ObjectVisitor {
public:
void VisitPointers(HeapObject* host, Object** start, Object** end) override {
- for (Object** p = start; p < end; p++) {
+ VisitPointers(host, reinterpret_cast<MaybeObject**>(start),
+ reinterpret_cast<MaybeObject**>(end));
+ }
+ void VisitPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) final {
+ for (MaybeObject** p = start; p < end; p++) {
slots_.push_back(p);
}
}
int number_of_slots() { return static_cast<int>(slots_.size()); }
- Object** slot(int i) { return slots_[i]; }
+ MaybeObject** slot(int i) { return slots_[i]; }
private:
- std::vector<Object**> slots_;
+ std::vector<MaybeObject**> slots_;
};
void Heap::VerifyObjectLayoutChange(HeapObject* object, Map* new_map) {
@@ -4596,16 +3410,43 @@ void Heap::MemoryPressureNotification(MemoryPressureLevel level,
}
}
-void Heap::SetOutOfMemoryCallback(v8::debug::OutOfMemoryCallback callback,
- void* data) {
- out_of_memory_callback_ = callback;
- out_of_memory_callback_data_ = data;
+void Heap::AddNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
+ void* data) {
+ const size_t kMaxCallbacks = 100;
+ CHECK_LT(near_heap_limit_callbacks_.size(), kMaxCallbacks);
+ for (auto callback_data : near_heap_limit_callbacks_) {
+ CHECK_NE(callback_data.first, callback);
+ }
+ near_heap_limit_callbacks_.push_back(std::make_pair(callback, data));
+}
+
+void Heap::RemoveNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
+ size_t heap_limit) {
+ for (size_t i = 0; i < near_heap_limit_callbacks_.size(); i++) {
+ if (near_heap_limit_callbacks_[i].first == callback) {
+ near_heap_limit_callbacks_.erase(near_heap_limit_callbacks_.begin() + i);
+ if (heap_limit) {
+ RestoreHeapLimit(heap_limit);
+ }
+ return;
+ }
+ }
+ UNREACHABLE();
}
-void Heap::InvokeOutOfMemoryCallback() {
- if (out_of_memory_callback_) {
- out_of_memory_callback_(out_of_memory_callback_data_);
+bool Heap::InvokeNearHeapLimitCallback() {
+ if (near_heap_limit_callbacks_.size() > 0) {
+ v8::NearHeapLimitCallback callback =
+ near_heap_limit_callbacks_.back().first;
+ void* data = near_heap_limit_callbacks_.back().second;
+ size_t heap_limit = callback(data, max_old_generation_size_,
+ initial_max_old_generation_size_);
+ if (heap_limit > max_old_generation_size_) {
+ max_old_generation_size_ = heap_limit;
+ return true;
+ }
}
+ return false;
}
void Heap::CollectCodeStatistics() {
@@ -4695,7 +3536,7 @@ bool Heap::Contains(HeapObject* value) {
return HasBeenSetUp() &&
(new_space_->ToSpaceContains(value) || old_space_->Contains(value) ||
code_space_->Contains(value) || map_space_->Contains(value) ||
- lo_space_->Contains(value));
+ lo_space_->Contains(value) || read_only_space_->Contains(value));
}
bool Heap::ContainsSlow(Address addr) {
@@ -4705,7 +3546,8 @@ bool Heap::ContainsSlow(Address addr) {
return HasBeenSetUp() &&
(new_space_->ToSpaceContainsSlow(addr) ||
old_space_->ContainsSlow(addr) || code_space_->ContainsSlow(addr) ||
- map_space_->ContainsSlow(addr) || lo_space_->ContainsSlow(addr));
+ map_space_->ContainsSlow(addr) || lo_space_->ContainsSlow(addr) ||
+ read_only_space_->Contains(addr));
}
bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
@@ -4725,6 +3567,8 @@ bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
return map_space_->Contains(value);
case LO_SPACE:
return lo_space_->Contains(value);
+ case RO_SPACE:
+ return read_only_space_->Contains(value);
}
UNREACHABLE();
}
@@ -4746,6 +3590,8 @@ bool Heap::InSpaceSlow(Address addr, AllocationSpace space) {
return map_space_->ContainsSlow(addr);
case LO_SPACE:
return lo_space_->ContainsSlow(addr);
+ case RO_SPACE:
+ return read_only_space_->ContainsSlow(addr);
}
UNREACHABLE();
}
@@ -4758,6 +3604,7 @@ bool Heap::IsValidAllocationSpace(AllocationSpace space) {
case CODE_SPACE:
case MAP_SPACE:
case LO_SPACE:
+ case RO_SPACE:
return true;
default:
return false;
@@ -4783,6 +3630,22 @@ bool Heap::RootIsImmortalImmovable(int root_index) {
}
#ifdef VERIFY_HEAP
+class VerifyReadOnlyPointersVisitor : public VerifyPointersVisitor {
+ protected:
+ void VerifyPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) override {
+ VerifyPointersVisitor::VerifyPointers(host, start, end);
+
+ for (MaybeObject** current = start; current < end; current++) {
+ HeapObject* object;
+ if ((*current)->ToStrongOrWeakHeapObject(&object)) {
+ CHECK(
+ object->GetIsolate()->heap()->read_only_space()->Contains(object));
+ }
+ }
+ }
+};
+
void Heap::Verify() {
CHECK(HasBeenSetUp());
HandleScope scope(isolate());
@@ -4806,7 +3669,8 @@ void Heap::Verify() {
lo_space_->Verify();
- mark_compact_collector()->VerifyWeakEmbeddedObjectsInCode();
+ VerifyReadOnlyPointersVisitor read_only_visitor;
+ read_only_space_->Verify(&read_only_visitor);
}
class SlotVerifyingVisitor : public ObjectVisitor {
@@ -4815,10 +3679,22 @@ class SlotVerifyingVisitor : public ObjectVisitor {
std::set<std::pair<SlotType, Address> >* typed)
: untyped_(untyped), typed_(typed) {}
- virtual bool ShouldHaveBeenRecorded(HeapObject* host, Object* target) = 0;
+ virtual bool ShouldHaveBeenRecorded(HeapObject* host,
+ MaybeObject* target) = 0;
void VisitPointers(HeapObject* host, Object** start, Object** end) override {
+#ifdef DEBUG
for (Object** slot = start; slot < end; slot++) {
+ DCHECK(!HasWeakHeapObjectTag(*slot));
+ }
+#endif // DEBUG
+ VisitPointers(host, reinterpret_cast<MaybeObject**>(start),
+ reinterpret_cast<MaybeObject**>(end));
+ }
+
+ void VisitPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) final {
+ for (MaybeObject** slot = start; slot < end; slot++) {
if (ShouldHaveBeenRecorded(host, *slot)) {
CHECK_GT(untyped_->count(reinterpret_cast<Address>(slot)), 0);
}
@@ -4827,7 +3703,7 @@ class SlotVerifyingVisitor : public ObjectVisitor {
void VisitCodeTarget(Code* host, RelocInfo* rinfo) override {
Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- if (ShouldHaveBeenRecorded(host, target)) {
+ if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
CHECK(
InTypedSet(CODE_TARGET_SLOT, rinfo->pc()) ||
(rinfo->IsInConstantPool() &&
@@ -4837,7 +3713,7 @@ class SlotVerifyingVisitor : public ObjectVisitor {
void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
Object* target = rinfo->target_object();
- if (ShouldHaveBeenRecorded(host, target)) {
+ if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
CHECK(InTypedSet(EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
(rinfo->IsInConstantPool() &&
InTypedSet(OBJECT_SLOT, rinfo->constant_pool_entry_address())));
@@ -4858,10 +3734,11 @@ class OldToNewSlotVerifyingVisitor : public SlotVerifyingVisitor {
std::set<std::pair<SlotType, Address> >* typed)
: SlotVerifyingVisitor(untyped, typed), heap_(heap) {}
- bool ShouldHaveBeenRecorded(HeapObject* host, Object* target) override {
- DCHECK_IMPLIES(target->IsHeapObject() && heap_->InNewSpace(target),
- heap_->InToSpace(target));
- return target->IsHeapObject() && heap_->InNewSpace(target) &&
+ bool ShouldHaveBeenRecorded(HeapObject* host, MaybeObject* target) override {
+ DCHECK_IMPLIES(
+ target->IsStrongOrWeakHeapObject() && heap_->InNewSpace(target),
+ heap_->InToSpace(target));
+ return target->IsStrongOrWeakHeapObject() && heap_->InNewSpace(target) &&
!heap_->InNewSpace(host);
}
@@ -4938,6 +3815,14 @@ void Heap::ZapFromSpace() {
}
}
+void Heap::ZapCodeObject(Address start_address, int size_in_bytes) {
+#ifdef DEBUG
+ for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
+ reinterpret_cast<Object**>(start_address)[i] = Smi::FromInt(kCodeZapValue);
+ }
+#endif
+}
+
void Heap::IterateRoots(RootVisitor* v, VisitMode mode) {
IterateStrongRoots(v, mode);
IterateWeakRoots(v, mode);
@@ -5128,7 +4013,7 @@ bool Heap::ConfigureHeap(size_t max_semi_space_size_in_kb,
// Overwrite default configuration.
if (max_semi_space_size_in_kb != 0) {
max_semi_space_size_ =
- ROUND_UP(max_semi_space_size_in_kb * KB, Page::kPageSize);
+ RoundUp<Page::kPageSize>(max_semi_space_size_in_kb * KB);
}
if (max_old_generation_size_in_mb != 0) {
max_old_generation_size_ = max_old_generation_size_in_mb * MB;
@@ -5144,9 +4029,9 @@ bool Heap::ConfigureHeap(size_t max_semi_space_size_in_kb,
}
if (Page::kPageSize > MB) {
- max_semi_space_size_ = ROUND_UP(max_semi_space_size_, Page::kPageSize);
+ max_semi_space_size_ = RoundUp<Page::kPageSize>(max_semi_space_size_);
max_old_generation_size_ =
- ROUND_UP(max_old_generation_size_, Page::kPageSize);
+ RoundUp<Page::kPageSize>(max_old_generation_size_);
}
if (FLAG_stress_compaction) {
@@ -5178,7 +4063,7 @@ bool Heap::ConfigureHeap(size_t max_semi_space_size_in_kb,
}
} else {
initial_semispace_size_ =
- ROUND_UP(initial_semispace_size, Page::kPageSize);
+ RoundUp<Page::kPageSize>(initial_semispace_size);
}
}
@@ -5189,7 +4074,8 @@ bool Heap::ConfigureHeap(size_t max_semi_space_size_in_kb,
}
// The old generation is paged and needs at least one page for each space.
- int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
+ int paged_space_count =
+ LAST_GROWABLE_PAGED_SPACE - FIRST_GROWABLE_PAGED_SPACE + 1;
initial_max_old_generation_size_ = max_old_generation_size_ =
Max(static_cast<size_t>(paged_space_count * Page::kPageSize),
max_old_generation_size_);
@@ -5593,6 +4479,73 @@ void Heap::DisableInlineAllocation() {
}
}
+HeapObject* Heap::AllocateRawWithRetry(int size, AllocationSpace space,
+ AllocationAlignment alignment) {
+ AllocationResult alloc = AllocateRaw(size, space, alignment);
+ HeapObject* result;
+ if (alloc.To(&result)) {
+ DCHECK(result != exception());
+ return result;
+ }
+ // Two GCs before panicking. In new space this will almost always succeed.
+ for (int i = 0; i < 2; i++) {
+ CollectGarbage(alloc.RetrySpace(),
+ GarbageCollectionReason::kAllocationFailure);
+ alloc = AllocateRaw(size, space, alignment);
+ if (alloc.To(&result)) {
+ DCHECK(result != exception());
+ return result;
+ }
+ }
+ isolate()->counters()->gc_last_resort_from_handles()->Increment();
+ CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
+ {
+ AlwaysAllocateScope scope(isolate());
+ alloc = AllocateRaw(size, space, alignment);
+ }
+ if (alloc.To(&result)) {
+ DCHECK(result != exception());
+ return result;
+ }
+ // TODO(1181417): Fix this.
+ FatalProcessOutOfMemory("CALL_AND_RETRY_LAST");
+ return nullptr;
+}
+
+// TODO(jkummerow): Refactor this. AllocateRaw should take an "immovability"
+// parameter and just do what's necessary.
+HeapObject* Heap::AllocateRawCodeInLargeObjectSpace(int size) {
+ AllocationResult alloc = lo_space()->AllocateRaw(size, EXECUTABLE);
+ HeapObject* result;
+ if (alloc.To(&result)) {
+ DCHECK(result != exception());
+ return result;
+ }
+ // Two GCs before panicking.
+ for (int i = 0; i < 2; i++) {
+ CollectGarbage(alloc.RetrySpace(),
+ GarbageCollectionReason::kAllocationFailure);
+ alloc = lo_space()->AllocateRaw(size, EXECUTABLE);
+ if (alloc.To(&result)) {
+ DCHECK(result != exception());
+ return result;
+ }
+ }
+ isolate()->counters()->gc_last_resort_from_handles()->Increment();
+ CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
+ {
+ AlwaysAllocateScope scope(isolate());
+ alloc = lo_space()->AllocateRaw(size, EXECUTABLE);
+ }
+ if (alloc.To(&result)) {
+ DCHECK(result != exception());
+ return result;
+ }
+ // TODO(1181417): Fix this.
+ FatalProcessOutOfMemory("CALL_AND_RETRY_LAST");
+ return nullptr;
+}
+
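A minimal usage sketch, not part of this diff: AllocateRawWithRetry either hands back usable memory after at most two GCs plus a last-resort collection, or terminates via FatalProcessOutOfMemory, so callers need no failure path. The member name below is hypothetical, and the call assumes it runs inside Heap (or a friend such as the Factory), since the helper sits with the private allocation methods.

HeapObject* Heap::AllocateUninitializedStructLike(int size_in_bytes) {
  // Hypothetical member, for illustration only: either returns memory or
  // never returns, because AllocateRawWithRetry crashes on persistent OOM.
  HeapObject* result =
      AllocateRawWithRetry(size_in_bytes, OLD_SPACE, kWordAligned);
  // The memory is uninitialized; install a map before anything can trigger
  // a GC that iterates this object.
  return result;
}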
bool Heap::SetUp() {
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
allocation_timeout_ = NextAllocationTimeout();
@@ -5622,7 +4575,8 @@ bool Heap::SetUp() {
mark_compact_collector_ = new MarkCompactCollector(this);
incremental_marking_ =
- new IncrementalMarking(this, mark_compact_collector_->marking_worklist());
+ new IncrementalMarking(this, mark_compact_collector_->marking_worklist(),
+ mark_compact_collector_->weak_objects());
if (FLAG_concurrent_marking) {
MarkCompactCollector::MarkingWorklist* marking_worklist =
@@ -5660,6 +4614,10 @@ bool Heap::SetUp() {
space_[LO_SPACE] = lo_space_ = new LargeObjectSpace(this, LO_SPACE);
if (!lo_space_->SetUp()) return false;
+ space_[RO_SPACE] = read_only_space_ =
+ new ReadOnlySpace(this, RO_SPACE, NOT_EXECUTABLE);
+ if (!read_only_space_->SetUp()) return false;
+
// Set up the seed that is used to randomize the string hash function.
DCHECK_EQ(Smi::kZero, hash_seed());
if (FLAG_randomize_hashes) InitializeHashSeed();
@@ -5670,7 +4628,11 @@ bool Heap::SetUp() {
}
tracer_ = new GCTracer(this);
+#ifdef ENABLE_MINOR_MC
minor_mark_compact_collector_ = new MinorMarkCompactCollector(this);
+#else
+ minor_mark_compact_collector_ = nullptr;
+#endif // ENABLE_MINOR_MC
array_buffer_collector_ = new ArrayBufferCollector(this);
gc_idle_time_handler_ = new GCIdleTimeHandler();
memory_reducer_ = new MemoryReducer(this);
@@ -5687,9 +4649,11 @@ bool Heap::SetUp() {
store_buffer()->SetUp();
mark_compact_collector()->SetUp();
+#ifdef ENABLE_MINOR_MC
if (minor_mark_compact_collector() != nullptr) {
minor_mark_compact_collector()->SetUp();
}
+#endif // ENABLE_MINOR_MC
idle_scavenge_observer_ = new IdleScavengeObserver(
*this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask);
@@ -5711,6 +4675,8 @@ bool Heap::SetUp() {
write_protect_code_memory_ = FLAG_write_protect_code_memory;
+ external_reference_table_.Init(isolate_);
+
return true;
}
@@ -5822,9 +4788,10 @@ void Heap::RegisterExternallyReferencedObject(Object** object) {
}
}
+void Heap::StartTearDown() { SetGCState(TEAR_DOWN); }
+
void Heap::TearDown() {
- SetGCState(TEAR_DOWN);
- DCHECK(!use_tasks_);
+ DCHECK_EQ(gc_state_, TEAR_DOWN);
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
@@ -5868,11 +4835,13 @@ void Heap::TearDown() {
mark_compact_collector_ = nullptr;
}
+#ifdef ENABLE_MINOR_MC
if (minor_mark_compact_collector_ != nullptr) {
minor_mark_compact_collector_->TearDown();
delete minor_mark_compact_collector_;
minor_mark_compact_collector_ = nullptr;
}
+#endif // ENABLE_MINOR_MC
if (array_buffer_collector_ != nullptr) {
delete array_buffer_collector_;
@@ -5914,6 +4883,11 @@ void Heap::TearDown() {
external_string_table_.TearDown();
+ // Tear down all ArrayBuffers before tearing down the heap since their
+ // byte_length may be a HeapNumber which is required for freeing the backing
+ // store.
+ ArrayBufferTracker::TearDown(this);
+
delete tracer_;
tracer_ = nullptr;
@@ -5942,6 +4916,11 @@ void Heap::TearDown() {
lo_space_ = nullptr;
}
+ if (read_only_space_ != nullptr) {
+ delete read_only_space_;
+ read_only_space_ = nullptr;
+ }
+
store_buffer()->TearDown();
memory_allocator()->TearDown();
@@ -6006,75 +4985,48 @@ void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
UNREACHABLE();
}
-// TODO(ishell): Find a better place for this.
-void Heap::AddWeakNewSpaceObjectToCodeDependency(Handle<HeapObject> obj,
- Handle<WeakCell> code) {
- DCHECK(InNewSpace(*obj));
- DCHECK(!InNewSpace(*code));
- Handle<ArrayList> list(weak_new_space_object_to_code_list(), isolate());
- list = ArrayList::Add(list, isolate()->factory()->NewWeakCell(obj), code);
- if (*list != weak_new_space_object_to_code_list()) {
- set_weak_new_space_object_to_code_list(*list);
- }
-}
-
-// TODO(ishell): Find a better place for this.
-void Heap::AddWeakObjectToCodeDependency(Handle<HeapObject> obj,
- Handle<DependentCode> dep) {
- DCHECK(!InNewSpace(*obj));
- DCHECK(!InNewSpace(*dep));
- Handle<WeakHashTable> table(weak_object_to_code_table(), isolate());
- table = WeakHashTable::Put(table, obj, dep);
- if (*table != weak_object_to_code_table())
- set_weak_object_to_code_table(*table);
- DCHECK_EQ(*dep, LookupWeakObjectToCodeDependency(obj));
-}
-
-
-DependentCode* Heap::LookupWeakObjectToCodeDependency(Handle<HeapObject> obj) {
- Object* dep = weak_object_to_code_table()->Lookup(obj);
- if (dep->IsDependentCode()) return DependentCode::cast(dep);
- return DependentCode::cast(empty_fixed_array());
-}
-
namespace {
-void CompactWeakFixedArray(Object* object) {
- if (object->IsWeakFixedArray()) {
- WeakFixedArray* array = WeakFixedArray::cast(object);
- array->Compact<WeakFixedArray::NullCallback>();
+void CompactFixedArrayOfWeakCells(Object* object) {
+ if (object->IsFixedArrayOfWeakCells()) {
+ FixedArrayOfWeakCells* array = FixedArrayOfWeakCells::cast(object);
+ array->Compact<FixedArrayOfWeakCells::NullCallback>();
}
}
} // anonymous namespace
-void Heap::CompactWeakFixedArrays() {
- // Find known WeakFixedArrays and compact them.
+void Heap::CompactFixedArraysOfWeakCells() {
+ // Find known FixedArrayOfWeakCells and compact them.
HeapIterator iterator(this);
for (HeapObject* o = iterator.next(); o != nullptr; o = iterator.next()) {
if (o->IsPrototypeInfo()) {
Object* prototype_users = PrototypeInfo::cast(o)->prototype_users();
- if (prototype_users->IsWeakFixedArray()) {
- WeakFixedArray* array = WeakFixedArray::cast(prototype_users);
+ if (prototype_users->IsFixedArrayOfWeakCells()) {
+ FixedArrayOfWeakCells* array =
+ FixedArrayOfWeakCells::cast(prototype_users);
array->Compact<JSObject::PrototypeRegistryCompactionCallback>();
}
}
}
- CompactWeakFixedArray(noscript_shared_function_infos());
- CompactWeakFixedArray(script_list());
- CompactWeakFixedArray(weak_stack_trace_list());
+ CompactFixedArrayOfWeakCells(noscript_shared_function_infos());
+ CompactFixedArrayOfWeakCells(script_list());
+ CompactFixedArrayOfWeakCells(weak_stack_trace_list());
}
void Heap::AddRetainedMap(Handle<Map> map) {
+ if (map->is_in_retained_map_list()) {
+ return;
+ }
Handle<WeakCell> cell = Map::WeakCellForMap(map);
Handle<ArrayList> array(retained_maps(), isolate());
if (array->IsFull()) {
CompactRetainedMaps(*array);
}
array = ArrayList::Add(
- array, cell, handle(Smi::FromInt(FLAG_retain_maps_for_n_gc), isolate()),
- ArrayList::kReloadLengthAfterAllocation);
+ array, cell, handle(Smi::FromInt(FLAG_retain_maps_for_n_gc), isolate()));
if (*array != retained_maps()) {
set_retained_maps(*array);
}
+ map->set_is_in_retained_map_list(true);
}
@@ -6106,8 +5058,8 @@ void Heap::CompactRetainedMaps(ArrayList* retained_maps) {
if (new_length != length) retained_maps->SetLength(new_length);
}
-void Heap::FatalProcessOutOfMemory(const char* location, bool is_heap_oom) {
- v8::internal::V8::FatalProcessOutOfMemory(location, is_heap_oom);
+void Heap::FatalProcessOutOfMemory(const char* location) {
+ v8::internal::V8::FatalProcessOutOfMemory(isolate(), location, true);
}
#ifdef DEBUG
@@ -6211,6 +5163,10 @@ void Heap::RecordWritesIntoCode(Code* code) {
PagedSpace* PagedSpaces::next() {
switch (counter_++) {
+ case RO_SPACE:
+ // skip NEW_SPACE
+ counter_++;
+ return heap_->read_only_space();
case OLD_SPACE:
return heap_->old_space();
case CODE_SPACE:
@@ -6285,12 +5241,19 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
void VisitPointers(HeapObject* host, Object** start,
Object** end) override {
+ MarkPointers(reinterpret_cast<MaybeObject**>(start),
+ reinterpret_cast<MaybeObject**>(end));
+ }
+
+ void VisitPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) final {
MarkPointers(start, end);
}
void VisitRootPointers(Root root, const char* description, Object** start,
Object** end) override {
- MarkPointers(start, end);
+ MarkPointers(reinterpret_cast<MaybeObject**>(start),
+ reinterpret_cast<MaybeObject**>(end));
}
void TransitiveClosure() {
@@ -6302,12 +5265,14 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
}
private:
- void MarkPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) {
- if (!(*p)->IsHeapObject()) continue;
- HeapObject* obj = HeapObject::cast(*p);
- if (filter_->MarkAsReachable(obj)) {
- marking_stack_.push_back(obj);
+ void MarkPointers(MaybeObject** start, MaybeObject** end) {
+ // Treat weak references as strong.
+ for (MaybeObject** p = start; p < end; p++) {
+ HeapObject* heap_object;
+ if ((*p)->ToStrongOrWeakHeapObject(&heap_object)) {
+ if (filter_->MarkAsReachable(heap_object)) {
+ marking_stack_.push_back(heap_object);
+ }
}
}
}
@@ -6606,6 +5571,8 @@ const char* AllocationSpaceName(AllocationSpace space) {
return "MAP_SPACE";
case LO_SPACE:
return "LO_SPACE";
+ case RO_SPACE:
+ return "RO_SPACE";
default:
UNREACHABLE();
}
@@ -6614,23 +5581,32 @@ const char* AllocationSpaceName(AllocationSpace space) {
void VerifyPointersVisitor::VisitPointers(HeapObject* host, Object** start,
Object** end) {
- VerifyPointers(start, end);
+ VerifyPointers(host, reinterpret_cast<MaybeObject**>(start),
+ reinterpret_cast<MaybeObject**>(end));
+}
+
+void VerifyPointersVisitor::VisitPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) {
+ VerifyPointers(host, start, end);
}
void VerifyPointersVisitor::VisitRootPointers(Root root,
const char* description,
Object** start, Object** end) {
- VerifyPointers(start, end);
+ VerifyPointers(nullptr, reinterpret_cast<MaybeObject**>(start),
+ reinterpret_cast<MaybeObject**>(end));
}
-void VerifyPointersVisitor::VerifyPointers(Object** start, Object** end) {
- for (Object** current = start; current < end; current++) {
- if ((*current)->IsHeapObject()) {
- HeapObject* object = HeapObject::cast(*current);
+void VerifyPointersVisitor::VerifyPointers(HeapObject* host,
+ MaybeObject** start,
+ MaybeObject** end) {
+ for (MaybeObject** current = start; current < end; current++) {
+ HeapObject* object;
+ if ((*current)->ToStrongOrWeakHeapObject(&object)) {
CHECK(object->GetIsolate()->heap()->Contains(object));
CHECK(object->map()->IsMap());
} else {
- CHECK((*current)->IsSmi());
+ CHECK((*current)->IsSmi() || (*current)->IsClearedWeakHeapObject());
}
}
}
@@ -6668,6 +5644,7 @@ bool Heap::AllowedToBeMigrated(HeapObject* obj, AllocationSpace dst) {
return dst == CODE_SPACE && type == CODE_TYPE;
case MAP_SPACE:
case LO_SPACE:
+ case RO_SPACE:
return false;
}
UNREACHABLE();
@@ -6720,9 +5697,7 @@ bool Heap::GcSafeCodeContains(HeapObject* code, Address addr) {
Map* map = GcSafeMapOfCodeSpaceObject(code);
DCHECK(map == code->GetHeap()->code_map());
#ifdef V8_EMBEDDED_BUILTINS
- if (FLAG_stress_off_heap_code) {
- if (InstructionStream::TryLookupCode(isolate(), addr) == code) return true;
- }
+ if (InstructionStream::TryLookupCode(isolate(), addr) == code) return true;
#endif
Address start = code->address();
Address end = code->address() + code->SizeFromMap(map);
@@ -6731,10 +5706,8 @@ bool Heap::GcSafeCodeContains(HeapObject* code, Address addr) {
Code* Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
#ifdef V8_EMBEDDED_BUILTINS
- if (FLAG_stress_off_heap_code) {
- Code* code = InstructionStream::TryLookupCode(isolate(), inner_pointer);
- if (code != nullptr) return code;
- }
+ Code* code = InstructionStream::TryLookupCode(isolate(), inner_pointer);
+ if (code != nullptr) return code;
#endif
// Check if the inner pointer points into a large object chunk.
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index 63bcfb2990..cdd44f7a15 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -8,6 +8,7 @@
#include <cmath>
#include <map>
#include <unordered_map>
+#include <unordered_set>
#include <vector>
// Clients of this interface shouldn't depend on lots of heap internals.
@@ -17,6 +18,7 @@
#include "src/allocation.h"
#include "src/assert-scope.h"
#include "src/base/atomic-utils.h"
+#include "src/external-reference-table.h"
#include "src/globals.h"
#include "src/heap-symbols.h"
#include "src/objects.h"
@@ -38,6 +40,7 @@ class HeapTester;
class TestMemoryAllocatorScope;
} // namespace heap
+class BoilerplateDescription;
class BytecodeArray;
class CodeDataContainer;
class DeoptimizationData;
@@ -87,7 +90,7 @@ using v8::MemoryPressureLevel;
/* Entries beyond the first 32 */ \
/* The roots above this line should be boring from a GC point of view. */ \
/* This means they are never in new space and never on a page that is */ \
- /* being compacted. */ \
+ /* being compacted.*/ \
/* Oddballs */ \
V(Oddball, arguments_marker, ArgumentsMarker) \
V(Oddball, exception, Exception) \
@@ -105,31 +108,36 @@ using v8::MemoryPressureLevel;
V(Map, debug_evaluate_context_map, DebugEvaluateContextMap) \
V(Map, script_context_table_map, ScriptContextTableMap) \
/* Maps */ \
- V(Map, descriptor_array_map, DescriptorArrayMap) \
+ V(Map, feedback_metadata_map, FeedbackMetadataArrayMap) \
V(Map, array_list_map, ArrayListMap) \
+ V(Map, bigint_map, BigIntMap) \
+ V(Map, boilerplate_description_map, BoilerplateDescriptionMap) \
+ V(Map, bytecode_array_map, BytecodeArrayMap) \
+ V(Map, code_data_container_map, CodeDataContainerMap) \
+ V(Map, descriptor_array_map, DescriptorArrayMap) \
+ V(Map, external_map, ExternalMap) \
V(Map, fixed_double_array_map, FixedDoubleArrayMap) \
+ V(Map, global_dictionary_map, GlobalDictionaryMap) \
+ V(Map, many_closures_cell_map, ManyClosuresCellMap) \
+ V(Map, message_object_map, JSMessageObjectMap) \
+ V(Map, module_info_map, ModuleInfoMap) \
V(Map, mutable_heap_number_map, MutableHeapNumberMap) \
- V(Map, ordered_hash_map_map, OrderedHashMapMap) \
- V(Map, ordered_hash_set_map, OrderedHashSetMap) \
V(Map, name_dictionary_map, NameDictionaryMap) \
- V(Map, global_dictionary_map, GlobalDictionaryMap) \
+ V(Map, no_closures_cell_map, NoClosuresCellMap) \
V(Map, number_dictionary_map, NumberDictionaryMap) \
+ V(Map, one_closure_cell_map, OneClosureCellMap) \
+ V(Map, ordered_hash_map_map, OrderedHashMapMap) \
+ V(Map, ordered_hash_set_map, OrderedHashSetMap) \
+ V(Map, property_array_map, PropertyArrayMap) \
+ V(Map, side_effect_call_handler_info_map, SideEffectCallHandlerInfoMap) \
+ V(Map, side_effect_free_call_handler_info_map, \
+ SideEffectFreeCallHandlerInfoMap) \
V(Map, simple_number_dictionary_map, SimpleNumberDictionaryMap) \
- V(Map, string_table_map, StringTableMap) \
- V(Map, weak_hash_table_map, WeakHashTableMap) \
V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap) \
V(Map, small_ordered_hash_map_map, SmallOrderedHashMapMap) \
V(Map, small_ordered_hash_set_map, SmallOrderedHashSetMap) \
- V(Map, code_data_container_map, CodeDataContainerMap) \
- V(Map, message_object_map, JSMessageObjectMap) \
- V(Map, external_map, ExternalMap) \
- V(Map, bytecode_array_map, BytecodeArrayMap) \
- V(Map, module_info_map, ModuleInfoMap) \
- V(Map, no_closures_cell_map, NoClosuresCellMap) \
- V(Map, one_closure_cell_map, OneClosureCellMap) \
- V(Map, many_closures_cell_map, ManyClosuresCellMap) \
- V(Map, property_array_map, PropertyArrayMap) \
- V(Map, bigint_map, BigIntMap) \
+ V(Map, string_table_map, StringTableMap) \
+ V(Map, weak_fixed_array_map, WeakFixedArrayMap) \
/* String maps */ \
V(Map, native_source_string_map, NativeSourceStringMap) \
V(Map, string_map, StringMap) \
@@ -186,6 +194,8 @@ using v8::MemoryPressureLevel;
V(EnumCache, empty_enum_cache, EmptyEnumCache) \
V(PropertyArray, empty_property_array, EmptyPropertyArray) \
V(ByteArray, empty_byte_array, EmptyByteArray) \
+ V(BoilerplateDescription, empty_boilerplate_description, \
+ EmptyBoilerplateDescription) \
V(FixedTypedArrayBase, empty_fixed_uint8_array, EmptyFixedUint8Array) \
V(FixedTypedArrayBase, empty_fixed_int8_array, EmptyFixedInt8Array) \
V(FixedTypedArrayBase, empty_fixed_uint16_array, EmptyFixedUint16Array) \
@@ -206,20 +216,25 @@ using v8::MemoryPressureLevel;
EmptySlowElementDictionary) \
V(FixedArray, empty_ordered_hash_map, EmptyOrderedHashMap) \
V(FixedArray, empty_ordered_hash_set, EmptyOrderedHashSet) \
+ V(FeedbackMetadata, empty_feedback_metadata, EmptyFeedbackMetadata) \
V(PropertyCell, empty_property_cell, EmptyPropertyCell) \
V(WeakCell, empty_weak_cell, EmptyWeakCell) \
+ V(Cell, invalid_prototype_validity_cell, InvalidPrototypeValidityCell) \
V(InterceptorInfo, noop_interceptor_info, NoOpInterceptorInfo) \
+ V(WeakFixedArray, empty_weak_fixed_array, EmptyWeakFixedArray) \
/* Protectors */ \
V(Cell, array_constructor_protector, ArrayConstructorProtector) \
V(PropertyCell, no_elements_protector, NoElementsProtector) \
V(Cell, is_concat_spreadable_protector, IsConcatSpreadableProtector) \
- V(PropertyCell, species_protector, SpeciesProtector) \
+ V(PropertyCell, array_species_protector, ArraySpeciesProtector) \
+ V(PropertyCell, typed_array_species_protector, TypedArraySpeciesProtector) \
+ V(PropertyCell, promise_species_protector, PromiseSpeciesProtector) \
V(Cell, string_length_protector, StringLengthProtector) \
- V(Cell, fast_array_iteration_protector, FastArrayIterationProtector) \
V(PropertyCell, array_iterator_protector, ArrayIteratorProtector) \
V(PropertyCell, array_buffer_neutering_protector, \
ArrayBufferNeuteringProtector) \
V(PropertyCell, promise_hook_protector, PromiseHookProtector) \
+ V(Cell, promise_resolve_protector, PromiseResolveProtector) \
V(PropertyCell, promise_then_protector, PromiseThenProtector) \
/* Special numbers */ \
V(HeapNumber, nan_value, NanValue) \
@@ -244,12 +259,6 @@ using v8::MemoryPressureLevel;
V(FixedArray, detached_contexts, DetachedContexts) \
V(HeapObject, retaining_path_targets, RetainingPathTargets) \
V(ArrayList, retained_maps, RetainedMaps) \
- V(WeakHashTable, weak_object_to_code_table, WeakObjectToCodeTable) \
- /* weak_new_space_object_to_code_list is an array of weak cells, where */ \
- /* slots with even indices refer to the weak object, and the subsequent */ \
- /* slots refer to the code with the reference to the weak object. */ \
- V(ArrayList, weak_new_space_object_to_code_list, \
- WeakNewSpaceObjectToCodeList) \
/* Indirection lists for isolate-independent builtins */ \
V(FixedArray, builtins_constants_table, BuiltinsConstantsTable) \
/* Feedback vectors that we need for code coverage or type profile */ \
@@ -275,6 +284,7 @@ using v8::MemoryPressureLevel;
V(Smi, stack_limit, StackLimit) \
V(Smi, real_stack_limit, RealStackLimit) \
V(Smi, last_script_id, LastScriptId) \
+ V(Smi, last_debugging_id, LastDebuggingId) \
V(Smi, hash_seed, HashSeed) \
/* To distinguish the function templates, so that we can find them in the */ \
/* function cache of the native context. */ \
@@ -299,15 +309,16 @@ using v8::MemoryPressureLevel;
V(ArgumentsMarkerMap) \
V(ArrayBufferNeuteringProtector) \
V(ArrayIteratorProtector) \
- V(NoElementsProtector) \
V(BigIntMap) \
V(BlockContextMap) \
+ V(BoilerplateDescriptionMap) \
V(BooleanMap) \
V(ByteArrayMap) \
V(BytecodeArrayMap) \
V(CatchContextMap) \
V(CellMap) \
V(CodeMap) \
+ V(DebugEvaluateContextMap) \
V(DescriptorArrayMap) \
V(EmptyByteArray) \
V(EmptyDescriptorArray) \
@@ -328,12 +339,10 @@ using v8::MemoryPressureLevel;
V(EmptyScript) \
V(EmptySloppyArgumentsElements) \
V(EmptySlowElementDictionary) \
- V(empty_string) \
V(EmptyWeakCell) \
V(EvalContextMap) \
V(Exception) \
V(FalseValue) \
- V(FastArrayIterationProtector) \
V(FixedArrayMap) \
V(FixedCOWArrayMap) \
V(FixedDoubleArrayMap) \
@@ -347,9 +356,9 @@ using v8::MemoryPressureLevel;
V(HoleNanValue) \
V(InfinityValue) \
V(IsConcatSpreadableProtector) \
+ V(JSMessageObjectMap) \
V(JsConstructEntryCode) \
V(JsEntryCode) \
- V(JSMessageObjectMap) \
V(ManyClosuresCell) \
V(ManyClosuresCellMap) \
V(MetaMap) \
@@ -362,6 +371,7 @@ using v8::MemoryPressureLevel;
V(NanValue) \
V(NativeContextMap) \
V(NoClosuresCellMap) \
+ V(NoElementsProtector) \
V(NullMap) \
V(NullValue) \
V(NumberDictionaryMap) \
@@ -373,12 +383,15 @@ using v8::MemoryPressureLevel;
V(PropertyArrayMap) \
V(ScopeInfoMap) \
V(ScriptContextMap) \
+ V(ScriptContextTableMap) \
V(SharedFunctionInfoMap) \
V(SimpleNumberDictionaryMap) \
V(SloppyArgumentsElementsMap) \
V(SmallOrderedHashMapMap) \
V(SmallOrderedHashSetMap) \
- V(SpeciesProtector) \
+ V(ArraySpeciesProtector) \
+ V(TypedArraySpeciesProtector) \
+ V(PromiseSpeciesProtector) \
V(StaleRegister) \
V(StringLengthProtector) \
V(StringTableMap) \
@@ -394,8 +407,9 @@ using v8::MemoryPressureLevel;
V(UninitializedMap) \
V(UninitializedValue) \
V(WeakCellMap) \
- V(WeakHashTableMap) \
+ V(WeakFixedArrayMap) \
V(WithContextMap) \
+ V(empty_string) \
PRIVATE_SYMBOL_LIST(V)
#define FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(heap, array, start, length) \
@@ -412,6 +426,7 @@ class GCIdleTimeAction;
class GCIdleTimeHandler;
class GCIdleTimeHeapState;
class GCTracer;
+class HeapObjectAllocationTracker;
class HeapObjectsFilter;
class HeapStats;
class HistogramTimer;
@@ -442,6 +457,8 @@ enum ArrayStorageAllocationMode {
enum class ClearRecordedSlots { kYes, kNo };
+enum class ClearFreedMemoryMode { kClearFreedMemory, kDontClearFreedMemory };
+
enum class FixedArrayVisitationMode { kRegular, kIncremental };
enum class TraceRetainingPathMode { kEnabled, kDisabled };
@@ -671,11 +688,7 @@ class Heap {
// given alignment.
static int GetFillToAlign(Address address, AllocationAlignment alignment);
- template <typename T>
- static inline bool IsOneByte(T t, int chars);
-
- static void FatalProcessOutOfMemory(const char* location,
- bool is_heap_oom = false);
+ void FatalProcessOutOfMemory(const char* location);
V8_EXPORT_PRIVATE static bool RootIsImmortalImmovable(int root_index);
@@ -704,7 +717,11 @@ class Heap {
}
static inline GarbageCollector YoungGenerationCollector() {
+#if ENABLE_MINOR_MC
return (FLAG_minor_mc) ? MINOR_MARK_COMPACTOR : SCAVENGER;
+#else
+ return SCAVENGER;
+#endif // ENABLE_MINOR_MC
}
static inline const char* CollectorName(GarbageCollector collector) {
@@ -748,9 +765,13 @@ class Heap {
// Initialize a filler object to keep the ability to iterate over the heap
// when introducing gaps within pages. If slots could have been recorded in
// the freed area, then pass ClearRecordedSlots::kYes as the mode. Otherwise,
- // pass ClearRecordedSlots::kNo.
- V8_EXPORT_PRIVATE HeapObject* CreateFillerObjectAt(Address addr, int size,
- ClearRecordedSlots mode);
+ // pass ClearRecordedSlots::kNo. If the memory after the object header of
+ // the filler should be cleared, pass in kClearFreedMemory. The default is
+ // kDontClearFreedMemory.
+ V8_EXPORT_PRIVATE HeapObject* CreateFillerObjectAt(
+ Address addr, int size, ClearRecordedSlots clear_slots_mode,
+ ClearFreedMemoryMode clear_memory_mode =
+ ClearFreedMemoryMode::kDontClearFreedMemory);
bool CanMoveObjectStart(HeapObject* object);
@@ -819,8 +840,26 @@ class Heap {
code_space_memory_modification_scope_depth_--;
}
+ void UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk);
+ void UnprotectAndRegisterMemoryChunk(HeapObject* object);
+ void UnregisterUnprotectedMemoryChunk(MemoryChunk* chunk);
+ V8_EXPORT_PRIVATE void ProtectUnprotectedMemoryChunks();
+
+ void EnableUnprotectedMemoryChunksRegistry() {
+ unprotected_memory_chunks_registry_enabled_ = true;
+ }
+
+ void DisableUnprotectedMemoryChunksRegistry() {
+ unprotected_memory_chunks_registry_enabled_ = false;
+ }
+
+ bool unprotected_memory_chunks_registry_enabled() {
+ return unprotected_memory_chunks_registry_enabled_;
+ }
+
inline HeapState gc_state() { return gc_state_; }
void SetGCState(HeapState state);
+ bool IsTearingDown() const { return gc_state_ == TEAR_DOWN; }
inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }
@@ -846,8 +885,9 @@ class Heap {
bool is_isolate_locked);
void CheckMemoryPressure();
- void SetOutOfMemoryCallback(v8::debug::OutOfMemoryCallback callback,
- void* data);
+ void AddNearHeapLimitCallback(v8::NearHeapLimitCallback, void* data);
+ void RemoveNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
+ size_t heap_limit);
double MonotonicallyIncreasingTimeInMs();
@@ -867,6 +907,7 @@ class Heap {
inline uint32_t HashSeed();
inline int NextScriptId();
+ inline int NextDebuggingId();
inline int GetNextTemplateSerialNumber();
void SetSerializedObjects(FixedArray* objects);
@@ -889,15 +930,7 @@ class Heap {
external_memory_concurrently_freed_.SetValue(0);
}
- void AddWeakNewSpaceObjectToCodeDependency(Handle<HeapObject> obj,
- Handle<WeakCell> code);
-
- void AddWeakObjectToCodeDependency(Handle<HeapObject> obj,
- Handle<DependentCode> dep);
-
- DependentCode* LookupWeakObjectToCodeDependency(Handle<HeapObject> obj);
-
- void CompactWeakFixedArrays();
+ void CompactFixedArraysOfWeakCells();
void AddRetainedMap(Handle<Map> map);
@@ -925,28 +958,11 @@ class Heap {
return memory_pressure_level_.Value() != MemoryPressureLevel::kNone;
}
- size_t HeapLimitForDebugging() {
- const size_t kDebugHeapSizeFactor = 4;
- size_t max_limit = std::numeric_limits<size_t>::max() / 4;
- return Min(max_limit,
- initial_max_old_generation_size_ * kDebugHeapSizeFactor);
- }
-
- void IncreaseHeapLimitForDebugging() {
- max_old_generation_size_ =
- Max(max_old_generation_size_, HeapLimitForDebugging());
- }
-
- void RestoreOriginalHeapLimit() {
+ void RestoreHeapLimit(size_t heap_limit) {
// Do not set the limit lower than the live size + some slack.
size_t min_limit = SizeOfObjects() + SizeOfObjects() / 4;
max_old_generation_size_ =
- Min(max_old_generation_size_,
- Max(initial_max_old_generation_size_, min_limit));
- }
-
- bool IsHeapLimitIncreasedForDebugging() {
- return max_old_generation_size_ == HeapLimitForDebugging();
+ Min(max_old_generation_size_, Max(heap_limit, min_limit));
}
// ===========================================================================
@@ -977,16 +993,15 @@ class Heap {
// Create ObjectStats if live_object_stats_ or dead_object_stats_ are nullptr.
void CreateObjectStats();
+ // Sets the TearDown state, so no new GC tasks get posted.
+ void StartTearDown();
+
// Destroys all memory allocated by the heap.
void TearDown();
// Returns whether SetUp has been called.
bool HasBeenSetUp();
- void stop_using_tasks() { use_tasks_ = false; }
-
- bool use_tasks() const { return use_tasks_; }
-
// ===========================================================================
// Getters for spaces. =======================================================
// ===========================================================================
@@ -998,6 +1013,7 @@ class Heap {
OldSpace* code_space() { return code_space_; }
MapSpace* map_space() { return map_space_; }
LargeObjectSpace* lo_space() { return lo_space_; }
+ ReadOnlySpace* read_only_space() { return read_only_space_; }
inline PagedSpace* paged_space(int idx);
inline Space* space(int idx);
@@ -1080,6 +1096,15 @@ class Heap {
// Generated code can embed this address to get access to the roots.
Object** roots_array_start() { return roots_; }
+ ExternalReferenceTable* external_reference_table() {
+ DCHECK(external_reference_table_.is_initialized());
+ return &external_reference_table_;
+ }
+
+ static constexpr int roots_to_external_reference_table_offset() {
+ return kRootsExternalReferenceTableOffset;
+ }
+
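A brief sketch, not part of this diff, of why the offset accessor above must stay constant: builtins compiled into the snapshot reach the external reference table at a fixed offset from the roots array. The helper name is made up for illustration.

// Illustration only: recompute the table's address the way snapshot code
// effectively does, from the start of the roots array plus a fixed offset.
Address ExternalReferenceTableAddress(Heap* heap) {
  Address roots_start = reinterpret_cast<Address>(heap->roots_array_start());
  return roots_start + Heap::roots_to_external_reference_table_offset();
}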
// Sets the stub_cache_ (only used when expanding the dictionary).
void SetRootCodeStubs(SimpleNumberDictionary* value);
@@ -1116,9 +1141,7 @@ class Heap {
bool RootCanBeTreatedAsConstant(RootListIndex root_index);
Map* MapForFixedTypedArray(ExternalArrayType array_type);
- RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type);
-
- RootListIndex RootIndexForEmptyFixedTypedArray(ElementsKind kind);
+ Map* MapForFixedTypedArray(ElementsKind elements_kind);
FixedTypedArrayBase* EmptyFixedTypedArrayForMap(const Map* map);
void RegisterStrongRoots(Object** start, Object** end);
@@ -1197,6 +1220,8 @@ class Heap {
// ===========================================================================
// Write barrier support for object[offset] = o;
+ inline void RecordWrite(Object* object, MaybeObject** slot,
+ MaybeObject* value);
inline void RecordWrite(Object* object, Object** slot, Object* value);
inline void RecordWriteIntoCode(Code* host, RelocInfo* rinfo, Object* target);
void RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo, Object* target);
@@ -1222,6 +1247,11 @@ class Heap {
// Incremental marking API. ==================================================
// ===========================================================================
+ int GCFlagsForIncrementalMarking() {
+ return ShouldOptimizeForMemoryUsage() ? kReduceMemoryFootprintMask
+ : kNoGCFlags;
+ }
+
// Start incremental marking and ensure that idle time handler can perform
// incremental steps.
void StartIdleIncrementalMarking(
@@ -1315,8 +1345,14 @@ class Heap {
// Returns whether the object resides in new space.
inline bool InNewSpace(Object* object);
+ inline bool InNewSpace(MaybeObject* object);
+ inline bool InNewSpace(HeapObject* heap_object);
inline bool InFromSpace(Object* object);
+ inline bool InFromSpace(MaybeObject* object);
+ inline bool InFromSpace(HeapObject* heap_object);
inline bool InToSpace(Object* object);
+ inline bool InToSpace(MaybeObject* object);
+ inline bool InToSpace(HeapObject* heap_object);
// Returns whether the object resides in old space.
inline bool InOldSpace(Object* object);
@@ -1373,9 +1409,7 @@ class Heap {
// ===========================================================================
// Returns the maximum amount of memory reserved for the heap.
- size_t MaxReserved() {
- return 2 * max_semi_space_size_ + max_old_generation_size_;
- }
+ size_t MaxReserved();
size_t MaxSemiSpaceSize() { return max_semi_space_size_; }
size_t InitialSemiSpaceSize() { return initial_semispace_size_; }
size_t MaxOldGenerationSize() { return max_old_generation_size_; }
@@ -1541,16 +1575,15 @@ class Heap {
// ===========================================================================
// Creates a filler object and returns a heap object immediately after it.
- MUST_USE_RESULT HeapObject* PrecedeWithFiller(HeapObject* object,
- int filler_size);
+ V8_WARN_UNUSED_RESULT HeapObject* PrecedeWithFiller(HeapObject* object,
+ int filler_size);
// Creates a filler object if needed for alignment and returns a heap object
// immediately after it. If any space is left after the returned object,
// another filler object is created so the over allocated memory is iterable.
- MUST_USE_RESULT HeapObject* AlignWithFiller(HeapObject* object,
- int object_size,
- int allocation_size,
- AllocationAlignment alignment);
+ V8_WARN_UNUSED_RESULT HeapObject* AlignWithFiller(
+ HeapObject* object, int object_size, int allocation_size,
+ AllocationAlignment alignment);
// ===========================================================================
// ArrayBuffer tracking. =====================================================
@@ -1598,6 +1631,15 @@ class Heap {
}
// ===========================================================================
+ // Heap object allocation tracking. ==========================================
+ // ===========================================================================
+
+ void AddHeapObjectAllocationTracker(HeapObjectAllocationTracker* tracker);
+ void RemoveHeapObjectAllocationTracker(HeapObjectAllocationTracker* tracker);
+ bool has_heap_object_allocation_tracker() const {
+ return !allocation_trackers_.empty();
+ }
+
// Retaining path tracking. ==================================================
// ===========================================================================
@@ -1741,7 +1783,7 @@ class Heap {
void* data;
};
- static const int kInitialStringTableSize = 2048;
+ static const int kInitialStringTableSize = StringTable::kMinCapacity;
static const int kInitialEvalCacheSize = 64;
static const int kInitialNumberStringCacheSize = 256;
@@ -1827,15 +1869,6 @@ class Heap {
inline void UpdateOldSpaceLimits();
- // Initializes a JSObject based on its map.
- void InitializeJSObjectFromMap(JSObject* obj, Object* properties, Map* map);
-
- // Initializes JSObject body starting at given offset.
- void InitializeJSObjectBody(JSObject* obj, Map* map, int start_offset);
-
- void InitializeAllocationMemento(AllocationMemento* memento,
- AllocationSite* allocation_site);
-
bool CreateInitialMaps();
void CreateInternalAccessorInfoObjects();
void CreateInitialObjects();
@@ -1857,6 +1890,9 @@ class Heap {
// Fill in bogus values in from space
void ZapFromSpace();
+ // Zaps the memory of a code object.
+ void ZapCodeObject(Address start_address, int size_in_bytes);
+
  // Deopts all code that contains allocation instructions which are tenured or
  // not tenured. Moreover, it clears the pretenuring allocation site statistics.
void ResetAllAllocationSitesDependentCode(PretenureFlag flag);
@@ -1910,7 +1946,7 @@ class Heap {
void CollectGarbageOnMemoryPressure();
- void InvokeOutOfMemoryCallback();
+ bool InvokeNearHeapLimitCallback();
void ComputeFastPromotionMode(double survival_rate);
@@ -1927,6 +1963,7 @@ class Heap {
// - GCFinalizeMCReduceMemory: finalization of incremental full GC with
// memory reduction
HistogramTimer* GCTypeTimer(GarbageCollector collector);
+ HistogramTimer* GCTypePriorityTimer(GarbageCollector collector);
// ===========================================================================
// Pretenuring. ==============================================================
@@ -2005,6 +2042,11 @@ class Heap {
bool MaximumSizeScavenge() { return maximum_size_scavenges_ > 0; }
+ bool IsIneffectiveMarkCompact(size_t old_generation_size,
+ double mutator_utilization);
+ void CheckIneffectiveMarkCompact(size_t old_generation_size,
+ double mutator_utilization);
+
// ===========================================================================
// Growing strategy. =========================================================
// ===========================================================================
@@ -2041,10 +2083,6 @@ class Heap {
bool CanExpandOldGeneration(size_t size);
- bool IsCloseToOutOfMemory(size_t slack) {
- return OldGenerationCapacity() + slack >= MaxOldGenerationSize();
- }
-
bool ShouldExpandOldGenerationOnSlowAllocation();
enum class IncrementalMarkingLimit { kNoLimit, kSoftLimit, kHardLimit };
@@ -2071,263 +2109,36 @@ class Heap {
// Allocation methods. =======================================================
// ===========================================================================
- // Returns a deep copy of the JavaScript object.
- // Properties and elements are copied too.
- // Optionally takes an AllocationSite to be appended in an AllocationMemento.
- MUST_USE_RESULT AllocationResult CopyJSObject(JSObject* source,
- AllocationSite* site = nullptr);
-
// Allocates a JS Map in the heap.
- MUST_USE_RESULT AllocationResult
+ V8_WARN_UNUSED_RESULT AllocationResult
AllocateMap(InstanceType instance_type, int instance_size,
ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
int inobject_properties = 0);
- // Allocates and initializes a new JavaScript object based on a
- // constructor.
- // If allocation_site is non-null, then a memento is emitted after the object
- // that points to the site.
- MUST_USE_RESULT AllocationResult AllocateJSObject(
- JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED,
- AllocationSite* allocation_site = nullptr);
-
- // Allocates and initializes a new JavaScript object based on a map.
- // Passing an allocation site means that a memento will be created that
- // points to the site.
- MUST_USE_RESULT AllocationResult
- AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure = NOT_TENURED,
- AllocationSite* allocation_site = nullptr);
-
- // Allocates a HeapNumber from value.
- MUST_USE_RESULT AllocationResult AllocateHeapNumber(
- MutableMode mode = IMMUTABLE, PretenureFlag pretenure = NOT_TENURED);
-
- MUST_USE_RESULT AllocationResult
- AllocateBigInt(int length, PretenureFlag pretenure = NOT_TENURED);
-
- // Allocates a byte array of the specified length
- MUST_USE_RESULT AllocationResult
- AllocateByteArray(int length, PretenureFlag pretenure = NOT_TENURED);
-
- // Allocates a bytecode array with given contents.
- MUST_USE_RESULT AllocationResult
- AllocateBytecodeArray(int length, const byte* raw_bytecodes, int frame_size,
- int parameter_count, FixedArray* constant_pool);
-
- MUST_USE_RESULT AllocationResult CopyCode(Code* code,
- CodeDataContainer* data_container);
-
- MUST_USE_RESULT AllocationResult
- CopyBytecodeArray(BytecodeArray* bytecode_array);
-
- // Allocates a fixed array-like object with given map and initialized with
- // undefined values.
- MUST_USE_RESULT inline AllocationResult AllocateFixedArrayWithMap(
- RootListIndex map_root_index, int length,
- PretenureFlag pretenure = NOT_TENURED);
-
- // Allocates a fixed array initialized with undefined values
- MUST_USE_RESULT inline AllocationResult AllocateFixedArray(
- int length, PretenureFlag pretenure = NOT_TENURED);
-
- // Allocates a property array initialized with undefined values
- MUST_USE_RESULT AllocationResult
- AllocatePropertyArray(int length, PretenureFlag pretenure = NOT_TENURED);
-
- // Allocate a feedback vector for the given shared function info. The slots
- // are pre-filled with undefined.
- MUST_USE_RESULT AllocationResult
- AllocateFeedbackVector(SharedFunctionInfo* shared, PretenureFlag pretenure);
-
- // Allocate an uninitialized feedback vector.
- MUST_USE_RESULT AllocationResult
- AllocateRawFeedbackVector(int length, PretenureFlag pretenure);
-
- MUST_USE_RESULT AllocationResult AllocateSmallOrderedHashSet(
- int length, PretenureFlag pretenure = NOT_TENURED);
- MUST_USE_RESULT AllocationResult AllocateSmallOrderedHashMap(
- int length, PretenureFlag pretenure = NOT_TENURED);
-
// Allocate an uninitialized object. The memory is non-executable if the
// hardware and OS allow. This is the single choke-point for allocations
// performed by the runtime and should not be bypassed (to extend this to
// inlined allocations, use the Heap::DisableInlineAllocation() support).
- MUST_USE_RESULT inline AllocationResult AllocateRaw(
+ V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
int size_in_bytes, AllocationSpace space,
AllocationAlignment aligment = kWordAligned);
+ HeapObject* AllocateRawWithRetry(
+ int size, AllocationSpace space,
+ AllocationAlignment alignment = kWordAligned);
+ HeapObject* AllocateRawCodeInLargeObjectSpace(int size);
+
// Allocates a heap object based on the map.
- MUST_USE_RESULT AllocationResult
- Allocate(Map* map, AllocationSpace space,
- AllocationSite* allocation_site = nullptr);
+ V8_WARN_UNUSED_RESULT AllocationResult Allocate(Map* map,
+ AllocationSpace space);
// Allocates a partial map for bootstrapping.
- MUST_USE_RESULT AllocationResult
- AllocatePartialMap(InstanceType instance_type, int instance_size);
-
- // Allocate a block of memory in the given space (filled with a filler).
- // Used as a fall-back for generated code when the space is full.
- MUST_USE_RESULT AllocationResult
- AllocateFillerObject(int size, bool double_align, AllocationSpace space);
-
- // Allocate an uninitialized fixed array.
- MUST_USE_RESULT AllocationResult
- AllocateRawFixedArray(int length, PretenureFlag pretenure);
-
- // Allocate an uninitialized fixed double array.
- MUST_USE_RESULT AllocationResult
- AllocateRawFixedDoubleArray(int length, PretenureFlag pretenure);
-
- // Allocate an initialized fixed array with the given filler value.
- MUST_USE_RESULT AllocationResult
- AllocateFixedArrayWithFiller(RootListIndex map_root_index, int length,
- PretenureFlag pretenure, Object* filler);
-
- // Allocate and partially initializes a String. There are two String
- // encodings: one-byte and two-byte. These functions allocate a string of
- // the given length and set its map and length fields. The characters of
- // the string are uninitialized.
- MUST_USE_RESULT AllocationResult
- AllocateRawOneByteString(int length, PretenureFlag pretenure);
- MUST_USE_RESULT AllocationResult
- AllocateRawTwoByteString(int length, PretenureFlag pretenure);
-
- // Allocates an internalized string in old space based on the character
- // stream.
- MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringFromUtf8(
- Vector<const char> str, int chars, uint32_t hash_field);
-
- MUST_USE_RESULT inline AllocationResult AllocateOneByteInternalizedString(
- Vector<const uint8_t> str, uint32_t hash_field);
-
- MUST_USE_RESULT inline AllocationResult AllocateTwoByteInternalizedString(
- Vector<const uc16> str, uint32_t hash_field);
-
- template <bool is_one_byte, typename T>
- MUST_USE_RESULT AllocationResult
- AllocateInternalizedStringImpl(T t, int chars, uint32_t hash_field);
-
- template <typename T>
- MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringImpl(
- T t, int chars, uint32_t hash_field);
-
- // Allocates an uninitialized fixed array. It must be filled by the caller.
- MUST_USE_RESULT AllocationResult AllocateUninitializedFixedArray(
- int length, PretenureFlag pretenure = NOT_TENURED);
-
- // Make a copy of src and return it.
- MUST_USE_RESULT inline AllocationResult CopyFixedArray(FixedArray* src);
-
- // Make a copy of src, also grow the copy, and return the copy.
- template <typename T>
- MUST_USE_RESULT AllocationResult CopyArrayAndGrow(T* src, int grow_by,
- PretenureFlag pretenure);
-
- // Make a copy of src, also grow the copy, and return the copy.
- MUST_USE_RESULT AllocationResult CopyPropertyArrayAndGrow(
- PropertyArray* src, int grow_by, PretenureFlag pretenure);
-
- // Make a copy of src, also grow the copy, and return the copy.
- MUST_USE_RESULT AllocationResult CopyFixedArrayUpTo(FixedArray* src,
- int new_len,
- PretenureFlag pretenure);
-
- // Make a copy of src, set the map, and return the copy.
- template <typename T>
- MUST_USE_RESULT AllocationResult CopyArrayWithMap(T* src, Map* map);
-
- // Make a copy of src, set the map, and return the copy.
- MUST_USE_RESULT AllocationResult CopyFixedArrayWithMap(FixedArray* src,
- Map* map);
-
- // Make a copy of src, set the map, and return the copy.
- MUST_USE_RESULT AllocationResult CopyPropertyArray(PropertyArray* src);
-
- // Make a copy of src and return it.
- MUST_USE_RESULT inline AllocationResult CopyFixedDoubleArray(
- FixedDoubleArray* src);
-
- // Make a copy of src and return it.
- MUST_USE_RESULT AllocationResult CopyFeedbackVector(FeedbackVector* src);
-
- // Computes a single character string where the character has code.
- // A cache is used for one-byte (Latin1) codes.
- MUST_USE_RESULT AllocationResult
- LookupSingleCharacterStringFromCode(uint16_t code);
-
- // Allocate a symbol in old space.
- MUST_USE_RESULT AllocationResult AllocateSymbol();
-
- // Allocates an external array of the specified length and type.
- MUST_USE_RESULT AllocationResult AllocateFixedTypedArrayWithExternalPointer(
- int length, ExternalArrayType array_type, void* external_pointer,
- PretenureFlag pretenure);
-
- // Allocates a fixed typed array of the specified length and type.
- MUST_USE_RESULT AllocationResult
- AllocateFixedTypedArray(int length, ExternalArrayType array_type,
- bool initialize, PretenureFlag pretenure);
-
- // Make a copy of src and return it.
- MUST_USE_RESULT AllocationResult CopyAndTenureFixedCOWArray(FixedArray* src);
-
- // Make a copy of src, set the map, and return the copy.
- MUST_USE_RESULT AllocationResult
- CopyFixedDoubleArrayWithMap(FixedDoubleArray* src, Map* map);
-
- // Allocates a fixed double array with uninitialized values. Returns
- MUST_USE_RESULT AllocationResult AllocateUninitializedFixedDoubleArray(
- int length, PretenureFlag pretenure = NOT_TENURED);
-
- // Allocate empty fixed array.
- MUST_USE_RESULT AllocationResult AllocateEmptyFixedArray();
-
- // Allocate empty scope info.
- MUST_USE_RESULT AllocationResult AllocateEmptyScopeInfo();
+ V8_WARN_UNUSED_RESULT AllocationResult
+ AllocatePartialMap(InstanceType instance_type, int instance_size);
// Allocate empty fixed typed array of given type.
- MUST_USE_RESULT AllocationResult
- AllocateEmptyFixedTypedArray(ExternalArrayType array_type);
-
- // Allocate a tenured simple cell.
- MUST_USE_RESULT AllocationResult AllocateCell(Object* value);
-
- // Allocate a tenured simple feedback cell.
- MUST_USE_RESULT AllocationResult AllocateFeedbackCell(Map* map,
- HeapObject* value);
-
- // Allocate a tenured JS global property cell initialized with the hole.
- MUST_USE_RESULT AllocationResult AllocatePropertyCell(Name* name);
-
- MUST_USE_RESULT AllocationResult AllocateWeakCell(HeapObject* value);
-
- MUST_USE_RESULT AllocationResult AllocateTransitionArray(int capacity);
-
- // Allocates a new utility object in the old generation.
- MUST_USE_RESULT AllocationResult
- AllocateStruct(InstanceType type, PretenureFlag pretenure = NOT_TENURED);
-
- // Allocates a new foreign object.
- MUST_USE_RESULT AllocationResult
- AllocateForeign(Address address, PretenureFlag pretenure = NOT_TENURED);
-
- // Allocates a new code object (mostly uninitialized). Can only be used when
- // code space is unprotected and requires manual initialization by the caller.
- MUST_USE_RESULT AllocationResult AllocateCode(int object_size,
- Movability movability);
-
- // Allocates a new code object (fully initialized). All header fields of the
- // returned object are immutable and the code object is write protected.
- MUST_USE_RESULT AllocationResult AllocateCode(
- const CodeDesc& desc, Code::Kind kind, Handle<Object> self_ref,
- int32_t builtin_index, ByteArray* reloc_info,
- CodeDataContainer* data_container, ByteArray* source_position_table,
- DeoptimizationData* deopt_data, Movability movability, uint32_t stub_key,
- bool is_turbofanned, int stack_slots, int safepoint_table_offset,
- int handler_table_offset);
-
- MUST_USE_RESULT AllocationResult AllocateJSPromise(
- JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED);
+ V8_WARN_UNUSED_RESULT AllocationResult
+ AllocateEmptyFixedTypedArray(ExternalArrayType array_type);
void set_force_oom(bool value) { force_oom_ = value; }
@@ -2361,6 +2172,13 @@ class Heap {
Object* roots_[kRootListLength];
+ // This table is accessed from builtin code compiled into the snapshot, and
+ // thus its offset from roots_ must remain static. This is verified in
+ // Isolate::Init() using runtime checks.
+ static constexpr int kRootsExternalReferenceTableOffset =
+ kRootListLength * kPointerSize;
+ ExternalReferenceTable external_reference_table_;
+
size_t code_range_size_;
size_t max_semi_space_size_;
size_t initial_semispace_size_;
@@ -2385,8 +2203,8 @@ class Heap {
// and reset by a mark-compact garbage collection.
base::AtomicValue<MemoryPressureLevel> memory_pressure_level_;
- v8::debug::OutOfMemoryCallback out_of_memory_callback_;
- void* out_of_memory_callback_data_;
+ std::vector<std::pair<v8::NearHeapLimitCallback, void*> >
+ near_heap_limit_callbacks_;
// For keeping track of context disposals.
int contexts_disposed_;
@@ -2401,6 +2219,7 @@ class Heap {
OldSpace* code_space_;
MapSpace* map_space_;
LargeObjectSpace* lo_space_;
+ ReadOnlySpace* read_only_space_;
// Map from the space id to the space.
Space* space_[LAST_SPACE + 1];
@@ -2446,6 +2265,10 @@ class Heap {
// How many gc happened.
unsigned int gc_count_;
+ // The number of Mark-Compact garbage collections that are considered as
+ // ineffective. See IsIneffectiveMarkCompact() predicate.
+ int consecutive_ineffective_mark_compacts_;
+
static const uintptr_t kMmapRegionMask = 0xFFFFFFFFu;
uintptr_t mmap_region_base_;
@@ -2590,14 +2413,16 @@ class Heap {
bool fast_promotion_mode_;
- bool use_tasks_;
-
// Used for testing purposes.
bool force_oom_;
bool delay_sweeper_tasks_for_testing_;
HeapObject* pending_layout_change_object_;
+ base::Mutex unprotected_memory_chunks_mutex_;
+ std::unordered_set<MemoryChunk*> unprotected_memory_chunks_;
+ bool unprotected_memory_chunks_registry_enabled_;
+
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
// If the --gc-interval flag is set to a positive value, this
// variable holds the value indicating the number of allocations
@@ -2614,6 +2439,8 @@ class Heap {
// stores the option of the corresponding target.
std::map<int, RetainingPathOption> retaining_path_target_option_;
+ std::vector<HeapObjectAllocationTracker*> allocation_trackers_;
+
// Classes in "heap" can be friends.
friend class AlwaysAllocateScope;
friend class ConcurrentMarking;
@@ -2704,6 +2531,18 @@ class CodeSpaceMemoryModificationScope {
Heap* heap_;
};
+// The CodePageCollectionMemoryModificationScope can only be used by the main
+// thread. It will not be enabled if a CodeSpaceMemoryModificationScope is
+// already active.
+class CodePageCollectionMemoryModificationScope {
+ public:
+ explicit inline CodePageCollectionMemoryModificationScope(Heap* heap);
+ inline ~CodePageCollectionMemoryModificationScope();
+
+ private:
+ Heap* heap_;
+};
+
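A hedged usage sketch, not part of this diff: the scope is RAII-style, so code that briefly needs writable code pages outside a full CodeSpaceMemoryModificationScope brackets the mutation with it. The PatchCode helper below is hypothetical.

// Illustration only: pages backing the patched code are made writable for
// the lifetime of the scope and re-protected when it is destroyed.
void PatchCode(Heap* heap, Code* code) {
  CodePageCollectionMemoryModificationScope modification_scope(heap);
  // ... mutate the instruction stream of |code| while pages are writable ...
}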
 // The CodePageMemoryModificationScope does not check if transitions to
// writeable and back to executable are actually allowed, i.e. the MemoryChunk
// was registered to be executable. It can be used by concurrent threads.
@@ -2729,11 +2568,14 @@ class CodePageMemoryModificationScope {
class VerifyPointersVisitor : public ObjectVisitor, public RootVisitor {
public:
void VisitPointers(HeapObject* host, Object** start, Object** end) override;
+ void VisitPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) override;
void VisitRootPointers(Root root, const char* description, Object** start,
Object** end) override;
- private:
- void VerifyPointers(Object** start, Object** end);
+ protected:
+ virtual void VerifyPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end);
};
@@ -2745,11 +2587,17 @@ class VerifySmisVisitor : public RootVisitor {
};
// Space iterator for iterating over all the paged spaces of the heap: Map
-// space, old space, code space and cell space. Returns
-// each space in turn, and null when it is done.
+// space, old space, code space and optionally read only space. Returns each
+// space in turn, and null when it is done.
class V8_EXPORT_PRIVATE PagedSpaces BASE_EMBEDDED {
public:
- explicit PagedSpaces(Heap* heap) : heap_(heap), counter_(OLD_SPACE) {}
+ enum class SpacesSpecifier { kSweepablePagedSpaces, kAllPagedSpaces };
+
+ explicit PagedSpaces(Heap* heap, SpacesSpecifier specifier =
+ SpacesSpecifier::kSweepablePagedSpaces)
+ : heap_(heap),
+ counter_(specifier == SpacesSpecifier::kAllPagedSpaces ? RO_SPACE
+ : OLD_SPACE) {}
PagedSpace* next();
private:
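A minimal sketch, not part of this diff, assuming a Heap* named heap is in scope: with SpacesSpecifier::kAllPagedSpaces the iterator also yields the new read-only space before the usual old, code and map spaces.

// Illustration only: visit every paged space, including RO_SPACE.
PagedSpaces spaces(heap, PagedSpaces::SpacesSpecifier::kAllPagedSpaces);
for (PagedSpace* space = spaces.next(); space != nullptr;
     space = spaces.next()) {
  // ... e.g. verify |space| or accumulate per-space statistics ...
}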
@@ -2865,6 +2713,16 @@ class AllocationObserver {
V8_EXPORT_PRIVATE const char* AllocationSpaceName(AllocationSpace space);
+// -----------------------------------------------------------------------------
+// Allows observation of heap object allocations.
+class HeapObjectAllocationTracker {
+ public:
+ virtual void AllocationEvent(Address addr, int size) = 0;
+ virtual void MoveEvent(Address from, Address to, int size) {}
+ virtual void UpdateObjectSizeEvent(Address addr, int size) {}
+ virtual ~HeapObjectAllocationTracker() = default;
+};
+
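A minimal sketch, not part of this diff, of the observer interface above: only AllocationEvent is mandatory, and a tracker would be attached with Heap::AddHeapObjectAllocationTracker and detached before tear-down. The class name is made up for illustration.

// Illustration only: counts the bytes reported through AllocationEvent.
class CountingAllocationTracker final : public HeapObjectAllocationTracker {
 public:
  void AllocationEvent(Address addr, int size) override {
    allocated_bytes_ += static_cast<size_t>(size);
  }
  size_t allocated_bytes() const { return allocated_bytes_; }

 private:
  size_t allocated_bytes_ = 0;
};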
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/incremental-marking-inl.h b/deps/v8/src/heap/incremental-marking-inl.h
index 84f31ef350..b64c203a8d 100644
--- a/deps/v8/src/heap/incremental-marking-inl.h
+++ b/deps/v8/src/heap/incremental-marking-inl.h
@@ -7,6 +7,7 @@
#include "src/heap/incremental-marking.h"
#include "src/isolate.h"
+#include "src/objects/maybe-object.h"
namespace v8 {
namespace internal {
@@ -14,8 +15,21 @@ namespace internal {
void IncrementalMarking::RecordWrite(HeapObject* obj, Object** slot,
Object* value) {
- if (IsMarking() && value->IsHeapObject()) {
- RecordWriteSlow(obj, slot, value);
+ DCHECK_IMPLIES(slot != nullptr, !HasWeakHeapObjectTag(*slot));
+ DCHECK(!HasWeakHeapObjectTag(value));
+ RecordMaybeWeakWrite(obj, reinterpret_cast<MaybeObject**>(slot),
+ reinterpret_cast<MaybeObject*>(value));
+}
+
+void IncrementalMarking::RecordMaybeWeakWrite(HeapObject* obj,
+ MaybeObject** slot,
+ MaybeObject* value) {
+ // When writing a weak reference, treat it as strong for the purposes of the
+ // marking barrier.
+ HeapObject* heap_object;
+ if (IsMarking() && value->ToStrongOrWeakHeapObject(&heap_object)) {
+ RecordWriteSlow(obj, reinterpret_cast<HeapObjectReference**>(slot),
+ heap_object);
}
}
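A hedged sketch, not part of this diff: a store of a weak reference into a heap object still has to be reported to the incremental marker, which marks the referent as if it were strong. The WriteWeakField helper is hypothetical and omits the generational (old-to-new) barrier.

// Illustration only: route a weak slot write through the marking barrier.
void WriteWeakField(Heap* heap, HeapObject* host, MaybeObject** slot,
                    MaybeObject* weak_value) {
  *slot = weak_value;
  heap->incremental_marking()->RecordMaybeWeakWrite(host, slot, weak_value);
}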
diff --git a/deps/v8/src/heap/incremental-marking-job.cc b/deps/v8/src/heap/incremental-marking-job.cc
index fa6082ae7c..7583aaaadf 100644
--- a/deps/v8/src/heap/incremental-marking-job.cc
+++ b/deps/v8/src/heap/incremental-marking-job.cc
@@ -21,7 +21,7 @@ void IncrementalMarkingJob::Start(Heap* heap) {
}
void IncrementalMarkingJob::ScheduleTask(Heap* heap) {
- if (!task_pending_ && heap->use_tasks()) {
+ if (!task_pending_ && !heap->IsTearingDown()) {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
task_pending_ = true;
auto task = new Task(heap->isolate(), this);
@@ -49,7 +49,7 @@ void IncrementalMarkingJob::Task::RunInternal() {
if (incremental_marking->IsStopped()) {
if (heap->IncrementalMarkingLimitReached() !=
Heap::IncrementalMarkingLimit::kNoLimit) {
- heap->StartIncrementalMarking(Heap::kNoGCFlags,
+ heap->StartIncrementalMarking(heap->GCFlagsForIncrementalMarking(),
GarbageCollectionReason::kIdleTask,
kGCCallbackScheduleIdleGarbageCollection);
}
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index a7b56e4315..2b693ed44e 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -53,9 +53,11 @@ void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
}
IncrementalMarking::IncrementalMarking(
- Heap* heap, MarkCompactCollector::MarkingWorklist* marking_worklist)
+ Heap* heap, MarkCompactCollector::MarkingWorklist* marking_worklist,
+ WeakObjects* weak_objects)
: heap_(heap),
marking_worklist_(marking_worklist),
+ weak_objects_(weak_objects),
initial_old_generation_size_(0),
bytes_marked_ahead_of_schedule_(0),
bytes_marked_concurrently_(0),
@@ -91,8 +93,8 @@ bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
return is_compacting_ && need_recording;
}
-
-void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
+void IncrementalMarking::RecordWriteSlow(HeapObject* obj,
+ HeapObjectReference** slot,
Object* value) {
if (BaseRecordWrite(obj, value) && slot != nullptr) {
// Object is not going to be rescanned we need to record the slot.
@@ -422,7 +424,7 @@ void IncrementalMarking::StartMarking() {
IncrementalMarkingRootMarkingVisitor visitor(this);
heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
- if (FLAG_concurrent_marking && heap_->use_tasks()) {
+ if (FLAG_concurrent_marking && !heap_->IsTearingDown()) {
heap_->concurrent_marking()->ScheduleTasks();
}
@@ -558,8 +560,6 @@ void IncrementalMarking::FinalizeIncrementally() {
// objects to reduce the marking load in the final pause.
// 1) We scan and mark the roots again to find all changes to the root set.
// 2) Age and retain maps embedded in optimized code.
- // 3) Remove weak cell with live values from the list of weak cells, they
- // do not need processing during GC.
MarkRoots();
  // Map retaining is needed for performance, not correctness,
@@ -588,8 +588,12 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
Map* filler_map = heap_->one_pointer_filler_map();
+#ifdef ENABLE_MINOR_MC
MinorMarkCompactCollector::MarkingState* minor_marking_state =
heap()->minor_mark_compact_collector()->marking_state();
+#else
+ void* minor_marking_state = nullptr;
+#endif // ENABLE_MINOR_MC
marking_worklist()->Update([this, filler_map, minor_marking_state](
HeapObject* obj, HeapObject** out) -> bool {
@@ -613,19 +617,24 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
// The object may be on a page that was moved in new space.
DCHECK(
Page::FromAddress(obj->address())->IsFlagSet(Page::SWEEP_TO_ITERATE));
+#ifdef ENABLE_MINOR_MC
if (minor_marking_state->IsGrey(obj)) {
*out = obj;
return true;
}
+#endif // ENABLE_MINOR_MC
return false;
} else {
- // The object may be on a page that was moved from new to old space.
+ // The object may be on a page that was moved from new to old space. Only
+ // applicable during minor MC garbage collections.
if (Page::FromAddress(obj->address())
->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
+#ifdef ENABLE_MINOR_MC
if (minor_marking_state->IsGrey(obj)) {
*out = obj;
return true;
}
+#endif // ENABLE_MINOR_MC
return false;
}
DCHECK_IMPLIES(marking_state()->IsWhite(obj), obj->IsFiller());
@@ -638,6 +647,47 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
return false;
}
});
+
+ UpdateWeakReferencesAfterScavenge();
+}
+
+void IncrementalMarking::UpdateWeakReferencesAfterScavenge() {
+ weak_objects_->weak_references.Update(
+ [](std::pair<HeapObject*, HeapObjectReference**> slot_in,
+ std::pair<HeapObject*, HeapObjectReference**>* slot_out) -> bool {
+ HeapObject* heap_obj = slot_in.first;
+ MapWord map_word = heap_obj->map_word();
+ if (map_word.IsForwardingAddress()) {
+ ptrdiff_t distance_to_slot =
+ reinterpret_cast<Address>(slot_in.second) -
+ reinterpret_cast<Address>(slot_in.first);
+ Address new_slot =
+ reinterpret_cast<Address>(map_word.ToForwardingAddress()) +
+ distance_to_slot;
+ slot_out->first = map_word.ToForwardingAddress();
+ slot_out->second = reinterpret_cast<HeapObjectReference**>(new_slot);
+ return true;
+ }
+ if (heap_obj->GetHeap()->InNewSpace(heap_obj)) {
+ // The new space object containing the weak reference died.
+ return false;
+ }
+ *slot_out = slot_in;
+ return true;
+ });
+ weak_objects_->weak_objects_in_code.Update(
+ [](std::pair<HeapObject*, Code*> slot_in,
+ std::pair<HeapObject*, Code*>* slot_out) -> bool {
+ HeapObject* heap_obj = slot_in.first;
+ MapWord map_word = heap_obj->map_word();
+ if (map_word.IsForwardingAddress()) {
+ slot_out->first = map_word.ToForwardingAddress();
+ slot_out->second = slot_in.second;
+ } else {
+ *slot_out = slot_in;
+ }
+ return true;
+ });
}
void IncrementalMarking::UpdateMarkedBytesAfterScavenge(
@@ -908,7 +958,7 @@ size_t IncrementalMarking::StepSizeToMakeProgress() {
const size_t kTargetStepCountAtOOM = 32;
size_t oom_slack = heap()->new_space()->Capacity() + 64 * MB;
- if (heap()->IsCloseToOutOfMemory(oom_slack)) {
+ if (!heap()->CanExpandOldGeneration(oom_slack)) {
return heap()->PromotedSpaceSizeOfObjects() / kTargetStepCountAtOOM;
}
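The slot update in UpdateWeakReferencesAfterScavenge() is plain pointer arithmetic: the weak slot keeps its offset inside the host object, so the new slot address is the forwarding address plus that offset (entries whose new-space host died are dropped by returning false from the Update callback). A standalone sketch with made-up addresses:

// uintptr_t stands in for v8::internal::Address; values are illustrative.
// (Needs <cstdint>/<cstddef> if compiled standalone.)
uintptr_t old_host = 0x1000;        // host object before the scavenge
uintptr_t old_slot = 0x1018;        // weak slot inside the host
uintptr_t forwarded_host = 0x5000;  // forwarding address after the move
ptrdiff_t distance_to_slot = old_slot - old_host;         // 0x18
uintptr_t new_slot = forwarded_host + distance_to_slot;   // 0x5018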
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index 87a1751fd9..b9f6a66444 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -87,7 +87,8 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
#endif
IncrementalMarking(Heap* heap,
- MarkCompactCollector::MarkingWorklist* marking_worklist);
+ MarkCompactCollector::MarkingWorklist* marking_worklist,
+ WeakObjects* weak_objects);
MarkingState* marking_state() { return &marking_state_; }
@@ -165,6 +166,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
void FinalizeIncrementally();
void UpdateMarkingWorklistAfterScavenge();
+ void UpdateWeakReferencesAfterScavenge();
void UpdateMarkedBytesAfterScavenge(size_t dead_bytes_in_new_space);
void Hurry();
@@ -205,11 +207,14 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
// the incremental cycle (stays white).
V8_INLINE bool BaseRecordWrite(HeapObject* obj, Object* value);
V8_INLINE void RecordWrite(HeapObject* obj, Object** slot, Object* value);
+ V8_INLINE void RecordMaybeWeakWrite(HeapObject* obj, MaybeObject** slot,
+ MaybeObject* value);
V8_INLINE void RecordWriteIntoCode(Code* host, RelocInfo* rinfo,
Object* value);
V8_INLINE void RecordWrites(HeapObject* obj);
- void RecordWriteSlow(HeapObject* obj, Object** slot, Object* value);
+ void RecordWriteSlow(HeapObject* obj, HeapObjectReference** slot,
+ Object* value);
void RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo, Object* value);
// Returns true if the function succeeds in transitioning the object
@@ -324,6 +329,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
Heap* const heap_;
MarkCompactCollector::MarkingWorklist* const marking_worklist_;
+ WeakObjects* weak_objects_;
double start_time_ms_;
size_t initial_old_generation_size_;
diff --git a/deps/v8/src/heap/invalidated-slots-inl.h b/deps/v8/src/heap/invalidated-slots-inl.h
index 8ca289cf1a..577c4a5576 100644
--- a/deps/v8/src/heap/invalidated-slots-inl.h
+++ b/deps/v8/src/heap/invalidated-slots-inl.h
@@ -61,7 +61,7 @@ bool InvalidatedSlotsFilter::IsValid(Address slot) {
// we can return true here.
return true;
}
- return invalidated_object_->IsValidSlot(offset);
+ return invalidated_object_->IsValidSlot(invalidated_object_->map(), offset);
}
} // namespace internal
diff --git a/deps/v8/src/heap/item-parallel-job.cc b/deps/v8/src/heap/item-parallel-job.cc
index 1c8d4c8ac4..e909ef69d7 100644
--- a/deps/v8/src/heap/item-parallel-job.cc
+++ b/deps/v8/src/heap/item-parallel-job.cc
@@ -90,10 +90,11 @@ void ItemParallelJob::Run(std::shared_ptr<Counters> async_counters) {
: 0;
CancelableTaskManager::Id* task_ids =
new CancelableTaskManager::Id[num_tasks];
- Task* main_task = nullptr;
+ std::unique_ptr<Task> main_task;
for (size_t i = 0, start_index = 0; i < num_tasks;
i++, start_index += items_per_task + (i < items_remainder ? 1 : 0)) {
- Task* task = tasks_[i];
+ auto task = std::move(tasks_[i]);
+ DCHECK(task);
      // By definition there are fewer |items_remainder| to distribute than
// there are tasks processing items so this cannot overflow while we are
@@ -105,16 +106,15 @@ void ItemParallelJob::Run(std::shared_ptr<Counters> async_counters) {
: base::Optional<AsyncTimedHistogram>());
task_ids[i] = task->id();
if (i > 0) {
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- task, v8::Platform::kShortRunningTask);
+ V8::GetCurrentPlatform()->CallBlockingTaskOnWorkerThread(std::move(task));
} else {
- main_task = task;
+ main_task = std::move(task);
}
}
// Contribute on main thread.
+ DCHECK(main_task);
main_task->Run();
- delete main_task;
// Wait for background tasks.
for (size_t i = 0; i < num_tasks; i++) {
diff --git a/deps/v8/src/heap/item-parallel-job.h b/deps/v8/src/heap/item-parallel-job.h
index 4c21f69ca9..51e0afd401 100644
--- a/deps/v8/src/heap/item-parallel-job.h
+++ b/deps/v8/src/heap/item-parallel-job.h
@@ -126,7 +126,7 @@ class V8_EXPORT_PRIVATE ItemParallelJob {
~ItemParallelJob();
// Adds a task to the job. Transfers ownership to the job.
- void AddTask(Task* task) { tasks_.push_back(task); }
+ void AddTask(Task* task) { tasks_.push_back(std::unique_ptr<Task>(task)); }
// Adds an item to the job. Transfers ownership to the job.
void AddItem(Item* item) { items_.push_back(item); }
@@ -140,7 +140,7 @@ class V8_EXPORT_PRIVATE ItemParallelJob {
private:
std::vector<Item*> items_;
- std::vector<Task*> tasks_;
+ std::vector<std::unique_ptr<Task>> tasks_;
CancelableTaskManager* cancelable_task_manager_;
base::Semaphore* pending_tasks_;
DISALLOW_COPY_AND_ASSIGN(ItemParallelJob);
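The ownership change above can be restated generically (a sketch, not V8 code): the job owns its tasks as std::unique_ptr, worker tasks are moved to the scheduler, and the first task is moved out, run on the calling thread, and destroyed automatically, which removes the manual delete:

#include <cstddef>
#include <memory>
#include <utility>
#include <vector>

struct Task {
  virtual ~Task() = default;
  virtual void Run() = 0;
};

// Generic sketch of the same ownership pattern; post_to_worker is a stand-in
// for CallBlockingTaskOnWorkerThread().
void RunAll(std::vector<std::unique_ptr<Task>>* tasks,
            void (*post_to_worker)(std::unique_ptr<Task>)) {
  std::unique_ptr<Task> main_task;
  for (std::size_t i = 0; i < tasks->size(); ++i) {
    auto task = std::move((*tasks)[i]);
    if (i > 0) {
      post_to_worker(std::move(task));  // worker thread now owns the task
    } else {
      main_task = std::move(task);      // contribute on the calling thread
    }
  }
  if (main_task) main_task->Run();  // freed when main_task leaves scope
}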
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index a6bbecd88e..c21c7dda6e 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -28,7 +28,7 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode,
MarkingState>::VisitAllocationSite(Map* map,
AllocationSite* object) {
int size = AllocationSite::BodyDescriptorWeak::SizeOf(map, object);
- AllocationSite::BodyDescriptorWeak::IterateBody(object, size, this);
+ AllocationSite::BodyDescriptorWeak::IterateBody(map, object, size, this);
return size;
}
@@ -38,7 +38,7 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode,
MarkingState>::VisitBytecodeArray(Map* map,
BytecodeArray* array) {
int size = BytecodeArray::BodyDescriptor::SizeOf(map, array);
- BytecodeArray::BodyDescriptor::IterateBody(array, size, this);
+ BytecodeArray::BodyDescriptor::IterateBody(map, array, size, this);
array->MakeOlder();
return size;
}
@@ -48,7 +48,7 @@ template <FixedArrayVisitationMode fixed_array_mode,
int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
VisitCodeDataContainer(Map* map, CodeDataContainer* object) {
int size = CodeDataContainer::BodyDescriptorWeak::SizeOf(map, object);
- CodeDataContainer::BodyDescriptorWeak::IterateBody(object, size, this);
+ CodeDataContainer::BodyDescriptorWeak::IterateBody(map, object, size, this);
return size;
}
@@ -71,7 +71,7 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode,
heap_->TracePossibleWrapper(object);
}
int size = JSObject::BodyDescriptor::SizeOf(map, object);
- JSObject::BodyDescriptor::IterateBody(object, size, this);
+ JSObject::BodyDescriptor::IterateBody(map, object, size, this);
return size;
}
@@ -81,7 +81,7 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode,
MarkingState>::VisitJSFunction(Map* map,
JSFunction* object) {
int size = JSFunction::BodyDescriptorWeak::SizeOf(map, object);
- JSFunction::BodyDescriptorWeak::IterateBody(object, size, this);
+ JSFunction::BodyDescriptorWeak::IterateBody(map, object, size, this);
return size;
}
@@ -98,7 +98,7 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
// Skip visiting the backing hash table containing the mappings and the
// pointer to the other enqueued weak collections, both are post-processed.
int size = JSWeakCollection::BodyDescriptorWeak::SizeOf(map, weak_collection);
- JSWeakCollection::BodyDescriptorWeak::IterateBody(weak_collection, size,
+ JSWeakCollection::BodyDescriptorWeak::IterateBody(map, weak_collection, size,
this);
// Partially initialized weak collection is enqueued, but table is ignored.
@@ -119,14 +119,13 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode,
MarkingState>::VisitMap(Map* map, Map* object) {
// When map collection is enabled we have to mark through map's transitions
// and back pointers in a special way to make these links weak.
+ int size = Map::BodyDescriptor::SizeOf(map, object);
if (object->CanTransition()) {
MarkMapContents(object);
} else {
- VisitPointers(object,
- HeapObject::RawField(object, Map::kPointerFieldsBeginOffset),
- HeapObject::RawField(object, Map::kPointerFieldsEndOffset));
+ Map::BodyDescriptor::IterateBody(map, object, size, this);
}
- return Map::BodyDescriptor::SizeOf(map, object);
+ return size;
}
template <FixedArrayVisitationMode fixed_array_mode,
@@ -135,7 +134,7 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode,
MarkingState>::VisitNativeContext(Map* map,
Context* context) {
int size = Context::BodyDescriptorWeak::SizeOf(map, context);
- Context::BodyDescriptorWeak::IterateBody(context, size, this);
+ Context::BodyDescriptorWeak::IterateBody(map, context, size, this);
return size;
}
@@ -145,7 +144,7 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode,
MarkingState>::VisitTransitionArray(Map* map,
TransitionArray* array) {
int size = TransitionArray::BodyDescriptor::SizeOf(map, array);
- TransitionArray::BodyDescriptor::IterateBody(array, size, this);
+ TransitionArray::BodyDescriptor::IterateBody(map, array, size, this);
collector_->AddTransitionArray(array);
return size;
}
@@ -187,6 +186,32 @@ void MarkingVisitor<fixed_array_mode, retaining_path_mode,
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
void MarkingVisitor<fixed_array_mode, retaining_path_mode,
+ MarkingState>::VisitPointer(HeapObject* host,
+ MaybeObject** p) {
+ HeapObject* target_object;
+ if ((*p)->ToStrongHeapObject(&target_object)) {
+ collector_->RecordSlot(host, reinterpret_cast<HeapObjectReference**>(p),
+ target_object);
+ MarkObject(host, target_object);
+ } else if ((*p)->ToWeakHeapObject(&target_object)) {
+ if (marking_state()->IsBlackOrGrey(target_object)) {
+ // Weak references with live values are directly processed here to reduce
+ // the processing time of weak cells during the main GC pause.
+ collector_->RecordSlot(host, reinterpret_cast<HeapObjectReference**>(p),
+ target_object);
+ } else {
+ // If we do not know about liveness of values of weak cells, we have to
+ // process them when we know the liveness of the whole transitive
+ // closure.
+ collector_->AddWeakReference(host,
+ reinterpret_cast<HeapObjectReference**>(p));
+ }
+ }
+}
+
+template <FixedArrayVisitationMode fixed_array_mode,
+ TraceRetainingPathMode retaining_path_mode, typename MarkingState>
+void MarkingVisitor<fixed_array_mode, retaining_path_mode,
MarkingState>::VisitPointers(HeapObject* host,
Object** start, Object** end) {
for (Object** p = start; p < end; p++) {
@@ -197,6 +222,17 @@ void MarkingVisitor<fixed_array_mode, retaining_path_mode,
template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
void MarkingVisitor<fixed_array_mode, retaining_path_mode,
+ MarkingState>::VisitPointers(HeapObject* host,
+ MaybeObject** start,
+ MaybeObject** end) {
+ for (MaybeObject** p = start; p < end; p++) {
+ VisitPointer(host, p);
+ }
+}
+
+template <FixedArrayVisitationMode fixed_array_mode,
+ TraceRetainingPathMode retaining_path_mode, typename MarkingState>
+void MarkingVisitor<fixed_array_mode, retaining_path_mode,
MarkingState>::VisitEmbeddedPointer(Code* host,
RelocInfo* rinfo) {
DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
@@ -204,6 +240,8 @@ void MarkingVisitor<fixed_array_mode, retaining_path_mode,
collector_->RecordRelocSlot(host, rinfo, object);
if (!host->IsWeakObject(object)) {
MarkObject(host, object);
+ } else if (!marking_state()->IsBlackOrGrey(object)) {
+ collector_->AddWeakObjectInCode(object, host);
}
}
@@ -288,7 +326,7 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
}
}
} else {
- FixedArray::BodyDescriptor::IterateBody(object, object_size, this);
+ FixedArray::BodyDescriptor::IterateBody(map, object, object_size, this);
}
return object_size;
}
@@ -320,8 +358,8 @@ void MarkingVisitor<fixed_array_mode, retaining_path_mode,
// Mark the pointer fields of the Map. Since the transitions array has
// been marked already, it is fine that one of these fields contains a
// pointer to it.
- VisitPointers(map, HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
- HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
+ Map::BodyDescriptor::IterateBody(
+ map->map(), map, Map::BodyDescriptor::SizeOf(map->map(), map), this);
}
void MarkCompactCollector::MarkObject(HeapObject* host, HeapObject* obj) {
@@ -353,6 +391,12 @@ void MarkCompactCollector::MarkExternallyReferencedObject(HeapObject* obj) {
void MarkCompactCollector::RecordSlot(HeapObject* object, Object** slot,
Object* target) {
+ RecordSlot(object, reinterpret_cast<HeapObjectReference**>(slot), target);
+}
+
+void MarkCompactCollector::RecordSlot(HeapObject* object,
+ HeapObjectReference** slot,
+ Object* target) {
Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
Page* source_page = Page::FromAddress(reinterpret_cast<Address>(object));
if (target_page->IsEvacuationCandidate<AccessMode::ATOMIC>() &&
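The per-slot decision made by the MaybeObject overload of VisitPointer() above reduces to three outcomes; a hypothetical summary with invented names (not V8 code):

// Classification of a MaybeObject slot during marking, for illustration.
enum class SlotAction { kRecordAndMark, kRecordOnly, kDeferAsWeak, kIgnore };

SlotAction Classify(bool is_strong, bool is_weak, bool target_already_marked) {
  if (is_strong) return SlotAction::kRecordAndMark;
  if (is_weak) {
    // Live weak targets are recorded immediately; otherwise the reference is
    // queued and only resolved once the transitive closure is complete.
    return target_already_marked ? SlotAction::kRecordOnly
                                 : SlotAction::kDeferAsWeak;
  }
  return SlotAction::kIgnore;  // Smis and cleared weak references
}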
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index c6c8c29962..eedc942835 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -62,6 +62,7 @@ class MarkingVerifier : public ObjectVisitor, public RootVisitor {
virtual Bitmap* bitmap(const MemoryChunk* chunk) = 0;
virtual void VerifyPointers(Object** start, Object** end) = 0;
+ virtual void VerifyPointers(MaybeObject** start, MaybeObject** end) = 0;
virtual bool IsMarked(HeapObject* object) = 0;
@@ -71,6 +72,11 @@ class MarkingVerifier : public ObjectVisitor, public RootVisitor {
VerifyPointers(start, end);
}
+ void VisitPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) override {
+ VerifyPointers(start, end);
+ }
+
void VisitRootPointers(Root root, const char* description, Object** start,
Object** end) override {
VerifyPointers(start, end);
@@ -182,6 +188,15 @@ class FullMarkingVerifier : public MarkingVerifier {
}
}
+ void VerifyPointers(MaybeObject** start, MaybeObject** end) override {
+ for (MaybeObject** current = start; current < end; current++) {
+ HeapObject* object;
+ if ((*current)->ToStrongHeapObject(&object)) {
+ CHECK(marking_state_->IsBlackOrGrey(object));
+ }
+ }
+ }
+
void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
if (!host->IsWeakObject(rinfo->target_object())) {
@@ -194,44 +209,6 @@ class FullMarkingVerifier : public MarkingVerifier {
MarkCompactCollector::NonAtomicMarkingState* marking_state_;
};
-class YoungGenerationMarkingVerifier : public MarkingVerifier {
- public:
- explicit YoungGenerationMarkingVerifier(Heap* heap)
- : MarkingVerifier(heap),
- marking_state_(
- heap->minor_mark_compact_collector()->non_atomic_marking_state()) {}
-
- Bitmap* bitmap(const MemoryChunk* chunk) override {
- return marking_state_->bitmap(chunk);
- }
-
- bool IsMarked(HeapObject* object) override {
- return marking_state_->IsGrey(object);
- }
-
- bool IsBlackOrGrey(HeapObject* object) override {
- return marking_state_->IsBlackOrGrey(object);
- }
-
- void Run() override {
- VerifyRoots(VISIT_ALL_IN_SCAVENGE);
- VerifyMarking(heap_->new_space());
- }
-
- void VerifyPointers(Object** start, Object** end) override {
- for (Object** current = start; current < end; current++) {
- if ((*current)->IsHeapObject()) {
- HeapObject* object = HeapObject::cast(*current);
- if (!heap_->InNewSpace(object)) return;
- CHECK(IsMarked(object));
- }
- }
- }
-
- private:
- MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
-};
-
class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
public:
virtual void Run() = 0;
@@ -240,6 +217,11 @@ class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
VerifyPointers(start, end);
}
+ void VisitPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) override {
+ VerifyPointers(start, end);
+ }
+
void VisitRootPointers(Root root, const char* description, Object** start,
Object** end) override {
VerifyPointers(start, end);
@@ -251,6 +233,7 @@ class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
inline Heap* heap() { return heap_; }
virtual void VerifyPointers(Object** start, Object** end) = 0;
+ virtual void VerifyPointers(MaybeObject** start, MaybeObject** end) = 0;
void VerifyRoots(VisitMode mode);
void VerifyEvacuationOnPage(Address start, Address end);
@@ -320,27 +303,14 @@ class FullEvacuationVerifier : public EvacuationVerifier {
}
}
}
-};
-
-class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
- public:
- explicit YoungGenerationEvacuationVerifier(Heap* heap)
- : EvacuationVerifier(heap) {}
-
- void Run() override {
- VerifyRoots(VISIT_ALL_IN_SCAVENGE);
- VerifyEvacuation(heap_->new_space());
- VerifyEvacuation(heap_->old_space());
- VerifyEvacuation(heap_->code_space());
- VerifyEvacuation(heap_->map_space());
- }
-
- protected:
- void VerifyPointers(Object** start, Object** end) override {
- for (Object** current = start; current < end; current++) {
- if ((*current)->IsHeapObject()) {
- HeapObject* object = HeapObject::cast(*current);
- CHECK_IMPLIES(heap()->InNewSpace(object), heap()->InToSpace(object));
+ void VerifyPointers(MaybeObject** start, MaybeObject** end) override {
+ for (MaybeObject** current = start; current < end; current++) {
+ HeapObject* object;
+ if ((*current)->ToStrongHeapObject(&object)) {
+ if (heap()->InNewSpace(object)) {
+ CHECK(heap()->InToSpace(object));
+ }
+ CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
}
}
}
@@ -408,16 +378,10 @@ class RootMarkingVisitorSeedOnly : public RootVisitor {
};
int NumberOfAvailableCores() {
- static int num_cores =
- static_cast<int>(
- V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()) +
- 1;
+ static int num_cores = V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
// This number of cores should be greater than zero and never change.
DCHECK_GE(num_cores, 1);
- DCHECK_EQ(
- num_cores,
- 1 + static_cast<int>(
- V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
+ DCHECK_EQ(num_cores, V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1);
return num_cores;
}
@@ -425,7 +389,14 @@ int NumberOfAvailableCores() {
int MarkCompactCollectorBase::NumberOfParallelCompactionTasks(int pages) {
DCHECK_GT(pages, 0);
- return FLAG_parallel_compaction ? Min(NumberOfAvailableCores(), pages) : 1;
+ int tasks =
+ FLAG_parallel_compaction ? Min(NumberOfAvailableCores(), pages) : 1;
+ if (!heap_->CanExpandOldGeneration(
+ static_cast<size_t>(tasks * Page::kPageSize))) {
+ // Optimize for memory usage near the heap limit.
+ tasks = 1;
+ }
+ return tasks;
}
int MarkCompactCollectorBase::NumberOfParallelPointerUpdateTasks(int pages,
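The added clamp reduces to: take min(cores, pages) compaction tasks, but fall back to a single task when the old generation cannot grow by one page per task. A standalone numeric sketch (the page size constant is assumed, and CanExpandOldGeneration is modelled as a headroom check):

#include <algorithm>
#include <cstddef>

int CompactionTasks(int cores, int pages, std::size_t old_gen_headroom) {
  const std::size_t kPageSize = 512 * 1024;  // assumed for this sketch
  int tasks = std::min(cores, pages);
  if (old_gen_headroom < static_cast<std::size_t>(tasks) * kPageSize) {
    tasks = 1;  // optimize for memory usage near the heap limit
  }
  return tasks;
}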
@@ -452,16 +423,6 @@ int MarkCompactCollectorBase::NumberOfParallelToSpacePointerUpdateTasks(
: 1;
}
-int MinorMarkCompactCollector::NumberOfParallelMarkingTasks(int pages) {
- DCHECK_GT(pages, 0);
- if (!FLAG_minor_mc_parallel_marking) return 1;
- // Pages are not private to markers but we can still use them to estimate the
- // amount of marking that is required.
- const int kPagesPerTask = 2;
- const int wanted_tasks = Max(1, pages / kPagesPerTask);
- return Min(NumberOfAvailableCores(), Min(wanted_tasks, kNumMarkers));
-}
-
MarkCompactCollector::MarkCompactCollector(Heap* heap)
: MarkCompactCollectorBase(heap),
page_parallel_job_semaphore_(0),
@@ -487,8 +448,6 @@ void MarkCompactCollector::SetUp() {
DCHECK_EQ(0, strcmp(Marking::kImpossibleBitPattern, "01"));
}
-void MinorMarkCompactCollector::SetUp() {}
-
void MarkCompactCollector::TearDown() {
AbortCompaction();
AbortWeakObjects();
@@ -497,8 +456,6 @@ void MarkCompactCollector::TearDown() {
}
}
-void MinorMarkCompactCollector::TearDown() {}
-
void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
DCHECK(!p->NeverEvacuate());
p->MarkEvacuationCandidate();
@@ -542,7 +499,9 @@ void MarkCompactCollector::CollectGarbage() {
// update the state as they proceed.
DCHECK(state_ == PREPARE_GC);
+#ifdef ENABLE_MINOR_MC
heap()->minor_mark_compact_collector()->CleanupSweepToIteratePages();
+#endif // ENABLE_MINOR_MC
MarkLiveObjects();
ClearNonLiveReferences();
@@ -558,6 +517,13 @@ void MarkCompactCollector::CollectGarbage() {
}
#ifdef VERIFY_HEAP
+void MarkCompactCollector::VerifyMarkbitsAreDirty(PagedSpace* space) {
+ HeapObjectIterator iterator(space);
+ while (HeapObject* object = iterator.Next()) {
+ CHECK(non_atomic_marking_state()->IsBlack(object));
+ }
+}
+
void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
for (Page* p : *space) {
CHECK(non_atomic_marking_state()->bitmap(p)->IsClean());
@@ -579,6 +545,9 @@ void MarkCompactCollector::VerifyMarkbitsAreClean() {
VerifyMarkbitsAreClean(heap_->code_space());
VerifyMarkbitsAreClean(heap_->map_space());
VerifyMarkbitsAreClean(heap_->new_space());
+ // Read-only space should always be black since we never collect any objects
+ // in it or linked from it.
+ VerifyMarkbitsAreDirty(heap_->read_only_space());
LargeObjectIterator it(heap_->lo_space());
for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
@@ -588,17 +557,6 @@ void MarkCompactCollector::VerifyMarkbitsAreClean() {
}
}
-void MarkCompactCollector::VerifyWeakEmbeddedObjectsInCode() {
- HeapObjectIterator code_iterator(heap()->code_space());
- for (HeapObject* obj = code_iterator.Next(); obj != nullptr;
- obj = code_iterator.Next()) {
- Code* code = Code::cast(obj);
- if (!code->is_optimized_code()) continue;
- if (WillBeDeoptimized(code)) continue;
- code->VerifyEmbeddedObjectsDependency();
- }
-}
-
#endif // VERIFY_HEAP
void MarkCompactCollector::ClearMarkbitsInPagedSpace(PagedSpace* space) {
@@ -936,9 +894,6 @@ void MarkCompactCollector::Finish() {
sweeper()->StartSweeperTasks();
sweeper()->StartIterabilityTasks();
- // The hashing of weak_object_to_code_table is no longer valid.
- heap()->weak_object_to_code_table()->Rehash();
-
// Clear the marking state of live large objects.
heap_->lo_space()->ClearMarkingStateOfLiveObjects();
@@ -962,16 +917,6 @@ void MarkCompactCollector::Finish() {
}
}
-void MinorMarkCompactCollector::CleanupSweepToIteratePages() {
- for (Page* p : sweep_to_iterate_pages_) {
- if (p->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
- p->ClearFlag(Page::SWEEP_TO_ITERATE);
- non_atomic_marking_state()->ClearLiveness(p);
- }
- }
- sweep_to_iterate_pages_.clear();
-}
-
class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
public:
explicit RootMarkingVisitor(MarkCompactCollector* collector)
@@ -1016,7 +961,16 @@ class MarkCompactCollector::CustomRootBodyMarkingVisitor final
}
void VisitPointers(HeapObject* host, Object** start, Object** end) final {
- for (Object** p = start; p < end; p++) MarkObject(host, *p);
+ for (Object** p = start; p < end; p++) {
+ DCHECK(!HasWeakHeapObjectTag(*p));
+ MarkObject(host, *p);
+ }
+ }
+
+ void VisitPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) final {
+ // At the moment, custom roots cannot contain weak pointers.
+ UNREACHABLE();
}
// VisitEmbedderPointer is defined by ObjectVisitor to call VisitPointers.
@@ -1057,6 +1011,11 @@ class InternalizedStringTableCleaner : public ObjectVisitor {
}
}
+ void VisitPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) final {
+ UNREACHABLE();
+ }
+
int PointersRemoved() {
return pointers_removed_;
}
@@ -1099,68 +1058,6 @@ class ExternalStringTableCleaner : public RootVisitor {
Heap* heap_;
};
-// Helper class for pruning the string table.
-class YoungGenerationExternalStringTableCleaner : public RootVisitor {
- public:
- YoungGenerationExternalStringTableCleaner(
- MinorMarkCompactCollector* collector)
- : heap_(collector->heap()),
- marking_state_(collector->non_atomic_marking_state()) {}
-
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override {
- DCHECK_EQ(static_cast<int>(root),
- static_cast<int>(Root::kExternalStringsTable));
- // Visit all HeapObject pointers in [start, end).
- for (Object** p = start; p < end; p++) {
- Object* o = *p;
- if (o->IsHeapObject()) {
- HeapObject* heap_object = HeapObject::cast(o);
- if (marking_state_->IsWhite(heap_object)) {
- if (o->IsExternalString()) {
- heap_->FinalizeExternalString(String::cast(*p));
- } else {
- // The original external string may have been internalized.
- DCHECK(o->IsThinString());
- }
- // Set the entry to the_hole_value (as deleted).
- *p = heap_->the_hole_value();
- }
- }
- }
- }
-
- private:
- Heap* heap_;
- MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
-};
-
-// Marked young generation objects and all old generation objects will be
-// retained.
-class MinorMarkCompactWeakObjectRetainer : public WeakObjectRetainer {
- public:
- explicit MinorMarkCompactWeakObjectRetainer(
- MinorMarkCompactCollector* collector)
- : heap_(collector->heap()),
- marking_state_(collector->non_atomic_marking_state()) {}
-
- virtual Object* RetainAs(Object* object) {
- HeapObject* heap_object = HeapObject::cast(object);
- if (!heap_->InNewSpace(heap_object)) return object;
-
- // Young generation marking only marks to grey instead of black.
- DCHECK(!marking_state_->IsBlack(heap_object));
- if (marking_state_->IsGrey(heap_object)) {
- return object;
- }
- return nullptr;
- }
-
- private:
- Heap* heap_;
- MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
-};
-
// Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
// are retained.
class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
@@ -1197,13 +1094,27 @@ class RecordMigratedSlotVisitor : public ObjectVisitor {
: collector_(collector) {}
inline void VisitPointer(HeapObject* host, Object** p) final {
+ DCHECK(!HasWeakHeapObjectTag(*p));
+ RecordMigratedSlot(host, reinterpret_cast<MaybeObject*>(*p),
+ reinterpret_cast<Address>(p));
+ }
+
+ inline void VisitPointer(HeapObject* host, MaybeObject** p) final {
RecordMigratedSlot(host, *p, reinterpret_cast<Address>(p));
}
inline void VisitPointers(HeapObject* host, Object** start,
Object** end) final {
while (start < end) {
- RecordMigratedSlot(host, *start, reinterpret_cast<Address>(start));
+ VisitPointer(host, start);
+ ++start;
+ }
+ }
+
+ inline void VisitPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) final {
+ while (start < end) {
+ VisitPointer(host, start);
++start;
}
}
@@ -1233,9 +1144,9 @@ class RecordMigratedSlotVisitor : public ObjectVisitor {
inline void VisitInternalReference(Code* host, RelocInfo* rinfo) final {}
protected:
- inline virtual void RecordMigratedSlot(HeapObject* host, Object* value,
+ inline virtual void RecordMigratedSlot(HeapObject* host, MaybeObject* value,
Address slot) {
- if (value->IsHeapObject()) {
+ if (value->IsStrongOrWeakHeapObject()) {
Page* p = Page::FromAddress(reinterpret_cast<Address>(value));
if (p->InNewSpace()) {
DCHECK_IMPLIES(p->InToSpace(),
@@ -1278,65 +1189,6 @@ class ProfilingMigrationObserver final : public MigrationObserver {
}
};
-class YoungGenerationMigrationObserver final : public MigrationObserver {
- public:
- YoungGenerationMigrationObserver(Heap* heap,
- MarkCompactCollector* mark_compact_collector)
- : MigrationObserver(heap),
- mark_compact_collector_(mark_compact_collector) {}
-
- inline void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst,
- int size) final {
- // Migrate color to old generation marking in case the object survived young
- // generation garbage collection.
- if (heap_->incremental_marking()->IsMarking()) {
- DCHECK(
- heap_->incremental_marking()->atomic_marking_state()->IsWhite(dst));
- heap_->incremental_marking()->TransferColor(src, dst);
- }
- }
-
- protected:
- base::Mutex mutex_;
- MarkCompactCollector* mark_compact_collector_;
-};
-
-class YoungGenerationRecordMigratedSlotVisitor final
- : public RecordMigratedSlotVisitor {
- public:
- explicit YoungGenerationRecordMigratedSlotVisitor(
- MarkCompactCollector* collector)
- : RecordMigratedSlotVisitor(collector) {}
-
- void VisitCodeTarget(Code* host, RelocInfo* rinfo) final { UNREACHABLE(); }
- void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) final {
- UNREACHABLE();
- }
-
- private:
- // Only record slots for host objects that are considered as live by the full
- // collector.
- inline bool IsLive(HeapObject* object) {
- return collector_->non_atomic_marking_state()->IsBlack(object);
- }
-
- inline void RecordMigratedSlot(HeapObject* host, Object* value,
- Address slot) final {
- if (value->IsHeapObject()) {
- Page* p = Page::FromAddress(reinterpret_cast<Address>(value));
- if (p->InNewSpace()) {
- DCHECK_IMPLIES(p->InToSpace(),
- p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
- RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(
- Page::FromAddress(slot), slot);
- } else if (p->IsEvacuationCandidate() && IsLive(host)) {
- RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
- Page::FromAddress(slot), slot);
- }
- }
- }
-};
-
class HeapObjectVisitor {
public:
virtual ~HeapObjectVisitor() {}
@@ -1371,16 +1223,14 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
base->heap_->CopyBlock(dst_addr, src_addr, size);
if (mode != MigrationMode::kFast)
base->ExecuteMigrationObservers(dest, src, dst, size);
- dst->IterateBodyFast(dst->map()->instance_type(), size,
- base->record_visitor_);
+ dst->IterateBodyFast(dst->map(), size, base->record_visitor_);
} else if (dest == CODE_SPACE) {
DCHECK_CODEOBJECT_SIZE(size, base->heap_->code_space());
base->heap_->CopyBlock(dst_addr, src_addr, size);
Code::cast(dst)->Relocate(dst_addr - src_addr);
if (mode != MigrationMode::kFast)
base->ExecuteMigrationObservers(dest, src, dst, size);
- dst->IterateBodyFast(dst->map()->instance_type(), size,
- base->record_visitor_);
+ dst->IterateBodyFast(dst->map(), size, base->record_visitor_);
} else {
DCHECK_OBJECT_SIZE(size);
DCHECK(dest == NEW_SPACE);
@@ -1532,8 +1382,8 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
AllocationResult allocation =
local_allocator_->Allocate(OLD_SPACE, size_in_bytes, alignment);
if (allocation.IsRetry()) {
- v8::internal::Heap::FatalProcessOutOfMemory(
- "MarkCompactCollector: semi-space copy, fallback in old gen", true);
+ heap_->FatalProcessOutOfMemory(
+ "MarkCompactCollector: semi-space copy, fallback in old gen");
}
return allocation;
}
@@ -1616,7 +1466,7 @@ class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
inline bool Visit(HeapObject* object, int size) {
RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
- object->IterateBody(&visitor);
+ object->IterateBodyFast(&visitor);
return true;
}
@@ -1699,7 +1549,7 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
if (it.frame()->type() == StackFrame::OPTIMIZED) {
Code* code = it.frame()->LookupCode();
if (!code->CanDeoptAt(it.frame()->pc())) {
- Code::BodyDescriptor::IterateBody(code, visitor);
+ Code::BodyDescriptor::IterateBody(code->map(), code, visitor);
}
return;
}
@@ -1731,598 +1581,6 @@ void MarkCompactCollector::RecordObjectStats() {
}
}
-class YoungGenerationMarkingVisitor final
- : public NewSpaceVisitor<YoungGenerationMarkingVisitor> {
- public:
- YoungGenerationMarkingVisitor(
- Heap* heap, MinorMarkCompactCollector::MarkingState* marking_state,
- MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
- : heap_(heap),
- worklist_(global_worklist, task_id),
- marking_state_(marking_state) {}
-
- V8_INLINE void VisitPointers(HeapObject* host, Object** start,
- Object** end) final {
- for (Object** p = start; p < end; p++) {
- VisitPointer(host, p);
- }
- }
-
- V8_INLINE void VisitPointer(HeapObject* host, Object** slot) final {
- Object* target = *slot;
- if (heap_->InNewSpace(target)) {
- HeapObject* target_object = HeapObject::cast(target);
- MarkObjectViaMarkingWorklist(target_object);
- }
- }
-
- private:
- inline void MarkObjectViaMarkingWorklist(HeapObject* object) {
- if (marking_state_->WhiteToGrey(object)) {
- // Marking deque overflow is unsupported for the young generation.
- CHECK(worklist_.Push(object));
- }
- }
-
- Heap* heap_;
- MinorMarkCompactCollector::MarkingWorklist::View worklist_;
- MinorMarkCompactCollector::MarkingState* marking_state_;
-};
-
-class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
- public:
- explicit RootMarkingVisitor(MinorMarkCompactCollector* collector)
- : collector_(collector),
- marking_state_(collector_->non_atomic_marking_state()) {}
-
- void VisitRootPointer(Root root, const char* description,
- Object** p) override {
- MarkObjectByPointer(p);
- }
-
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override {
- for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
- }
-
- private:
- void MarkObjectByPointer(Object** p) {
- if (!(*p)->IsHeapObject()) return;
-
- HeapObject* object = HeapObject::cast(*p);
-
- if (!collector_->heap()->InNewSpace(object)) return;
-
- if (marking_state_->WhiteToGrey(object)) {
- collector_->main_marking_visitor()->Visit(object);
- collector_->ProcessMarkingWorklist();
- }
- }
-
- MinorMarkCompactCollector* collector_;
- MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
-};
-
-class MarkingItem;
-class GlobalHandlesMarkingItem;
-class PageMarkingItem;
-class RootMarkingItem;
-class YoungGenerationMarkingTask;
-
-class MarkingItem : public ItemParallelJob::Item {
- public:
- virtual ~MarkingItem() {}
- virtual void Process(YoungGenerationMarkingTask* task) = 0;
-};
-
-class YoungGenerationMarkingTask : public ItemParallelJob::Task {
- public:
- YoungGenerationMarkingTask(
- Isolate* isolate, MinorMarkCompactCollector* collector,
- MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
- : ItemParallelJob::Task(isolate),
- collector_(collector),
- marking_worklist_(global_worklist, task_id),
- marking_state_(collector->marking_state()),
- visitor_(isolate->heap(), marking_state_, global_worklist, task_id) {
- local_live_bytes_.reserve(isolate->heap()->new_space()->Capacity() /
- Page::kPageSize);
- }
-
- void RunInParallel() override {
- TRACE_BACKGROUND_GC(collector_->heap()->tracer(),
- GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_MARKING);
- double marking_time = 0.0;
- {
- TimedScope scope(&marking_time);
- MarkingItem* item = nullptr;
- while ((item = GetItem<MarkingItem>()) != nullptr) {
- item->Process(this);
- item->MarkFinished();
- EmptyLocalMarkingWorklist();
- }
- EmptyMarkingWorklist();
- DCHECK(marking_worklist_.IsLocalEmpty());
- FlushLiveBytes();
- }
- if (FLAG_trace_minor_mc_parallel_marking) {
- PrintIsolate(collector_->isolate(), "marking[%p]: time=%f\n",
- static_cast<void*>(this), marking_time);
- }
- };
-
- void MarkObject(Object* object) {
- if (!collector_->heap()->InNewSpace(object)) return;
- HeapObject* heap_object = HeapObject::cast(object);
- if (marking_state_->WhiteToGrey(heap_object)) {
- const int size = visitor_.Visit(heap_object);
- IncrementLiveBytes(heap_object, size);
- }
- }
-
- private:
- void EmptyLocalMarkingWorklist() {
- HeapObject* object = nullptr;
- while (marking_worklist_.Pop(&object)) {
- const int size = visitor_.Visit(object);
- IncrementLiveBytes(object, size);
- }
- }
-
- void EmptyMarkingWorklist() {
- HeapObject* object = nullptr;
- while (marking_worklist_.Pop(&object)) {
- const int size = visitor_.Visit(object);
- IncrementLiveBytes(object, size);
- }
- }
-
- void IncrementLiveBytes(HeapObject* object, intptr_t bytes) {
- local_live_bytes_[Page::FromAddress(reinterpret_cast<Address>(object))] +=
- bytes;
- }
-
- void FlushLiveBytes() {
- for (auto pair : local_live_bytes_) {
- marking_state_->IncrementLiveBytes(pair.first, pair.second);
- }
- }
-
- MinorMarkCompactCollector* collector_;
- MinorMarkCompactCollector::MarkingWorklist::View marking_worklist_;
- MinorMarkCompactCollector::MarkingState* marking_state_;
- YoungGenerationMarkingVisitor visitor_;
- std::unordered_map<Page*, intptr_t, Page::Hasher> local_live_bytes_;
-};
-
-class BatchedRootMarkingItem : public MarkingItem {
- public:
- explicit BatchedRootMarkingItem(std::vector<Object*>&& objects)
- : objects_(objects) {}
- virtual ~BatchedRootMarkingItem() {}
-
- void Process(YoungGenerationMarkingTask* task) override {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
- "BatchedRootMarkingItem::Process");
- for (Object* object : objects_) {
- task->MarkObject(object);
- }
- }
-
- private:
- std::vector<Object*> objects_;
-};
-
-class PageMarkingItem : public MarkingItem {
- public:
- explicit PageMarkingItem(MemoryChunk* chunk,
- base::AtomicNumber<intptr_t>* global_slots)
- : chunk_(chunk), global_slots_(global_slots), slots_(0) {}
- virtual ~PageMarkingItem() { global_slots_->Increment(slots_); }
-
- void Process(YoungGenerationMarkingTask* task) override {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
- "PageMarkingItem::Process");
- base::LockGuard<base::Mutex> guard(chunk_->mutex());
- MarkUntypedPointers(task);
- MarkTypedPointers(task);
- }
-
- private:
- inline Heap* heap() { return chunk_->heap(); }
-
- void MarkUntypedPointers(YoungGenerationMarkingTask* task) {
- RememberedSet<OLD_TO_NEW>::Iterate(
- chunk_,
- [this, task](Address slot) { return CheckAndMarkObject(task, slot); },
- SlotSet::PREFREE_EMPTY_BUCKETS);
- }
-
- void MarkTypedPointers(YoungGenerationMarkingTask* task) {
- Isolate* isolate = heap()->isolate();
- RememberedSet<OLD_TO_NEW>::IterateTyped(
- chunk_, [this, isolate, task](SlotType slot_type, Address host_addr,
- Address slot) {
- return UpdateTypedSlotHelper::UpdateTypedSlot(
- isolate, slot_type, slot, [this, task](Object** slot) {
- return CheckAndMarkObject(task,
- reinterpret_cast<Address>(slot));
- });
- });
- }
-
- SlotCallbackResult CheckAndMarkObject(YoungGenerationMarkingTask* task,
- Address slot_address) {
- Object* object = *reinterpret_cast<Object**>(slot_address);
- if (heap()->InNewSpace(object)) {
- // Marking happens before flipping the young generation, so the object
- // has to be in ToSpace.
- DCHECK(heap()->InToSpace(object));
- HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
- task->MarkObject(heap_object);
- slots_++;
- return KEEP_SLOT;
- }
- return REMOVE_SLOT;
- }
-
- MemoryChunk* chunk_;
- base::AtomicNumber<intptr_t>* global_slots_;
- intptr_t slots_;
-};
-
-class GlobalHandlesMarkingItem : public MarkingItem {
- public:
- GlobalHandlesMarkingItem(GlobalHandles* global_handles, size_t start,
- size_t end)
- : global_handles_(global_handles), start_(start), end_(end) {}
- virtual ~GlobalHandlesMarkingItem() {}
-
- void Process(YoungGenerationMarkingTask* task) override {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
- "GlobalHandlesMarkingItem::Process");
- GlobalHandlesRootMarkingVisitor visitor(task);
- global_handles_
- ->IterateNewSpaceStrongAndDependentRootsAndIdentifyUnmodified(
- &visitor, start_, end_);
- }
-
- private:
- class GlobalHandlesRootMarkingVisitor : public RootVisitor {
- public:
- explicit GlobalHandlesRootMarkingVisitor(YoungGenerationMarkingTask* task)
- : task_(task) {}
-
- void VisitRootPointer(Root root, const char* description,
- Object** p) override {
- DCHECK_EQ(Root::kGlobalHandles, root);
- task_->MarkObject(*p);
- }
-
- void VisitRootPointers(Root root, const char* description, Object** start,
- Object** end) override {
- DCHECK_EQ(Root::kGlobalHandles, root);
- for (Object** p = start; p < end; p++) {
- task_->MarkObject(*p);
- }
- }
-
- private:
- YoungGenerationMarkingTask* task_;
- };
-
- GlobalHandles* global_handles_;
- size_t start_;
- size_t end_;
-};
-
-MinorMarkCompactCollector::MinorMarkCompactCollector(Heap* heap)
- : MarkCompactCollectorBase(heap),
- worklist_(new MinorMarkCompactCollector::MarkingWorklist()),
- main_marking_visitor_(new YoungGenerationMarkingVisitor(
- heap, marking_state(), worklist_, kMainMarker)),
- page_parallel_job_semaphore_(0) {
- static_assert(
- kNumMarkers <= MinorMarkCompactCollector::MarkingWorklist::kMaxNumTasks,
- "more marker tasks than marking deque can handle");
-}
-
-MinorMarkCompactCollector::~MinorMarkCompactCollector() {
- delete worklist_;
- delete main_marking_visitor_;
-}
-
-static bool IsUnmarkedObjectForYoungGeneration(Heap* heap, Object** p) {
- DCHECK_IMPLIES(heap->InNewSpace(*p), heap->InToSpace(*p));
- return heap->InNewSpace(*p) && !heap->minor_mark_compact_collector()
- ->non_atomic_marking_state()
- ->IsGrey(HeapObject::cast(*p));
-}
-
-template <class ParallelItem>
-static void SeedGlobalHandles(GlobalHandles* global_handles,
- ItemParallelJob* job) {
- // Create batches of global handles.
- const size_t kGlobalHandlesBufferSize = 1000;
- const size_t new_space_nodes = global_handles->NumberOfNewSpaceNodes();
- for (size_t start = 0; start < new_space_nodes;
- start += kGlobalHandlesBufferSize) {
- size_t end = start + kGlobalHandlesBufferSize;
- if (end > new_space_nodes) end = new_space_nodes;
- job->AddItem(new ParallelItem(global_handles, start, end));
- }
-}
-
-void MinorMarkCompactCollector::MarkRootSetInParallel() {
- base::AtomicNumber<intptr_t> slots;
- {
- ItemParallelJob job(isolate()->cancelable_task_manager(),
- &page_parallel_job_semaphore_);
-
- // Seed the root set (roots + old->new set).
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_SEED);
- // Create batches of roots.
- RootMarkingVisitorSeedOnly<BatchedRootMarkingItem> root_seed_visitor(
- &job);
- heap()->IterateRoots(&root_seed_visitor, VISIT_ALL_IN_MINOR_MC_MARK);
- // Create batches of global handles.
- SeedGlobalHandles<GlobalHandlesMarkingItem>(isolate()->global_handles(),
- &job);
- // Create items for each page.
- RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
- heap(), [&job, &slots](MemoryChunk* chunk) {
- job.AddItem(new PageMarkingItem(chunk, &slots));
- });
- // Flush any remaining objects in the seeding visitor.
- root_seed_visitor.FlushObjects();
- }
-
- // Add tasks and run in parallel.
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_ROOTS);
- const int new_space_pages =
- static_cast<int>(heap()->new_space()->Capacity()) / Page::kPageSize;
- const int num_tasks = NumberOfParallelMarkingTasks(new_space_pages);
- for (int i = 0; i < num_tasks; i++) {
- job.AddTask(
- new YoungGenerationMarkingTask(isolate(), this, worklist(), i));
- }
- job.Run(isolate()->async_counters());
- DCHECK(worklist()->IsGlobalEmpty());
- }
- }
- old_to_new_slots_ = static_cast<int>(slots.Value());
-}
-
-void MinorMarkCompactCollector::MarkLiveObjects() {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK);
-
- PostponeInterruptsScope postpone(isolate());
-
- RootMarkingVisitor root_visitor(this);
-
- MarkRootSetInParallel();
-
- // Mark rest on the main thread.
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_WEAK);
- heap()->IterateEncounteredWeakCollections(&root_visitor);
- ProcessMarkingWorklist();
- }
-
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_GLOBAL_HANDLES);
- isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
- &IsUnmarkedObjectForYoungGeneration);
- isolate()
- ->global_handles()
- ->IterateNewSpaceWeakUnmodifiedRootsForFinalizers(&root_visitor);
- isolate()
- ->global_handles()
- ->IterateNewSpaceWeakUnmodifiedRootsForPhantomHandles(
- &root_visitor, &IsUnmarkedObjectForYoungGeneration);
- ProcessMarkingWorklist();
- }
-}
-
-void MinorMarkCompactCollector::ProcessMarkingWorklist() {
- MarkingWorklist::View marking_worklist(worklist(), kMainMarker);
- HeapObject* object = nullptr;
- while (marking_worklist.Pop(&object)) {
- DCHECK(!object->IsFiller());
- DCHECK(object->IsHeapObject());
- DCHECK(heap()->Contains(object));
- DCHECK(non_atomic_marking_state()->IsGrey(object));
- main_marking_visitor()->Visit(object);
- }
- DCHECK(marking_worklist.IsLocalEmpty());
-}
-
-void MinorMarkCompactCollector::CollectGarbage() {
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_SWEEPING);
- heap()->mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
- CleanupSweepToIteratePages();
- }
-
- MarkLiveObjects();
- ClearNonLiveReferences();
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- YoungGenerationMarkingVerifier verifier(heap());
- verifier.Run();
- }
-#endif // VERIFY_HEAP
-
- Evacuate();
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- YoungGenerationEvacuationVerifier verifier(heap());
- verifier.Run();
- }
-#endif // VERIFY_HEAP
-
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARKING_DEQUE);
- heap()->incremental_marking()->UpdateMarkingWorklistAfterScavenge();
- }
-
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_RESET_LIVENESS);
- for (Page* p : PageRange(heap()->new_space()->FromSpaceStart(),
- heap()->new_space()->FromSpaceEnd())) {
- DCHECK(!p->IsFlagSet(Page::SWEEP_TO_ITERATE));
- non_atomic_marking_state()->ClearLiveness(p);
- if (FLAG_concurrent_marking) {
- // Ensure that concurrent marker does not track pages that are
- // going to be unmapped.
- heap()->concurrent_marking()->ClearLiveness(p);
- }
- }
- }
-
- RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
- heap(), [](MemoryChunk* chunk) {
- if (chunk->SweepingDone()) {
- RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
- } else {
- RememberedSet<OLD_TO_NEW>::PreFreeEmptyBuckets(chunk);
- }
- });
-
- heap()->account_external_memory_concurrently_freed();
-}
-
-void MinorMarkCompactCollector::MakeIterable(
- Page* p, MarkingTreatmentMode marking_mode,
- FreeSpaceTreatmentMode free_space_mode) {
- // We have to clear the full collectors markbits for the areas that we
- // remove here.
- MarkCompactCollector* full_collector = heap()->mark_compact_collector();
- Address free_start = p->area_start();
- DCHECK_EQ(0, reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize));
-
- for (auto object_and_size :
- LiveObjectRange<kGreyObjects>(p, marking_state()->bitmap(p))) {
- HeapObject* const object = object_and_size.first;
- DCHECK(non_atomic_marking_state()->IsGrey(object));
- Address free_end = object->address();
- if (free_end != free_start) {
- CHECK_GT(free_end, free_start);
- size_t size = static_cast<size_t>(free_end - free_start);
- full_collector->non_atomic_marking_state()->bitmap(p)->ClearRange(
- p->AddressToMarkbitIndex(free_start),
- p->AddressToMarkbitIndex(free_end));
- if (free_space_mode == ZAP_FREE_SPACE) {
- memset(free_start, 0xCC, size);
- }
- p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
- ClearRecordedSlots::kNo);
- }
- Map* map = object->synchronized_map();
- int size = object->SizeFromMap(map);
- free_start = free_end + size;
- }
-
- if (free_start != p->area_end()) {
- CHECK_GT(p->area_end(), free_start);
- size_t size = static_cast<size_t>(p->area_end() - free_start);
- full_collector->non_atomic_marking_state()->bitmap(p)->ClearRange(
- p->AddressToMarkbitIndex(free_start),
- p->AddressToMarkbitIndex(p->area_end()));
- if (free_space_mode == ZAP_FREE_SPACE) {
- memset(free_start, 0xCC, size);
- }
- p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
- ClearRecordedSlots::kNo);
- }
-
- if (marking_mode == MarkingTreatmentMode::CLEAR) {
- non_atomic_marking_state()->ClearLiveness(p);
- p->ClearFlag(Page::SWEEP_TO_ITERATE);
- }
-}
-
-void MinorMarkCompactCollector::ClearNonLiveReferences() {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR);
-
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_STRING_TABLE);
- // Internalized strings are always stored in old space, so there is no need
- // to clean them here.
- YoungGenerationExternalStringTableCleaner external_visitor(this);
- heap()->external_string_table_.IterateNewSpaceStrings(&external_visitor);
- heap()->external_string_table_.CleanUpNewSpaceStrings();
- }
-
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_WEAK_LISTS);
- // Process the weak references.
- MinorMarkCompactWeakObjectRetainer retainer(this);
- heap()->ProcessYoungWeakReferences(&retainer);
- }
-}
-
-void MinorMarkCompactCollector::EvacuatePrologue() {
- NewSpace* new_space = heap()->new_space();
- // Append the list of new space pages to be processed.
- for (Page* p : PageRange(new_space->bottom(), new_space->top())) {
- new_space_evacuation_pages_.push_back(p);
- }
- new_space->Flip();
- new_space->ResetLinearAllocationArea();
-}
-
-void MinorMarkCompactCollector::EvacuateEpilogue() {
- heap()->new_space()->set_age_mark(heap()->new_space()->top());
- // Give pages that are queued to be freed back to the OS.
- heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
-}
-
-void MinorMarkCompactCollector::Evacuate() {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE);
- base::LockGuard<base::Mutex> guard(heap()->relocation_mutex());
-
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_PROLOGUE);
- EvacuatePrologue();
- }
-
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_COPY);
- EvacuatePagesInParallel();
- }
-
- UpdatePointersAfterEvacuation();
-
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_REBALANCE);
- if (!heap()->new_space()->Rebalance()) {
- FatalProcessOutOfMemory("NewSpace::Rebalance");
- }
- }
-
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_CLEAN_UP);
- for (Page* p : new_space_evacuation_pages_) {
- if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) ||
- p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
- p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
- p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
- p->SetFlag(Page::SWEEP_TO_ITERATE);
- sweep_to_iterate_pages_.push_back(p);
- }
- }
- new_space_evacuation_pages_.clear();
- }
-
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_EPILOGUE);
- EvacuateEpilogue();
- }
-}
-
void MarkCompactCollector::MarkLiveObjects() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK);
// The recursive GC marker detects when it is nearing stack overflow,
@@ -2430,7 +1688,6 @@ void MarkCompactCollector::MarkLiveObjects() {
}
}
-
void MarkCompactCollector::ClearNonLiveReferences() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR);
@@ -2463,79 +1720,34 @@ void MarkCompactCollector::ClearNonLiveReferences() {
// ClearFullMapTransitions must be called before WeakCells are cleared.
ClearFullMapTransitions();
}
- DependentCode* dependent_code_list;
- ClearWeakCellsAndSimpleMapTransitions(&dependent_code_list);
- MarkDependentCodeForDeoptimization(dependent_code_list);
+ ClearWeakCells();
+ ClearWeakReferences();
+ MarkDependentCodeForDeoptimization();
ClearWeakCollections();
DCHECK(weak_objects_.weak_cells.IsGlobalEmpty());
DCHECK(weak_objects_.transition_arrays.IsGlobalEmpty());
+ DCHECK(weak_objects_.weak_references.IsGlobalEmpty());
+ DCHECK(weak_objects_.weak_objects_in_code.IsGlobalEmpty());
}
-
-void MarkCompactCollector::MarkDependentCodeForDeoptimization(
- DependentCode* list_head) {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_DEPENDENT_CODE);
- Isolate* isolate = this->isolate();
- DependentCode* current = list_head;
- while (current->length() > 0) {
- have_code_to_deoptimize_ |= current->MarkCodeForDeoptimization(
- isolate, DependentCode::kWeakCodeGroup);
- current = current->next_link();
- }
-
- {
- ArrayList* list = heap_->weak_new_space_object_to_code_list();
- int counter = 0;
- for (int i = 0; i < list->Length(); i += 2) {
- WeakCell* obj = WeakCell::cast(list->Get(i));
- WeakCell* dep = WeakCell::cast(list->Get(i + 1));
- if (obj->cleared() || dep->cleared()) {
- if (!dep->cleared()) {
- Code* code = Code::cast(dep->value());
- if (!code->marked_for_deoptimization()) {
- DependentCode::SetMarkedForDeoptimization(
- code, DependentCode::DependencyGroup::kWeakCodeGroup);
- code->InvalidateEmbeddedObjects();
- have_code_to_deoptimize_ = true;
- }
- }
- } else {
- // We record the slot manually because marking is finished at this
- // point and the write barrier would bailout.
- list->Set(counter, obj, SKIP_WRITE_BARRIER);
- RecordSlot(list, list->Slot(counter), obj);
- counter++;
- list->Set(counter, dep, SKIP_WRITE_BARRIER);
- RecordSlot(list, list->Slot(counter), dep);
- counter++;
- }
- }
- }
-
- WeakHashTable* table = heap_->weak_object_to_code_table();
- uint32_t capacity = table->Capacity();
- for (uint32_t i = 0; i < capacity; i++) {
- uint32_t key_index = table->EntryToIndex(i);
- Object* key = table->get(key_index);
- if (!table->IsKey(isolate, key)) continue;
- uint32_t value_index = table->EntryToValueIndex(i);
- Object* value = table->get(value_index);
- DCHECK(key->IsWeakCell());
- if (WeakCell::cast(key)->cleared()) {
- have_code_to_deoptimize_ |=
- DependentCode::cast(value)->MarkCodeForDeoptimization(
- isolate, DependentCode::kWeakCodeGroup);
- table->set(key_index, heap_->the_hole_value());
- table->set(value_index, heap_->the_hole_value());
- table->ElementRemoved();
+void MarkCompactCollector::MarkDependentCodeForDeoptimization() {
+ std::pair<HeapObject*, Code*> weak_object_in_code;
+ while (weak_objects_.weak_objects_in_code.Pop(kMainThread,
+ &weak_object_in_code)) {
+ HeapObject* object = weak_object_in_code.first;
+ Code* code = weak_object_in_code.second;
+ if (!non_atomic_marking_state()->IsBlackOrGrey(object) &&
+ !code->marked_for_deoptimization()) {
+ code->SetMarkedForDeoptimization("weak objects");
+ code->InvalidateEmbeddedObjects();
+ have_code_to_deoptimize_ = true;
}
}
}
-void MarkCompactCollector::ClearSimpleMapTransition(
- WeakCell* potential_transition, Map* dead_target) {
+void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map* dead_target) {
DCHECK(non_atomic_marking_state()->IsWhite(dead_target));
Object* potential_parent = dead_target->constructor_or_backpointer();
if (potential_parent->IsMap()) {
@@ -2543,26 +1755,24 @@ void MarkCompactCollector::ClearSimpleMapTransition(
DisallowHeapAllocation no_gc_obviously;
if (non_atomic_marking_state()->IsBlackOrGrey(parent) &&
TransitionsAccessor(parent, &no_gc_obviously)
- .HasSimpleTransitionTo(potential_transition)) {
- ClearSimpleMapTransition(parent, dead_target);
+ .HasSimpleTransitionTo(dead_target)) {
+ ClearPotentialSimpleMapTransition(parent, dead_target);
}
}
}
-void MarkCompactCollector::ClearSimpleMapTransition(Map* map,
- Map* dead_target) {
+void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map* map,
+ Map* dead_target) {
DCHECK(!map->is_prototype_map());
DCHECK(!dead_target->is_prototype_map());
- // Clear the useless weak cell pointer, and take ownership of the descriptor
- // array.
- map->set_raw_transitions(Smi::kZero);
+ DCHECK_EQ(map->raw_transitions(), HeapObjectReference::Weak(dead_target));
+ // Take ownership of the descriptor array.
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
DescriptorArray* descriptors = map->instance_descriptors();
if (descriptors == dead_target->instance_descriptors() &&
number_of_own_descriptors > 0) {
TrimDescriptorArray(map, descriptors);
DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
- map->set_owns_descriptors(true);
}
}
@@ -2571,16 +1781,21 @@ void MarkCompactCollector::ClearFullMapTransitions() {
while (weak_objects_.transition_arrays.Pop(kMainThread, &array)) {
int num_transitions = array->number_of_entries();
if (num_transitions > 0) {
- Map* map = array->GetTarget(0);
- DCHECK_NOT_NULL(map); // WeakCells aren't cleared yet.
- Map* parent = Map::cast(map->constructor_or_backpointer());
- bool parent_is_alive = non_atomic_marking_state()->IsBlackOrGrey(parent);
- DescriptorArray* descriptors =
- parent_is_alive ? parent->instance_descriptors() : nullptr;
- bool descriptors_owner_died =
- CompactTransitionArray(parent, array, descriptors);
- if (descriptors_owner_died) {
- TrimDescriptorArray(parent, descriptors);
+ Map* map;
+ // The array might contain "undefined" elements because it's not yet
+ // filled. Allow it.
+ if (array->GetTargetIfExists(0, isolate(), &map)) {
+ DCHECK_NOT_NULL(map); // WeakCells aren't cleared yet.
+ Map* parent = Map::cast(map->constructor_or_backpointer());
+ bool parent_is_alive =
+ non_atomic_marking_state()->IsBlackOrGrey(parent);
+ DescriptorArray* descriptors =
+ parent_is_alive ? parent->instance_descriptors() : nullptr;
+ bool descriptors_owner_died =
+ CompactTransitionArray(parent, array, descriptors);
+ if (descriptors_owner_died) {
+ TrimDescriptorArray(parent, descriptors);
+ }
}
}
}
@@ -2627,14 +1842,12 @@ bool MarkCompactCollector::CompactTransitionArray(
// array disappeared during GC.
int trim = transitions->Capacity() - transition_index;
if (trim > 0) {
- heap_->RightTrimFixedArray(transitions,
- trim * TransitionArray::kTransitionSize);
+ heap_->RightTrimFixedArray(transitions, trim * TransitionArray::kEntrySize);
transitions->SetNumberOfTransitions(transition_index);
}
return descriptors_owner_died;
}
-
void MarkCompactCollector::TrimDescriptorArray(Map* map,
DescriptorArray* descriptors) {
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
@@ -2664,7 +1877,6 @@ void MarkCompactCollector::TrimDescriptorArray(Map* map,
map->set_owns_descriptors(true);
}
-
void MarkCompactCollector::TrimEnumCache(Map* map,
DescriptorArray* descriptors) {
int live_enum = map->EnumLength();
@@ -2685,7 +1897,6 @@ void MarkCompactCollector::TrimEnumCache(Map* map,
heap_->RightTrimFixedArray(indices, to_trim);
}
-
void MarkCompactCollector::ProcessWeakCollections() {
MarkCompactMarkingVisitor visitor(this, marking_state());
Object* weak_collection_obj = heap()->encountered_weak_collections();
@@ -2716,7 +1927,6 @@ void MarkCompactCollector::ProcessWeakCollections() {
}
}
-
void MarkCompactCollector::ClearWeakCollections() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
Object* weak_collection_obj = heap()->encountered_weak_collections();
@@ -2739,7 +1949,6 @@ void MarkCompactCollector::ClearWeakCollections() {
heap()->set_encountered_weak_collections(Smi::kZero);
}
-
void MarkCompactCollector::AbortWeakCollections() {
Object* weak_collection_obj = heap()->encountered_weak_collections();
while (weak_collection_obj != Smi::kZero) {
@@ -2751,12 +1960,9 @@ void MarkCompactCollector::AbortWeakCollections() {
heap()->set_encountered_weak_collections(Smi::kZero);
}
-void MarkCompactCollector::ClearWeakCellsAndSimpleMapTransitions(
- DependentCode** dependent_code_list) {
+void MarkCompactCollector::ClearWeakCells() {
Heap* heap = this->heap();
TRACE_GC(heap->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_CELLS);
- DependentCode* dependent_code_head =
- DependentCode::cast(heap->empty_fixed_array());
WeakCell* weak_cell;
while (weak_objects_.weak_cells.Pop(kMainThread, &weak_cell)) {
// We do not insert cleared weak cells into the list, so the value
@@ -2782,20 +1988,6 @@ void MarkCompactCollector::ClearWeakCellsAndSimpleMapTransitions(
} else {
weak_cell->clear();
}
- } else if (value->IsMap()) {
- // The map is non-live.
- Map* map = Map::cast(value);
- // Add dependent code to the dependent_code_list.
- DependentCode* candidate = map->dependent_code();
- // We rely on the fact that the weak code group comes first.
- STATIC_ASSERT(DependentCode::kWeakCodeGroup == 0);
- if (candidate->length() > 0 &&
- candidate->group() == DependentCode::kWeakCodeGroup) {
- candidate->set_next_link(dependent_code_head);
- dependent_code_head = candidate;
- }
- ClearSimpleMapTransition(weak_cell, map);
- weak_cell->clear();
} else {
// All other objects.
weak_cell->clear();
@@ -2806,12 +1998,35 @@ void MarkCompactCollector::ClearWeakCellsAndSimpleMapTransitions(
RecordSlot(weak_cell, slot, *slot);
}
}
- *dependent_code_list = dependent_code_head;
+}
+
+void MarkCompactCollector::ClearWeakReferences() {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES);
+ std::pair<HeapObject*, HeapObjectReference**> slot;
+ while (weak_objects_.weak_references.Pop(kMainThread, &slot)) {
+ HeapObject* value;
+ HeapObjectReference** location = slot.second;
+ if ((*location)->ToWeakHeapObject(&value)) {
+ DCHECK(!value->IsCell());
+ if (non_atomic_marking_state()->IsBlackOrGrey(value)) {
+ // The value of the weak reference is alive.
+ RecordSlot(slot.first, location, value);
+ } else {
+ if (value->IsMap()) {
+ // The map is non-live.
+ ClearPotentialSimpleMapTransition(Map::cast(value));
+ }
+ *location = HeapObjectReference::ClearedValue();
+ }
+ }
+ }
}
void MarkCompactCollector::AbortWeakObjects() {
weak_objects_.weak_cells.Clear();
weak_objects_.transition_arrays.Clear();
+ weak_objects_.weak_references.Clear();
+ weak_objects_.weak_objects_in_code.Clear();
}
void MarkCompactCollector::RecordRelocSlot(Code* host, RelocInfo* rinfo,
@@ -2839,30 +2054,57 @@ void MarkCompactCollector::RecordRelocSlot(Code* host, RelocInfo* rinfo,
}
template <AccessMode access_mode>
-static inline SlotCallbackResult UpdateSlot(Object** slot) {
- Object* obj = *slot;
- if (obj->IsHeapObject()) {
- HeapObject* heap_obj = HeapObject::cast(obj);
- MapWord map_word = heap_obj->map_word();
- if (map_word.IsForwardingAddress()) {
- DCHECK(heap_obj->GetHeap()->InFromSpace(heap_obj) ||
- MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
- Page::FromAddress(heap_obj->address())
- ->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
- HeapObject* target = map_word.ToForwardingAddress();
- if (access_mode == AccessMode::NON_ATOMIC) {
- *slot = target;
- } else {
- base::AsAtomicPointer::Release_CompareAndSwap(slot, obj, target);
- }
- DCHECK(!heap_obj->GetHeap()->InFromSpace(target));
- DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
+static inline SlotCallbackResult UpdateSlot(
+ MaybeObject** slot, MaybeObject* old, HeapObject* heap_obj,
+ HeapObjectReferenceType reference_type) {
+ MapWord map_word = heap_obj->map_word();
+ if (map_word.IsForwardingAddress()) {
+ DCHECK(heap_obj->GetHeap()->InFromSpace(heap_obj) ||
+ MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
+ Page::FromAddress(heap_obj->address())
+ ->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
+ MaybeObject* target =
+ reference_type == HeapObjectReferenceType::WEAK
+ ? HeapObjectReference::Weak(map_word.ToForwardingAddress())
+ : HeapObjectReference::Strong(map_word.ToForwardingAddress());
+ if (access_mode == AccessMode::NON_ATOMIC) {
+ *slot = target;
+ } else {
+ base::AsAtomicPointer::Release_CompareAndSwap(slot, old, target);
}
+ DCHECK(!heap_obj->GetHeap()->InFromSpace(target));
+ DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
}
// OLD_TO_OLD slots are always removed after updating.
return REMOVE_SLOT;
}
+template <AccessMode access_mode>
+static inline SlotCallbackResult UpdateSlot(MaybeObject** slot) {
+ MaybeObject* obj = base::AsAtomicPointer::Relaxed_Load(slot);
+ HeapObject* heap_obj;
+ if (obj->ToWeakHeapObject(&heap_obj)) {
+ UpdateSlot<access_mode>(slot, obj, heap_obj, HeapObjectReferenceType::WEAK);
+ } else if (obj->ToStrongHeapObject(&heap_obj)) {
+ return UpdateSlot<access_mode>(slot, obj, heap_obj,
+ HeapObjectReferenceType::STRONG);
+ }
+ return REMOVE_SLOT;
+}
+
+template <AccessMode access_mode>
+static inline SlotCallbackResult UpdateStrongSlot(MaybeObject** maybe_slot) {
+ DCHECK((*maybe_slot)->IsSmi() || (*maybe_slot)->IsStrongHeapObject());
+ Object** slot = reinterpret_cast<Object**>(maybe_slot);
+ Object* obj = base::AsAtomicPointer::Relaxed_Load(slot);
+ if (obj->IsHeapObject()) {
+ HeapObject* heap_obj = HeapObject::cast(obj);
+ return UpdateSlot<access_mode>(maybe_slot, MaybeObject::FromObject(obj),
+ heap_obj, HeapObjectReferenceType::STRONG);
+ }
+ return REMOVE_SLOT;
+}
+
// Visitor for updating root pointers and to-space pointers.
// It does not expect to encounter pointers to dead objects.
// TODO(ulan): Remove code object specific functions. This visitor
@@ -2870,33 +2112,61 @@ static inline SlotCallbackResult UpdateSlot(Object** slot) {
class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
public:
void VisitPointer(HeapObject* host, Object** p) override {
+ UpdateStrongSlotInternal(p);
+ }
+
+ void VisitPointer(HeapObject* host, MaybeObject** p) override {
UpdateSlotInternal(p);
}
void VisitPointers(HeapObject* host, Object** start, Object** end) override {
- for (Object** p = start; p < end; p++) UpdateSlotInternal(p);
+ for (Object** p = start; p < end; p++) {
+ UpdateStrongSlotInternal(p);
+ }
+ }
+
+ void VisitPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) final {
+ for (MaybeObject** p = start; p < end; p++) {
+ UpdateSlotInternal(p);
+ }
}
void VisitRootPointer(Root root, const char* description,
Object** p) override {
- UpdateSlotInternal(p);
+ UpdateStrongSlotInternal(p);
}
void VisitRootPointers(Root root, const char* description, Object** start,
Object** end) override {
- for (Object** p = start; p < end; p++) UpdateSlotInternal(p);
+ for (Object** p = start; p < end; p++) UpdateStrongSlotInternal(p);
}
void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
- UpdateTypedSlotHelper::UpdateEmbeddedPointer(rinfo, UpdateSlotInternal);
+ UpdateTypedSlotHelper::UpdateEmbeddedPointer(
+ rinfo, UpdateStrongMaybeObjectSlotInternal);
}
void VisitCodeTarget(Code* host, RelocInfo* rinfo) override {
- UpdateTypedSlotHelper::UpdateCodeTarget(rinfo, UpdateSlotInternal);
+ UpdateTypedSlotHelper::UpdateCodeTarget(
+ rinfo, UpdateStrongMaybeObjectSlotInternal);
}
private:
- static inline SlotCallbackResult UpdateSlotInternal(Object** slot) {
+ static inline SlotCallbackResult UpdateStrongMaybeObjectSlotInternal(
+ MaybeObject** slot) {
+ DCHECK(!(*slot)->IsWeakHeapObject());
+ DCHECK(!(*slot)->IsClearedWeakHeapObject());
+ return UpdateStrongSlotInternal(reinterpret_cast<Object**>(slot));
+ }
+
+ static inline SlotCallbackResult UpdateStrongSlotInternal(Object** slot) {
+ DCHECK(!HasWeakHeapObjectTag(*slot));
+ return UpdateStrongSlot<AccessMode::NON_ATOMIC>(
+ reinterpret_cast<MaybeObject**>(slot));
+ }
+
+ static inline SlotCallbackResult UpdateSlotInternal(MaybeObject** slot) {
return UpdateSlot<AccessMode::NON_ATOMIC>(slot);
}
};
@@ -3144,82 +2414,6 @@ void FullEvacuator::RawEvacuatePage(Page* page, intptr_t* live_bytes) {
}
}
-class YoungGenerationEvacuator : public Evacuator {
- public:
- YoungGenerationEvacuator(MinorMarkCompactCollector* collector,
- RecordMigratedSlotVisitor* record_visitor)
- : Evacuator(collector->heap(), record_visitor), collector_(collector) {}
-
- GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() override {
- return GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_EVACUATE_COPY;
- }
-
- protected:
- void RawEvacuatePage(Page* page, intptr_t* live_bytes) override;
-
- MinorMarkCompactCollector* collector_;
-};
-
-void YoungGenerationEvacuator::RawEvacuatePage(Page* page,
- intptr_t* live_bytes) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
- "YoungGenerationEvacuator::RawEvacuatePage");
- MinorMarkCompactCollector::NonAtomicMarkingState* marking_state =
- collector_->non_atomic_marking_state();
- *live_bytes = marking_state->live_bytes(page);
- switch (ComputeEvacuationMode(page)) {
- case kObjectsNewToOld:
- LiveObjectVisitor::VisitGreyObjectsNoFail(
- page, marking_state, &new_space_visitor_,
- LiveObjectVisitor::kClearMarkbits);
- // ArrayBufferTracker will be updated during pointers updating.
- break;
- case kPageNewToOld:
- LiveObjectVisitor::VisitGreyObjectsNoFail(
- page, marking_state, &new_to_old_page_visitor_,
- LiveObjectVisitor::kKeepMarking);
- new_to_old_page_visitor_.account_moved_bytes(
- marking_state->live_bytes(page));
- // TODO(mlippautz): If cleaning array buffers is too slow here we can
- // delay it until the next GC.
- ArrayBufferTracker::FreeDead(page, marking_state);
- if (heap()->ShouldZapGarbage()) {
- collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
- ZAP_FREE_SPACE);
- } else if (heap()->incremental_marking()->IsMarking()) {
- // When incremental marking is on, we need to clear the mark bits of
- // the full collector. We cannot yet discard the young generation mark
- // bits as they are still relevant for pointers updating.
- collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
- IGNORE_FREE_SPACE);
- }
- break;
- case kPageNewToNew:
- LiveObjectVisitor::VisitGreyObjectsNoFail(
- page, marking_state, &new_to_new_page_visitor_,
- LiveObjectVisitor::kKeepMarking);
- new_to_new_page_visitor_.account_moved_bytes(
- marking_state->live_bytes(page));
- // TODO(mlippautz): If cleaning array buffers is too slow here we can
- // delay it until the next GC.
- ArrayBufferTracker::FreeDead(page, marking_state);
- if (heap()->ShouldZapGarbage()) {
- collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
- ZAP_FREE_SPACE);
- } else if (heap()->incremental_marking()->IsMarking()) {
- // When incremental marking is on, we need to clear the mark bits of
- // the full collector. We cannot yet discard the young generation mark
- // bits as they are still relevant for pointers updating.
- collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
- IGNORE_FREE_SPACE);
- }
- break;
- case kObjectsOldToOld:
- UNREACHABLE();
- break;
- }
-}
-
class PageEvacuationItem : public ItemParallelJob::Item {
public:
explicit PageEvacuationItem(Page* page) : page_(page) {}
@@ -3265,7 +2459,8 @@ void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
const bool profiling =
heap()->isolate()->is_profiling() ||
heap()->isolate()->logger()->is_logging_code_events() ||
- heap()->isolate()->heap_profiler()->is_tracking_object_moves();
+ heap()->isolate()->heap_profiler()->is_tracking_object_moves() ||
+ heap()->has_heap_object_allocation_tracker();
ProfilingMigrationObserver profiling_observer(heap());
const int wanted_num_tasks =
@@ -3286,16 +2481,15 @@ void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
delete[] evacuators;
if (FLAG_trace_evacuation) {
- PrintIsolate(
- isolate(),
- "%8.0f ms: evacuation-summary: parallel=%s pages=%d "
- "wanted_tasks=%d tasks=%d cores=%" PRIuS " live_bytes=%" V8PRIdPTR
- " compaction_speed=%.f\n",
- isolate()->time_millis_since_init(),
- FLAG_parallel_compaction ? "yes" : "no", job->NumberOfItems(),
- wanted_num_tasks, job->NumberOfTasks(),
- V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads() + 1,
- live_bytes, compaction_speed);
+ PrintIsolate(isolate(),
+ "%8.0f ms: evacuation-summary: parallel=%s pages=%d "
+ "wanted_tasks=%d tasks=%d cores=%d live_bytes=%" V8PRIdPTR
+ " compaction_speed=%.f\n",
+ isolate()->time_millis_since_init(),
+ FLAG_parallel_compaction ? "yes" : "no", job->NumberOfItems(),
+ wanted_num_tasks, job->NumberOfTasks(),
+ V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1,
+ live_bytes, compaction_speed);
}
}
@@ -3343,34 +2537,6 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
PostProcessEvacuationCandidates();
}
-void MinorMarkCompactCollector::EvacuatePagesInParallel() {
- ItemParallelJob evacuation_job(isolate()->cancelable_task_manager(),
- &page_parallel_job_semaphore_);
- intptr_t live_bytes = 0;
-
- for (Page* page : new_space_evacuation_pages_) {
- intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page);
- if (live_bytes_on_page == 0 && !page->contains_array_buffers()) continue;
- live_bytes += live_bytes_on_page;
- if (ShouldMovePage(page, live_bytes_on_page)) {
- if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
- EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
- } else {
- EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
- }
- }
- evacuation_job.AddItem(new PageEvacuationItem(page));
- }
- if (evacuation_job.NumberOfItems() == 0) return;
-
- YoungGenerationMigrationObserver observer(heap(),
- heap()->mark_compact_collector());
- YoungGenerationRecordMigratedSlotVisitor record_visitor(
- heap()->mark_compact_collector());
- CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>(
- this, &evacuation_job, &record_visitor, &observer, live_bytes);
-}
-
class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
public:
virtual Object* RetainAs(Object* object) {
@@ -3495,7 +2661,7 @@ void MarkCompactCollector::Evacuate() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_REBALANCE);
if (!heap()->new_space()->Rebalance()) {
- FatalProcessOutOfMemory("NewSpace::Rebalance");
+ heap()->FatalProcessOutOfMemory("NewSpace::Rebalance");
}
}
@@ -3604,7 +2770,7 @@ class ToSpaceUpdatingItem : public UpdatingItem {
HeapObject* object = HeapObject::FromAddress(cur);
Map* map = object->map();
int size = object->SizeFromMap(map);
- object->IterateBody(map->instance_type(), size, &visitor);
+ object->IterateBodyFast(map, size, &visitor);
cur += size;
}
}
@@ -3649,28 +2815,34 @@ class RememberedSetUpdatingItem : public UpdatingItem {
private:
inline SlotCallbackResult CheckAndUpdateOldToNewSlot(Address slot_address) {
- Object** slot = reinterpret_cast<Object**>(slot_address);
- if (heap_->InFromSpace(*slot)) {
- HeapObject* heap_object = reinterpret_cast<HeapObject*>(*slot);
- DCHECK(heap_object->IsHeapObject());
+ MaybeObject** slot = reinterpret_cast<MaybeObject**>(slot_address);
+ HeapObject* heap_object;
+ if (!(*slot)->ToStrongOrWeakHeapObject(&heap_object)) {
+ return REMOVE_SLOT;
+ }
+ if (heap_->InFromSpace(heap_object)) {
MapWord map_word = heap_object->map_word();
if (map_word.IsForwardingAddress()) {
- *slot = map_word.ToForwardingAddress();
+ HeapObjectReference::Update(
+ reinterpret_cast<HeapObjectReference**>(slot),
+ map_word.ToForwardingAddress());
}
+ bool success = (*slot)->ToStrongOrWeakHeapObject(&heap_object);
+ USE(success);
+ DCHECK(success);
// If the object was in from space before and is after executing the
// callback in to space, the object is still live.
// Unfortunately, we do not know about the slot. It could be in a
// just freed free space object.
- if (heap_->InToSpace(*slot)) {
+ if (heap_->InToSpace(heap_object)) {
return KEEP_SLOT;
}
- } else if (heap_->InToSpace(*slot)) {
+ } else if (heap_->InToSpace(heap_object)) {
// Slots can point to "to" space if the page has been moved, or if the
// slot has been recorded multiple times in the remembered set, or
// if the slot was already updated during old->old updating.
// In case the page has been moved, check markbits to determine liveness
// of the slot. In the other case, the slot can just be kept.
- HeapObject* heap_object = reinterpret_cast<HeapObject*>(*slot);
if (Page::FromAddress(heap_object->address())
->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
// IsBlackOrGrey is required because objects are marked as grey for
@@ -3684,7 +2856,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
}
return KEEP_SLOT;
} else {
- DCHECK(!heap_->InNewSpace(*slot));
+ DCHECK(!heap_->InNewSpace(heap_object));
}
return REMOVE_SLOT;
}
@@ -3704,7 +2876,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
[&filter](Address slot) {
if (!filter.IsValid(slot)) return REMOVE_SLOT;
return UpdateSlot<AccessMode::NON_ATOMIC>(
- reinterpret_cast<Object**>(slot));
+ reinterpret_cast<MaybeObject**>(slot));
},
SlotSet::PREFREE_EMPTY_BUCKETS);
}
@@ -3732,7 +2904,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
chunk_,
[isolate, this](SlotType slot_type, Address host_addr, Address slot) {
return UpdateTypedSlotHelper::UpdateTypedSlot(
- isolate, slot_type, slot, [this](Object** slot) {
+ isolate, slot_type, slot, [this](MaybeObject** slot) {
return CheckAndUpdateOldToNewSlot(
reinterpret_cast<Address>(slot));
});
@@ -3745,8 +2917,11 @@ class RememberedSetUpdatingItem : public UpdatingItem {
RememberedSet<OLD_TO_OLD>::IterateTyped(
chunk_,
[isolate](SlotType slot_type, Address host_addr, Address slot) {
+ // Using UpdateStrongSlot is OK here, because there are no weak
+ // typed slots.
return UpdateTypedSlotHelper::UpdateTypedSlot(
- isolate, slot_type, slot, UpdateSlot<AccessMode::NON_ATOMIC>);
+ isolate, slot_type, slot,
+ UpdateStrongSlot<AccessMode::NON_ATOMIC>);
});
}
}
@@ -3757,24 +2932,12 @@ class RememberedSetUpdatingItem : public UpdatingItem {
RememberedSetUpdatingMode updating_mode_;
};
-UpdatingItem* MinorMarkCompactCollector::CreateToSpaceUpdatingItem(
- MemoryChunk* chunk, Address start, Address end) {
- return new ToSpaceUpdatingItem<NonAtomicMarkingState>(
- chunk, start, end, non_atomic_marking_state());
-}
-
UpdatingItem* MarkCompactCollector::CreateToSpaceUpdatingItem(
MemoryChunk* chunk, Address start, Address end) {
return new ToSpaceUpdatingItem<NonAtomicMarkingState>(
chunk, start, end, non_atomic_marking_state());
}
-UpdatingItem* MinorMarkCompactCollector::CreateRememberedSetUpdatingItem(
- MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
- return new RememberedSetUpdatingItem<NonAtomicMarkingState>(
- heap(), non_atomic_marking_state(), chunk, updating_mode);
-}
-
UpdatingItem* MarkCompactCollector::CreateRememberedSetUpdatingItem(
MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
return new RememberedSetUpdatingItem<NonAtomicMarkingState>(
@@ -3878,21 +3041,6 @@ int MarkCompactCollectorBase::CollectRememberedSetUpdatingItems(
return pages;
}
-int MinorMarkCompactCollector::CollectNewSpaceArrayBufferTrackerItems(
- ItemParallelJob* job) {
- int pages = 0;
- for (Page* p : new_space_evacuation_pages_) {
- if (Evacuator::ComputeEvacuationMode(p) == Evacuator::kObjectsNewToOld) {
- if (p->local_tracker() == nullptr) continue;
-
- pages++;
- job->AddItem(new ArrayBufferTrackerUpdatingItem(
- p, ArrayBufferTrackerUpdatingItem::kRegular));
- }
- }
- return pages;
-}
-
int MarkCompactCollector::CollectNewSpaceArrayBufferTrackerItems(
ItemParallelJob* job) {
int pages = 0;
@@ -4017,69 +3165,6 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
}
}
-void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
- TRACE_GC(heap()->tracer(),
- GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS);
-
- PointersUpdatingVisitor updating_visitor;
- ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
- &page_parallel_job_semaphore_);
-
- CollectNewSpaceArrayBufferTrackerItems(&updating_job);
- // Create batches of global handles.
- SeedGlobalHandles<GlobalHandlesUpdatingItem>(isolate()->global_handles(),
- &updating_job);
- const int to_space_tasks = CollectToSpaceUpdatingItems(&updating_job);
- int remembered_set_pages = 0;
- remembered_set_pages += CollectRememberedSetUpdatingItems(
- &updating_job, heap()->old_space(),
- RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
- remembered_set_pages += CollectRememberedSetUpdatingItems(
- &updating_job, heap()->code_space(),
- RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
- remembered_set_pages += CollectRememberedSetUpdatingItems(
- &updating_job, heap()->map_space(),
- RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
- remembered_set_pages += CollectRememberedSetUpdatingItems(
- &updating_job, heap()->lo_space(),
- RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
- const int remembered_set_tasks =
- remembered_set_pages == 0 ? 0
- : NumberOfParallelPointerUpdateTasks(
- remembered_set_pages, old_to_new_slots_);
- const int num_tasks = Max(to_space_tasks, remembered_set_tasks);
- for (int i = 0; i < num_tasks; i++) {
- updating_job.AddTask(new PointersUpdatingTask(
- isolate(), GCTracer::BackgroundScope::
- MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
- }
-
- {
- TRACE_GC(heap()->tracer(),
- GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
- heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_MINOR_MC_UPDATE);
- }
- {
- TRACE_GC(heap()->tracer(),
- GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS);
- updating_job.Run(isolate()->async_counters());
- heap()->array_buffer_collector()->FreeAllocationsOnBackgroundThread();
- }
-
- {
- TRACE_GC(heap()->tracer(),
- GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_WEAK);
-
- EvacuationWeakObjectRetainer evacuation_object_retainer;
- heap()->ProcessWeakListRoots(&evacuation_object_retainer);
-
- // Update pointers from external string table.
- heap()->UpdateNewSpaceReferencesInExternalStringTable(
- &UpdateReferenceInExternalStringTableEntry);
- heap()->IterateEncounteredWeakCollections(&updating_visitor);
- }
-}
-
void MarkCompactCollector::ReportAbortedEvacuationCandidate(
HeapObject* failed_object, Page* page) {
base::LockGuard<base::Mutex> guard(&mutex_);
@@ -4225,5 +3310,1062 @@ void MarkCompactCollector::StartSweepSpaces() {
}
}
+#ifdef ENABLE_MINOR_MC
+
+namespace {
+
+#ifdef VERIFY_HEAP
+
+class YoungGenerationMarkingVerifier : public MarkingVerifier {
+ public:
+ explicit YoungGenerationMarkingVerifier(Heap* heap)
+ : MarkingVerifier(heap),
+ marking_state_(
+ heap->minor_mark_compact_collector()->non_atomic_marking_state()) {}
+
+ Bitmap* bitmap(const MemoryChunk* chunk) override {
+ return marking_state_->bitmap(chunk);
+ }
+
+ bool IsMarked(HeapObject* object) override {
+ return marking_state_->IsGrey(object);
+ }
+
+ bool IsBlackOrGrey(HeapObject* object) override {
+ return marking_state_->IsBlackOrGrey(object);
+ }
+
+ void Run() override {
+ VerifyRoots(VISIT_ALL_IN_SCAVENGE);
+ VerifyMarking(heap_->new_space());
+ }
+
+ void VerifyPointers(Object** start, Object** end) override {
+ for (Object** current = start; current < end; current++) {
+ DCHECK(!HasWeakHeapObjectTag(*current));
+ if ((*current)->IsHeapObject()) {
+ HeapObject* object = HeapObject::cast(*current);
+ if (!heap_->InNewSpace(object)) return;
+ CHECK(IsMarked(object));
+ }
+ }
+ }
+
+ void VerifyPointers(MaybeObject** start, MaybeObject** end) override {
+ for (MaybeObject** current = start; current < end; current++) {
+ HeapObject* object;
+ // Minor MC treats weak references as strong.
+ if ((*current)->ToStrongOrWeakHeapObject(&object)) {
+ if (!heap_->InNewSpace(object)) {
+ continue;
+ }
+ CHECK(IsMarked(object));
+ }
+ }
+ }
+
+ private:
+ MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
+};
+
+class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
+ public:
+ explicit YoungGenerationEvacuationVerifier(Heap* heap)
+ : EvacuationVerifier(heap) {}
+
+ void Run() override {
+ VerifyRoots(VISIT_ALL_IN_SCAVENGE);
+ VerifyEvacuation(heap_->new_space());
+ VerifyEvacuation(heap_->old_space());
+ VerifyEvacuation(heap_->code_space());
+ VerifyEvacuation(heap_->map_space());
+ }
+
+ protected:
+ void VerifyPointers(Object** start, Object** end) override {
+ for (Object** current = start; current < end; current++) {
+ if ((*current)->IsHeapObject()) {
+ HeapObject* object = HeapObject::cast(*current);
+ CHECK_IMPLIES(heap()->InNewSpace(object), heap()->InToSpace(object));
+ }
+ }
+ }
+ void VerifyPointers(MaybeObject** start, MaybeObject** end) override {
+ for (MaybeObject** current = start; current < end; current++) {
+ HeapObject* object;
+ if ((*current)->ToStrongOrWeakHeapObject(&object)) {
+ CHECK_IMPLIES(heap()->InNewSpace(object), heap()->InToSpace(object));
+ }
+ }
+ }
+};
+
+#endif // VERIFY_HEAP
+
+template <class ParallelItem>
+void SeedGlobalHandles(GlobalHandles* global_handles, ItemParallelJob* job) {
+ // Create batches of global handles.
+ const size_t kGlobalHandlesBufferSize = 1000;
+ const size_t new_space_nodes = global_handles->NumberOfNewSpaceNodes();
+ for (size_t start = 0; start < new_space_nodes;
+ start += kGlobalHandlesBufferSize) {
+ size_t end = start + kGlobalHandlesBufferSize;
+ if (end > new_space_nodes) end = new_space_nodes;
+ job->AddItem(new ParallelItem(global_handles, start, end));
+ }
+}
+
+bool IsUnmarkedObjectForYoungGeneration(Heap* heap, Object** p) {
+ DCHECK_IMPLIES(heap->InNewSpace(*p), heap->InToSpace(*p));
+ return heap->InNewSpace(*p) && !heap->minor_mark_compact_collector()
+ ->non_atomic_marking_state()
+ ->IsGrey(HeapObject::cast(*p));
+}
+
+} // namespace
+
+class YoungGenerationMarkingVisitor final
+ : public NewSpaceVisitor<YoungGenerationMarkingVisitor> {
+ public:
+ YoungGenerationMarkingVisitor(
+ Heap* heap, MinorMarkCompactCollector::MarkingState* marking_state,
+ MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
+ : heap_(heap),
+ worklist_(global_worklist, task_id),
+ marking_state_(marking_state) {}
+
+ V8_INLINE void VisitPointers(HeapObject* host, Object** start,
+ Object** end) final {
+ for (Object** p = start; p < end; p++) {
+ VisitPointer(host, p);
+ }
+ }
+
+ V8_INLINE void VisitPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) final {
+ for (MaybeObject** p = start; p < end; p++) {
+ VisitPointer(host, p);
+ }
+ }
+
+ V8_INLINE void VisitPointer(HeapObject* host, Object** slot) final {
+ Object* target = *slot;
+ DCHECK(!HasWeakHeapObjectTag(target));
+ if (heap_->InNewSpace(target)) {
+ HeapObject* target_object = HeapObject::cast(target);
+ MarkObjectViaMarkingWorklist(target_object);
+ }
+ }
+
+ V8_INLINE void VisitPointer(HeapObject* host, MaybeObject** slot) final {
+ MaybeObject* target = *slot;
+ if (heap_->InNewSpace(target)) {
+ HeapObject* target_object;
+ // Treat weak references as strong. TODO(marja): Proper weakness handling
+ // for minor-mcs.
+ if (target->ToStrongOrWeakHeapObject(&target_object)) {
+ MarkObjectViaMarkingWorklist(target_object);
+ }
+ }
+ }
+
+ private:
+ inline void MarkObjectViaMarkingWorklist(HeapObject* object) {
+ if (marking_state_->WhiteToGrey(object)) {
+ // Marking deque overflow is unsupported for the young generation.
+ CHECK(worklist_.Push(object));
+ }
+ }
+
+ Heap* heap_;
+ MinorMarkCompactCollector::MarkingWorklist::View worklist_;
+ MinorMarkCompactCollector::MarkingState* marking_state_;
+};
+
+void MinorMarkCompactCollector::SetUp() {}
+
+void MinorMarkCompactCollector::TearDown() {}
+
+MinorMarkCompactCollector::MinorMarkCompactCollector(Heap* heap)
+ : MarkCompactCollectorBase(heap),
+ worklist_(new MinorMarkCompactCollector::MarkingWorklist()),
+ main_marking_visitor_(new YoungGenerationMarkingVisitor(
+ heap, marking_state(), worklist_, kMainMarker)),
+ page_parallel_job_semaphore_(0) {
+ static_assert(
+ kNumMarkers <= MinorMarkCompactCollector::MarkingWorklist::kMaxNumTasks,
+ "more marker tasks than marking deque can handle");
+}
+
+MinorMarkCompactCollector::~MinorMarkCompactCollector() {
+ delete worklist_;
+ delete main_marking_visitor_;
+}
+
+int MinorMarkCompactCollector::NumberOfParallelMarkingTasks(int pages) {
+ DCHECK_GT(pages, 0);
+ if (!FLAG_minor_mc_parallel_marking) return 1;
+ // Pages are not private to markers but we can still use them to estimate the
+ // amount of marking that is required.
+ const int kPagesPerTask = 2;
+ const int wanted_tasks = Max(1, pages / kPagesPerTask);
+ return Min(NumberOfAvailableCores(), Min(wanted_tasks, kNumMarkers));
+}
+
+void MinorMarkCompactCollector::CleanupSweepToIteratePages() {
+ for (Page* p : sweep_to_iterate_pages_) {
+ if (p->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
+ p->ClearFlag(Page::SWEEP_TO_ITERATE);
+ non_atomic_marking_state()->ClearLiveness(p);
+ }
+ }
+ sweep_to_iterate_pages_.clear();
+}
+
+class YoungGenerationMigrationObserver final : public MigrationObserver {
+ public:
+ YoungGenerationMigrationObserver(Heap* heap,
+ MarkCompactCollector* mark_compact_collector)
+ : MigrationObserver(heap),
+ mark_compact_collector_(mark_compact_collector) {}
+
+ inline void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst,
+ int size) final {
+ // Migrate color to old generation marking in case the object survived young
+ // generation garbage collection.
+ if (heap_->incremental_marking()->IsMarking()) {
+ DCHECK(
+ heap_->incremental_marking()->atomic_marking_state()->IsWhite(dst));
+ heap_->incremental_marking()->TransferColor(src, dst);
+ }
+ }
+
+ protected:
+ base::Mutex mutex_;
+ MarkCompactCollector* mark_compact_collector_;
+};
+
+class YoungGenerationRecordMigratedSlotVisitor final
+ : public RecordMigratedSlotVisitor {
+ public:
+ explicit YoungGenerationRecordMigratedSlotVisitor(
+ MarkCompactCollector* collector)
+ : RecordMigratedSlotVisitor(collector) {}
+
+ void VisitCodeTarget(Code* host, RelocInfo* rinfo) final { UNREACHABLE(); }
+ void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) final {
+ UNREACHABLE();
+ }
+
+ private:
+ // Only record slots for host objects that are considered live by the full
+ // collector.
+ inline bool IsLive(HeapObject* object) {
+ return collector_->non_atomic_marking_state()->IsBlack(object);
+ }
+
+ inline void RecordMigratedSlot(HeapObject* host, MaybeObject* value,
+ Address slot) final {
+ if (value->IsStrongOrWeakHeapObject()) {
+ Page* p = Page::FromAddress(reinterpret_cast<Address>(value));
+ if (p->InNewSpace()) {
+ DCHECK_IMPLIES(p->InToSpace(),
+ p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
+ RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(
+ Page::FromAddress(slot), slot);
+ } else if (p->IsEvacuationCandidate() && IsLive(host)) {
+ RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
+ Page::FromAddress(slot), slot);
+ }
+ }
+ }
+};
+
+void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS);
+
+ PointersUpdatingVisitor updating_visitor;
+ ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
+ &page_parallel_job_semaphore_);
+
+ CollectNewSpaceArrayBufferTrackerItems(&updating_job);
+ // Create batches of global handles.
+ SeedGlobalHandles<GlobalHandlesUpdatingItem>(isolate()->global_handles(),
+ &updating_job);
+ const int to_space_tasks = CollectToSpaceUpdatingItems(&updating_job);
+ int remembered_set_pages = 0;
+ remembered_set_pages += CollectRememberedSetUpdatingItems(
+ &updating_job, heap()->old_space(),
+ RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
+ remembered_set_pages += CollectRememberedSetUpdatingItems(
+ &updating_job, heap()->code_space(),
+ RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
+ remembered_set_pages += CollectRememberedSetUpdatingItems(
+ &updating_job, heap()->map_space(),
+ RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
+ remembered_set_pages += CollectRememberedSetUpdatingItems(
+ &updating_job, heap()->lo_space(),
+ RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
+ const int remembered_set_tasks =
+ remembered_set_pages == 0 ? 0
+ : NumberOfParallelPointerUpdateTasks(
+ remembered_set_pages, old_to_new_slots_);
+ const int num_tasks = Max(to_space_tasks, remembered_set_tasks);
+ for (int i = 0; i < num_tasks; i++) {
+ updating_job.AddTask(new PointersUpdatingTask(
+ isolate(), GCTracer::BackgroundScope::
+ MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
+ }
+
+ {
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
+ heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_MINOR_MC_UPDATE);
+ }
+ {
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS);
+ updating_job.Run(isolate()->async_counters());
+ heap()->array_buffer_collector()->FreeAllocationsOnBackgroundThread();
+ }
+
+ {
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_WEAK);
+
+ EvacuationWeakObjectRetainer evacuation_object_retainer;
+ heap()->ProcessWeakListRoots(&evacuation_object_retainer);
+
+ // Update pointers from external string table.
+ heap()->UpdateNewSpaceReferencesInExternalStringTable(
+ &UpdateReferenceInExternalStringTableEntry);
+ heap()->IterateEncounteredWeakCollections(&updating_visitor);
+ }
+}
+
+class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
+ public:
+ explicit RootMarkingVisitor(MinorMarkCompactCollector* collector)
+ : collector_(collector),
+ marking_state_(collector_->non_atomic_marking_state()) {}
+
+ void VisitRootPointer(Root root, const char* description,
+ Object** p) override {
+ MarkObjectByPointer(p);
+ }
+
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
+ for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
+ }
+
+ private:
+ void MarkObjectByPointer(Object** p) {
+ if (!(*p)->IsHeapObject()) return;
+
+ HeapObject* object = HeapObject::cast(*p);
+
+ if (!collector_->heap()->InNewSpace(object)) return;
+
+ if (marking_state_->WhiteToGrey(object)) {
+ collector_->main_marking_visitor()->Visit(object);
+ collector_->ProcessMarkingWorklist();
+ }
+ }
+
+ MinorMarkCompactCollector* collector_;
+ MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
+};
+
+void MinorMarkCompactCollector::CollectGarbage() {
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_SWEEPING);
+ heap()->mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
+ CleanupSweepToIteratePages();
+ }
+
+ MarkLiveObjects();
+ ClearNonLiveReferences();
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) {
+ YoungGenerationMarkingVerifier verifier(heap());
+ verifier.Run();
+ }
+#endif // VERIFY_HEAP
+
+ Evacuate();
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) {
+ YoungGenerationEvacuationVerifier verifier(heap());
+ verifier.Run();
+ }
+#endif // VERIFY_HEAP
+
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARKING_DEQUE);
+ heap()->incremental_marking()->UpdateMarkingWorklistAfterScavenge();
+ }
+
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_RESET_LIVENESS);
+ for (Page* p : PageRange(heap()->new_space()->FromSpaceStart(),
+ heap()->new_space()->FromSpaceEnd())) {
+ DCHECK(!p->IsFlagSet(Page::SWEEP_TO_ITERATE));
+ non_atomic_marking_state()->ClearLiveness(p);
+ if (FLAG_concurrent_marking) {
+ // Ensure that the concurrent marker does not track pages that are
+ // going to be unmapped.
+ heap()->concurrent_marking()->ClearLiveness(p);
+ }
+ }
+ }
+
+ RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
+ heap(), [](MemoryChunk* chunk) {
+ if (chunk->SweepingDone()) {
+ RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
+ } else {
+ RememberedSet<OLD_TO_NEW>::PreFreeEmptyBuckets(chunk);
+ }
+ });
+
+ heap()->account_external_memory_concurrently_freed();
+}
+
+void MinorMarkCompactCollector::MakeIterable(
+ Page* p, MarkingTreatmentMode marking_mode,
+ FreeSpaceTreatmentMode free_space_mode) {
+ // We have to clear the full collector's markbits for the areas that we
+ // remove here.
+ MarkCompactCollector* full_collector = heap()->mark_compact_collector();
+ Address free_start = p->area_start();
+ DCHECK_EQ(0, reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize));
+
+ for (auto object_and_size :
+ LiveObjectRange<kGreyObjects>(p, marking_state()->bitmap(p))) {
+ HeapObject* const object = object_and_size.first;
+ DCHECK(non_atomic_marking_state()->IsGrey(object));
+ Address free_end = object->address();
+ if (free_end != free_start) {
+ CHECK_GT(free_end, free_start);
+ size_t size = static_cast<size_t>(free_end - free_start);
+ full_collector->non_atomic_marking_state()->bitmap(p)->ClearRange(
+ p->AddressToMarkbitIndex(free_start),
+ p->AddressToMarkbitIndex(free_end));
+ if (free_space_mode == ZAP_FREE_SPACE) {
+ memset(free_start, 0xCC, size);
+ }
+ p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
+ ClearRecordedSlots::kNo);
+ }
+ Map* map = object->synchronized_map();
+ int size = object->SizeFromMap(map);
+ free_start = free_end + size;
+ }
+
+ if (free_start != p->area_end()) {
+ CHECK_GT(p->area_end(), free_start);
+ size_t size = static_cast<size_t>(p->area_end() - free_start);
+ full_collector->non_atomic_marking_state()->bitmap(p)->ClearRange(
+ p->AddressToMarkbitIndex(free_start),
+ p->AddressToMarkbitIndex(p->area_end()));
+ if (free_space_mode == ZAP_FREE_SPACE) {
+ memset(free_start, 0xCC, size);
+ }
+ p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
+ ClearRecordedSlots::kNo);
+ }
+
+ if (marking_mode == MarkingTreatmentMode::CLEAR) {
+ non_atomic_marking_state()->ClearLiveness(p);
+ p->ClearFlag(Page::SWEEP_TO_ITERATE);
+ }
+}
+
+namespace {
+
+// Helper class for pruning the string table.
+class YoungGenerationExternalStringTableCleaner : public RootVisitor {
+ public:
+ YoungGenerationExternalStringTableCleaner(
+ MinorMarkCompactCollector* collector)
+ : heap_(collector->heap()),
+ marking_state_(collector->non_atomic_marking_state()) {}
+
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
+ DCHECK_EQ(static_cast<int>(root),
+ static_cast<int>(Root::kExternalStringsTable));
+ // Visit all HeapObject pointers in [start, end).
+ for (Object** p = start; p < end; p++) {
+ Object* o = *p;
+ if (o->IsHeapObject()) {
+ HeapObject* heap_object = HeapObject::cast(o);
+ if (marking_state_->IsWhite(heap_object)) {
+ if (o->IsExternalString()) {
+ heap_->FinalizeExternalString(String::cast(*p));
+ } else {
+ // The original external string may have been internalized.
+ DCHECK(o->IsThinString());
+ }
+ // Set the entry to the_hole_value (as deleted).
+ *p = heap_->the_hole_value();
+ }
+ }
+ }
+ }
+
+ private:
+ Heap* heap_;
+ MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
+};
+
+// Marked young generation objects and all old generation objects will be
+// retained.
+class MinorMarkCompactWeakObjectRetainer : public WeakObjectRetainer {
+ public:
+ explicit MinorMarkCompactWeakObjectRetainer(
+ MinorMarkCompactCollector* collector)
+ : heap_(collector->heap()),
+ marking_state_(collector->non_atomic_marking_state()) {}
+
+ virtual Object* RetainAs(Object* object) {
+ HeapObject* heap_object = HeapObject::cast(object);
+ if (!heap_->InNewSpace(heap_object)) return object;
+
+ // Young generation marking only marks to grey instead of black.
+ DCHECK(!marking_state_->IsBlack(heap_object));
+ if (marking_state_->IsGrey(heap_object)) {
+ return object;
+ }
+ return nullptr;
+ }
+
+ private:
+ Heap* heap_;
+ MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
+};
+
+} // namespace
+
+void MinorMarkCompactCollector::ClearNonLiveReferences() {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR);
+
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_STRING_TABLE);
+ // Internalized strings are always stored in old space, so there is no need
+ // to clean them here.
+ YoungGenerationExternalStringTableCleaner external_visitor(this);
+ heap()->external_string_table_.IterateNewSpaceStrings(&external_visitor);
+ heap()->external_string_table_.CleanUpNewSpaceStrings();
+ }
+
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_WEAK_LISTS);
+ // Process the weak references.
+ MinorMarkCompactWeakObjectRetainer retainer(this);
+ heap()->ProcessYoungWeakReferences(&retainer);
+ }
+}
+
+void MinorMarkCompactCollector::EvacuatePrologue() {
+ NewSpace* new_space = heap()->new_space();
+ // Append the list of new space pages to be processed.
+ for (Page* p : PageRange(new_space->bottom(), new_space->top())) {
+ new_space_evacuation_pages_.push_back(p);
+ }
+ new_space->Flip();
+ new_space->ResetLinearAllocationArea();
+}
+
+void MinorMarkCompactCollector::EvacuateEpilogue() {
+ heap()->new_space()->set_age_mark(heap()->new_space()->top());
+ // Give pages that are queued to be freed back to the OS.
+ heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
+}
+
+UpdatingItem* MinorMarkCompactCollector::CreateToSpaceUpdatingItem(
+ MemoryChunk* chunk, Address start, Address end) {
+ return new ToSpaceUpdatingItem<NonAtomicMarkingState>(
+ chunk, start, end, non_atomic_marking_state());
+}
+
+UpdatingItem* MinorMarkCompactCollector::CreateRememberedSetUpdatingItem(
+ MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
+ return new RememberedSetUpdatingItem<NonAtomicMarkingState>(
+ heap(), non_atomic_marking_state(), chunk, updating_mode);
+}
+
+class MarkingItem;
+class GlobalHandlesMarkingItem;
+class PageMarkingItem;
+class RootMarkingItem;
+class YoungGenerationMarkingTask;
+
+class MarkingItem : public ItemParallelJob::Item {
+ public:
+ virtual ~MarkingItem() {}
+ virtual void Process(YoungGenerationMarkingTask* task) = 0;
+};
+
+class YoungGenerationMarkingTask : public ItemParallelJob::Task {
+ public:
+ YoungGenerationMarkingTask(
+ Isolate* isolate, MinorMarkCompactCollector* collector,
+ MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
+ : ItemParallelJob::Task(isolate),
+ collector_(collector),
+ marking_worklist_(global_worklist, task_id),
+ marking_state_(collector->marking_state()),
+ visitor_(isolate->heap(), marking_state_, global_worklist, task_id) {
+ local_live_bytes_.reserve(isolate->heap()->new_space()->Capacity() /
+ Page::kPageSize);
+ }
+
+ void RunInParallel() override {
+ TRACE_BACKGROUND_GC(collector_->heap()->tracer(),
+ GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_MARKING);
+ double marking_time = 0.0;
+ {
+ TimedScope scope(&marking_time);
+ MarkingItem* item = nullptr;
+ while ((item = GetItem<MarkingItem>()) != nullptr) {
+ item->Process(this);
+ item->MarkFinished();
+ EmptyLocalMarkingWorklist();
+ }
+ EmptyMarkingWorklist();
+ DCHECK(marking_worklist_.IsLocalEmpty());
+ FlushLiveBytes();
+ }
+ if (FLAG_trace_minor_mc_parallel_marking) {
+ PrintIsolate(collector_->isolate(), "marking[%p]: time=%f\n",
+ static_cast<void*>(this), marking_time);
+ }
+ };
+
+ void MarkObject(Object* object) {
+ if (!collector_->heap()->InNewSpace(object)) return;
+ HeapObject* heap_object = HeapObject::cast(object);
+ if (marking_state_->WhiteToGrey(heap_object)) {
+ const int size = visitor_.Visit(heap_object);
+ IncrementLiveBytes(heap_object, size);
+ }
+ }
+
+ private:
+ void EmptyLocalMarkingWorklist() {
+ HeapObject* object = nullptr;
+ while (marking_worklist_.Pop(&object)) {
+ const int size = visitor_.Visit(object);
+ IncrementLiveBytes(object, size);
+ }
+ }
+
+ void EmptyMarkingWorklist() {
+ HeapObject* object = nullptr;
+ while (marking_worklist_.Pop(&object)) {
+ const int size = visitor_.Visit(object);
+ IncrementLiveBytes(object, size);
+ }
+ }
+
+ void IncrementLiveBytes(HeapObject* object, intptr_t bytes) {
+ local_live_bytes_[Page::FromAddress(reinterpret_cast<Address>(object))] +=
+ bytes;
+ }
+
+ void FlushLiveBytes() {
+ for (auto pair : local_live_bytes_) {
+ marking_state_->IncrementLiveBytes(pair.first, pair.second);
+ }
+ }
+
+ MinorMarkCompactCollector* collector_;
+ MinorMarkCompactCollector::MarkingWorklist::View marking_worklist_;
+ MinorMarkCompactCollector::MarkingState* marking_state_;
+ YoungGenerationMarkingVisitor visitor_;
+ std::unordered_map<Page*, intptr_t, Page::Hasher> local_live_bytes_;
+};
+
+class BatchedRootMarkingItem : public MarkingItem {
+ public:
+ explicit BatchedRootMarkingItem(std::vector<Object*>&& objects)
+ : objects_(objects) {}
+ virtual ~BatchedRootMarkingItem() {}
+
+ void Process(YoungGenerationMarkingTask* task) override {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "BatchedRootMarkingItem::Process");
+ for (Object* object : objects_) {
+ task->MarkObject(object);
+ }
+ }
+
+ private:
+ std::vector<Object*> objects_;
+};
+
+class PageMarkingItem : public MarkingItem {
+ public:
+ explicit PageMarkingItem(MemoryChunk* chunk,
+ base::AtomicNumber<intptr_t>* global_slots)
+ : chunk_(chunk), global_slots_(global_slots), slots_(0) {}
+ virtual ~PageMarkingItem() { global_slots_->Increment(slots_); }
+
+ void Process(YoungGenerationMarkingTask* task) override {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "PageMarkingItem::Process");
+ base::LockGuard<base::Mutex> guard(chunk_->mutex());
+ MarkUntypedPointers(task);
+ MarkTypedPointers(task);
+ }
+
+ private:
+ inline Heap* heap() { return chunk_->heap(); }
+
+ void MarkUntypedPointers(YoungGenerationMarkingTask* task) {
+ RememberedSet<OLD_TO_NEW>::Iterate(
+ chunk_,
+ [this, task](Address slot) { return CheckAndMarkObject(task, slot); },
+ SlotSet::PREFREE_EMPTY_BUCKETS);
+ }
+
+ void MarkTypedPointers(YoungGenerationMarkingTask* task) {
+ Isolate* isolate = heap()->isolate();
+ RememberedSet<OLD_TO_NEW>::IterateTyped(
+ chunk_, [this, isolate, task](SlotType slot_type, Address host_addr,
+ Address slot) {
+ return UpdateTypedSlotHelper::UpdateTypedSlot(
+ isolate, slot_type, slot, [this, task](MaybeObject** slot) {
+ return CheckAndMarkObject(task,
+ reinterpret_cast<Address>(slot));
+ });
+ });
+ }
+
+ SlotCallbackResult CheckAndMarkObject(YoungGenerationMarkingTask* task,
+ Address slot_address) {
+ MaybeObject* object = *reinterpret_cast<MaybeObject**>(slot_address);
+ if (heap()->InNewSpace(object)) {
+ // Marking happens before flipping the young generation, so the object
+ // has to be in ToSpace.
+ DCHECK(heap()->InToSpace(object));
+ HeapObject* heap_object;
+ bool success = object->ToStrongOrWeakHeapObject(&heap_object);
+ USE(success);
+ DCHECK(success);
+ task->MarkObject(heap_object);
+ slots_++;
+ return KEEP_SLOT;
+ }
+ return REMOVE_SLOT;
+ }
+
+ MemoryChunk* chunk_;
+ base::AtomicNumber<intptr_t>* global_slots_;
+ intptr_t slots_;
+};
+
+class GlobalHandlesMarkingItem : public MarkingItem {
+ public:
+ GlobalHandlesMarkingItem(GlobalHandles* global_handles, size_t start,
+ size_t end)
+ : global_handles_(global_handles), start_(start), end_(end) {}
+ virtual ~GlobalHandlesMarkingItem() {}
+
+ void Process(YoungGenerationMarkingTask* task) override {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "GlobalHandlesMarkingItem::Process");
+ GlobalHandlesRootMarkingVisitor visitor(task);
+ global_handles_
+ ->IterateNewSpaceStrongAndDependentRootsAndIdentifyUnmodified(
+ &visitor, start_, end_);
+ }
+
+ private:
+ class GlobalHandlesRootMarkingVisitor : public RootVisitor {
+ public:
+ explicit GlobalHandlesRootMarkingVisitor(YoungGenerationMarkingTask* task)
+ : task_(task) {}
+
+ void VisitRootPointer(Root root, const char* description,
+ Object** p) override {
+ DCHECK_EQ(Root::kGlobalHandles, root);
+ task_->MarkObject(*p);
+ }
+
+ void VisitRootPointers(Root root, const char* description, Object** start,
+ Object** end) override {
+ DCHECK_EQ(Root::kGlobalHandles, root);
+ for (Object** p = start; p < end; p++) {
+ task_->MarkObject(*p);
+ }
+ }
+
+ private:
+ YoungGenerationMarkingTask* task_;
+ };
+
+ GlobalHandles* global_handles_;
+ size_t start_;
+ size_t end_;
+};
+
+void MinorMarkCompactCollector::MarkRootSetInParallel() {
+ base::AtomicNumber<intptr_t> slots;
+ {
+ ItemParallelJob job(isolate()->cancelable_task_manager(),
+ &page_parallel_job_semaphore_);
+
+ // Seed the root set (roots + old->new set).
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_SEED);
+ // Create batches of roots.
+ RootMarkingVisitorSeedOnly<BatchedRootMarkingItem> root_seed_visitor(
+ &job);
+ heap()->IterateRoots(&root_seed_visitor, VISIT_ALL_IN_MINOR_MC_MARK);
+ // Create batches of global handles.
+ SeedGlobalHandles<GlobalHandlesMarkingItem>(isolate()->global_handles(),
+ &job);
+ // Create items for each page.
+ RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
+ heap(), [&job, &slots](MemoryChunk* chunk) {
+ job.AddItem(new PageMarkingItem(chunk, &slots));
+ });
+ // Flush any remaining objects in the seeding visitor.
+ root_seed_visitor.FlushObjects();
+ }
+
+ // Add tasks and run in parallel.
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_ROOTS);
+ const int new_space_pages =
+ static_cast<int>(heap()->new_space()->Capacity()) / Page::kPageSize;
+ const int num_tasks = NumberOfParallelMarkingTasks(new_space_pages);
+ for (int i = 0; i < num_tasks; i++) {
+ job.AddTask(
+ new YoungGenerationMarkingTask(isolate(), this, worklist(), i));
+ }
+ job.Run(isolate()->async_counters());
+ DCHECK(worklist()->IsGlobalEmpty());
+ }
+ }
+ old_to_new_slots_ = static_cast<int>(slots.Value());
+}
+
+void MinorMarkCompactCollector::MarkLiveObjects() {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK);
+
+ PostponeInterruptsScope postpone(isolate());
+
+ RootMarkingVisitor root_visitor(this);
+
+ MarkRootSetInParallel();
+
+ // Mark the rest on the main thread.
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_WEAK);
+ heap()->IterateEncounteredWeakCollections(&root_visitor);
+ ProcessMarkingWorklist();
+ }
+
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_GLOBAL_HANDLES);
+ isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
+ &IsUnmarkedObjectForYoungGeneration);
+ isolate()
+ ->global_handles()
+ ->IterateNewSpaceWeakUnmodifiedRootsForFinalizers(&root_visitor);
+ isolate()
+ ->global_handles()
+ ->IterateNewSpaceWeakUnmodifiedRootsForPhantomHandles(
+ &root_visitor, &IsUnmarkedObjectForYoungGeneration);
+ ProcessMarkingWorklist();
+ }
+}
+
+void MinorMarkCompactCollector::ProcessMarkingWorklist() {
+ MarkingWorklist::View marking_worklist(worklist(), kMainMarker);
+ HeapObject* object = nullptr;
+ while (marking_worklist.Pop(&object)) {
+ DCHECK(!object->IsFiller());
+ DCHECK(object->IsHeapObject());
+ DCHECK(heap()->Contains(object));
+ DCHECK(non_atomic_marking_state()->IsGrey(object));
+ main_marking_visitor()->Visit(object);
+ }
+ DCHECK(marking_worklist.IsLocalEmpty());
+}
+
+void MinorMarkCompactCollector::Evacuate() {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE);
+ base::LockGuard<base::Mutex> guard(heap()->relocation_mutex());
+
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_PROLOGUE);
+ EvacuatePrologue();
+ }
+
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_COPY);
+ EvacuatePagesInParallel();
+ }
+
+ UpdatePointersAfterEvacuation();
+
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_REBALANCE);
+ if (!heap()->new_space()->Rebalance()) {
+ heap()->FatalProcessOutOfMemory("NewSpace::Rebalance");
+ }
+ }
+
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_CLEAN_UP);
+ for (Page* p : new_space_evacuation_pages_) {
+ if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) ||
+ p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
+ p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
+ p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
+ p->SetFlag(Page::SWEEP_TO_ITERATE);
+ sweep_to_iterate_pages_.push_back(p);
+ }
+ }
+ new_space_evacuation_pages_.clear();
+ }
+
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_EPILOGUE);
+ EvacuateEpilogue();
+ }
+}
+
+namespace {
+
+class YoungGenerationEvacuator : public Evacuator {
+ public:
+ YoungGenerationEvacuator(MinorMarkCompactCollector* collector,
+ RecordMigratedSlotVisitor* record_visitor)
+ : Evacuator(collector->heap(), record_visitor), collector_(collector) {}
+
+ GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() override {
+ return GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_EVACUATE_COPY;
+ }
+
+ protected:
+ void RawEvacuatePage(Page* page, intptr_t* live_bytes) override;
+
+ MinorMarkCompactCollector* collector_;
+};
+
+void YoungGenerationEvacuator::RawEvacuatePage(Page* page,
+ intptr_t* live_bytes) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ "YoungGenerationEvacuator::RawEvacuatePage");
+ MinorMarkCompactCollector::NonAtomicMarkingState* marking_state =
+ collector_->non_atomic_marking_state();
+ *live_bytes = marking_state->live_bytes(page);
+ switch (ComputeEvacuationMode(page)) {
+ case kObjectsNewToOld:
+ LiveObjectVisitor::VisitGreyObjectsNoFail(
+ page, marking_state, &new_space_visitor_,
+ LiveObjectVisitor::kClearMarkbits);
+ // ArrayBufferTracker will be updated during pointers updating.
+ break;
+ case kPageNewToOld:
+ LiveObjectVisitor::VisitGreyObjectsNoFail(
+ page, marking_state, &new_to_old_page_visitor_,
+ LiveObjectVisitor::kKeepMarking);
+ new_to_old_page_visitor_.account_moved_bytes(
+ marking_state->live_bytes(page));
+ // TODO(mlippautz): If cleaning array buffers is too slow here we can
+ // delay it until the next GC.
+ ArrayBufferTracker::FreeDead(page, marking_state);
+ if (heap()->ShouldZapGarbage()) {
+ collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
+ ZAP_FREE_SPACE);
+ } else if (heap()->incremental_marking()->IsMarking()) {
+ // When incremental marking is on, we need to clear the mark bits of
+ // the full collector. We cannot yet discard the young generation mark
+ // bits as they are still relevant for pointers updating.
+ collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
+ IGNORE_FREE_SPACE);
+ }
+ break;
+ case kPageNewToNew:
+ LiveObjectVisitor::VisitGreyObjectsNoFail(
+ page, marking_state, &new_to_new_page_visitor_,
+ LiveObjectVisitor::kKeepMarking);
+ new_to_new_page_visitor_.account_moved_bytes(
+ marking_state->live_bytes(page));
+ // TODO(mlippautz): If cleaning array buffers is too slow here we can
+ // delay it until the next GC.
+ ArrayBufferTracker::FreeDead(page, marking_state);
+ if (heap()->ShouldZapGarbage()) {
+ collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
+ ZAP_FREE_SPACE);
+ } else if (heap()->incremental_marking()->IsMarking()) {
+ // When incremental marking is on, we need to clear the mark bits of
+ // the full collector. We cannot yet discard the young generation mark
+ // bits as they are still relevant for pointers updating.
+ collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
+ IGNORE_FREE_SPACE);
+ }
+ break;
+ case kObjectsOldToOld:
+ UNREACHABLE();
+ break;
+ }
+}
+
+} // namespace
+
+void MinorMarkCompactCollector::EvacuatePagesInParallel() {
+ ItemParallelJob evacuation_job(isolate()->cancelable_task_manager(),
+ &page_parallel_job_semaphore_);
+ intptr_t live_bytes = 0;
+
+ for (Page* page : new_space_evacuation_pages_) {
+ intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page);
+ if (live_bytes_on_page == 0 && !page->contains_array_buffers()) continue;
+ live_bytes += live_bytes_on_page;
+ if (ShouldMovePage(page, live_bytes_on_page)) {
+ if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
+ EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
+ } else {
+ EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
+ }
+ }
+ evacuation_job.AddItem(new PageEvacuationItem(page));
+ }
+ if (evacuation_job.NumberOfItems() == 0) return;
+
+ YoungGenerationMigrationObserver observer(heap(),
+ heap()->mark_compact_collector());
+ YoungGenerationRecordMigratedSlotVisitor record_visitor(
+ heap()->mark_compact_collector());
+ CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>(
+ this, &evacuation_job, &record_visitor, &observer, live_bytes);
+}
+
+int MinorMarkCompactCollector::CollectNewSpaceArrayBufferTrackerItems(
+ ItemParallelJob* job) {
+ int pages = 0;
+ for (Page* p : new_space_evacuation_pages_) {
+ if (Evacuator::ComputeEvacuationMode(p) == Evacuator::kObjectsNewToOld) {
+ if (p->local_tracker() == nullptr) continue;
+
+ pages++;
+ job->AddItem(new ArrayBufferTrackerUpdatingItem(
+ p, ArrayBufferTrackerUpdatingItem::kRegular));
+ }
+ }
+ return pages;
+}
+
+#endif // ENABLE_MINOR_MC
+
} // namespace internal
} // namespace v8
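
The minor collector added above drains its marking worklist on the main thread once the parallel root-set seeding has finished. The following is a minimal, standalone sketch of that drain loop, using illustrative names rather than V8's Worklist/MarkingState types:

#include <deque>
#include <unordered_set>
#include <vector>

struct Obj {
  std::vector<Obj*> fields;  // outgoing pointers of this heap object
};

// Pop grey objects, visit their fields, and push newly marked children,
// mirroring the shape of MinorMarkCompactCollector::ProcessMarkingWorklist().
void DrainWorklist(std::deque<Obj*>& worklist,
                   std::unordered_set<Obj*>& marked) {
  while (!worklist.empty()) {
    Obj* object = worklist.front();
    worklist.pop_front();
    for (Obj* child : object->fields) {
      if (child != nullptr && marked.insert(child).second) {
        worklist.push_back(child);  // freshly marked objects become grey
      }
    }
  }
}
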
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index 755f0eb4eb..944b139a59 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -117,7 +117,7 @@ class MarkBitCellIterator {
return cell_base_;
}
- MUST_USE_RESULT inline bool Advance() {
+ V8_WARN_UNUSED_RESULT inline bool Advance() {
cell_base_ += Bitmap::kBitsPerCell * kPointerSize;
return ++cell_index_ != last_cell_index_;
}
@@ -354,76 +354,6 @@ class MinorNonAtomicMarkingState final
}
};
-// Collector for young-generation only.
-class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
- public:
- using MarkingState = MinorMarkingState;
- using NonAtomicMarkingState = MinorNonAtomicMarkingState;
-
- explicit MinorMarkCompactCollector(Heap* heap);
- ~MinorMarkCompactCollector();
-
- MarkingState* marking_state() { return &marking_state_; }
-
- NonAtomicMarkingState* non_atomic_marking_state() {
- return &non_atomic_marking_state_;
- }
-
- void SetUp() override;
- void TearDown() override;
- void CollectGarbage() override;
-
- void MakeIterable(Page* page, MarkingTreatmentMode marking_mode,
- FreeSpaceTreatmentMode free_space_mode);
- void CleanupSweepToIteratePages();
-
- private:
- using MarkingWorklist = Worklist<HeapObject*, 64 /* segment size */>;
- class RootMarkingVisitor;
-
- static const int kNumMarkers = 8;
- static const int kMainMarker = 0;
-
- inline MarkingWorklist* worklist() { return worklist_; }
-
- inline YoungGenerationMarkingVisitor* main_marking_visitor() {
- return main_marking_visitor_;
- }
-
- void MarkLiveObjects() override;
- void MarkRootSetInParallel();
- void ProcessMarkingWorklist() override;
- void ClearNonLiveReferences() override;
-
- void EvacuatePrologue() override;
- void EvacuateEpilogue() override;
- void Evacuate() override;
- void EvacuatePagesInParallel() override;
- void UpdatePointersAfterEvacuation() override;
-
- UpdatingItem* CreateToSpaceUpdatingItem(MemoryChunk* chunk, Address start,
- Address end) override;
- UpdatingItem* CreateRememberedSetUpdatingItem(
- MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) override;
-
- int CollectNewSpaceArrayBufferTrackerItems(ItemParallelJob* job);
-
- int NumberOfParallelMarkingTasks(int pages);
-
- MarkingWorklist* worklist_;
-
- YoungGenerationMarkingVisitor* main_marking_visitor_;
- base::Semaphore page_parallel_job_semaphore_;
- std::vector<Page*> new_space_evacuation_pages_;
- std::vector<Page*> sweep_to_iterate_pages_;
-
- MarkingState marking_state_;
- NonAtomicMarkingState non_atomic_marking_state_;
-
- friend class YoungGenerationMarkingTask;
- friend class YoungGenerationMarkingVisitor;
-};
-
// This marking state is used when concurrent marking is running.
class IncrementalMarkingState final
: public MarkingStateBase<IncrementalMarkingState, AccessMode::ATOMIC> {
@@ -495,6 +425,10 @@ class MajorNonAtomicMarkingState final
struct WeakObjects {
Worklist<WeakCell*, 64> weak_cells;
Worklist<TransitionArray*, 64> transition_arrays;
+ // TODO(marja): For old space, we only need the slot, not the host
+ // object. Optimize this by adding a different storage for old space.
+ Worklist<std::pair<HeapObject*, HeapObjectReference**>, 64> weak_references;
+ Worklist<std::pair<HeapObject*, Code*>, 64> weak_objects_in_code;
};
// Collector for young and old generation.
@@ -658,7 +592,12 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void AbortCompaction();
- static inline bool IsOnEvacuationCandidate(HeapObject* obj) {
+ static inline bool IsOnEvacuationCandidate(Object* obj) {
+ return Page::FromAddress(reinterpret_cast<Address>(obj))
+ ->IsEvacuationCandidate();
+ }
+
+ static inline bool IsOnEvacuationCandidate(MaybeObject* obj) {
return Page::FromAddress(reinterpret_cast<Address>(obj))
->IsEvacuationCandidate();
}
@@ -666,6 +605,8 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void RecordRelocSlot(Code* host, RelocInfo* rinfo, Object* target);
V8_INLINE static void RecordSlot(HeapObject* object, Object** slot,
Object* target);
+ V8_INLINE static void RecordSlot(HeapObject* object,
+ HeapObjectReference** slot, Object* target);
void RecordLiveSlotsOnPage(Page* page);
void UpdateSlots(SlotsBuffer* buffer);
@@ -699,6 +640,15 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
weak_objects_.transition_arrays.Push(kMainThread, array);
}
+ void AddWeakReference(HeapObject* host, HeapObjectReference** slot) {
+ weak_objects_.weak_references.Push(kMainThread, std::make_pair(host, slot));
+ }
+
+ void AddWeakObjectInCode(HeapObject* object, Code* code) {
+ weak_objects_.weak_objects_in_code.Push(kMainThread,
+ std::make_pair(object, code));
+ }
+
Sweeper* sweeper() { return sweeper_; }
#ifdef DEBUG
@@ -711,9 +661,9 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
#ifdef VERIFY_HEAP
void VerifyValidStoreAndSlotsBufferEntries();
void VerifyMarkbitsAreClean();
+ void VerifyMarkbitsAreDirty(PagedSpace* space);
void VerifyMarkbitsAreClean(PagedSpace* space);
void VerifyMarkbitsAreClean(NewSpace* space);
- void VerifyWeakEmbeddedObjectsInCode();
#endif
private:
@@ -774,13 +724,12 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Clear non-live references in weak cells, transition and descriptor arrays,
// and deoptimize dependent code of non-live maps.
void ClearNonLiveReferences() override;
- void MarkDependentCodeForDeoptimization(DependentCode* list);
+ void MarkDependentCodeForDeoptimization();
// Checks if the given weak cell is a simple transition from the parent map
// of the given dead target. If so it clears the transition and trims
// the descriptor array of the parent if needed.
- void ClearSimpleMapTransition(WeakCell* potential_transition,
- Map* dead_target);
- void ClearSimpleMapTransition(Map* map, Map* dead_target);
+ void ClearPotentialSimpleMapTransition(Map* dead_target);
+ void ClearPotentialSimpleMapTransition(Map* map, Map* dead_target);
// Compact every array in the global list of transition arrays and
// trim the corresponding descriptor array if a transition target is non-live.
void ClearFullMapTransitions();
@@ -807,8 +756,8 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// dead values. If the value is a dead map and the parent map transitions to
// the dead map via weak cell, then this function also clears the map
// transition.
- void ClearWeakCellsAndSimpleMapTransitions(
- DependentCode** dependent_code_list);
+ void ClearWeakCells();
+ void ClearWeakReferences();
void AbortWeakObjects();
// Starts sweeping of spaces by contributing on the main thread and setting
@@ -917,8 +866,11 @@ class MarkingVisitor final
// ObjectVisitor implementation.
V8_INLINE void VisitPointer(HeapObject* host, Object** p) final;
+ V8_INLINE void VisitPointer(HeapObject* host, MaybeObject** p) final;
V8_INLINE void VisitPointers(HeapObject* host, Object** start,
Object** end) final;
+ V8_INLINE void VisitPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) final;
V8_INLINE void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) final;
V8_INLINE void VisitCodeTarget(Code* host, RelocInfo* rinfo) final;
@@ -962,6 +914,80 @@ class EvacuationScope {
MarkCompactCollector* collector_;
};
+#ifdef ENABLE_MINOR_MC
+
+// Collector for young-generation only.
+class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
+ public:
+ using MarkingState = MinorMarkingState;
+ using NonAtomicMarkingState = MinorNonAtomicMarkingState;
+
+ explicit MinorMarkCompactCollector(Heap* heap);
+ ~MinorMarkCompactCollector();
+
+ MarkingState* marking_state() { return &marking_state_; }
+
+ NonAtomicMarkingState* non_atomic_marking_state() {
+ return &non_atomic_marking_state_;
+ }
+
+ void SetUp() override;
+ void TearDown() override;
+ void CollectGarbage() override;
+
+ void MakeIterable(Page* page, MarkingTreatmentMode marking_mode,
+ FreeSpaceTreatmentMode free_space_mode);
+ void CleanupSweepToIteratePages();
+
+ private:
+ using MarkingWorklist = Worklist<HeapObject*, 64 /* segment size */>;
+ class RootMarkingVisitor;
+
+ static const int kNumMarkers = 8;
+ static const int kMainMarker = 0;
+
+ inline MarkingWorklist* worklist() { return worklist_; }
+
+ inline YoungGenerationMarkingVisitor* main_marking_visitor() {
+ return main_marking_visitor_;
+ }
+
+ void MarkLiveObjects() override;
+ void MarkRootSetInParallel();
+ void ProcessMarkingWorklist() override;
+ void ClearNonLiveReferences() override;
+
+ void EvacuatePrologue() override;
+ void EvacuateEpilogue() override;
+ void Evacuate() override;
+ void EvacuatePagesInParallel() override;
+ void UpdatePointersAfterEvacuation() override;
+
+ UpdatingItem* CreateToSpaceUpdatingItem(MemoryChunk* chunk, Address start,
+ Address end) override;
+ UpdatingItem* CreateRememberedSetUpdatingItem(
+ MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) override;
+
+ int CollectNewSpaceArrayBufferTrackerItems(ItemParallelJob* job);
+
+ int NumberOfParallelMarkingTasks(int pages);
+
+ MarkingWorklist* worklist_;
+
+ YoungGenerationMarkingVisitor* main_marking_visitor_;
+ base::Semaphore page_parallel_job_semaphore_;
+ std::vector<Page*> new_space_evacuation_pages_;
+ std::vector<Page*> sweep_to_iterate_pages_;
+
+ MarkingState marking_state_;
+ NonAtomicMarkingState non_atomic_marking_state_;
+
+ friend class YoungGenerationMarkingTask;
+ friend class YoungGenerationMarkingVisitor;
+};
+
+#endif // ENABLE_MINOR_MC
+
} // namespace internal
} // namespace v8
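
The WeakObjects::weak_references worklist introduced above collects (host, slot) pairs while marking runs; a later clearing pass nulls out slots whose targets did not survive. A simplified sketch of that record-then-clear pattern, using illustrative types rather than V8's HeapObjectReference machinery:

#include <unordered_set>
#include <utility>
#include <vector>

struct HeapObj {};

// Marking pushes the host object and the address of the weak field; clearing
// runs once liveness is known and resets references to dead targets.
void ClearDeadWeakReferences(
    std::vector<std::pair<HeapObj*, HeapObj**>>& weak_refs,
    const std::unordered_set<HeapObj*>& live) {
  for (auto& entry : weak_refs) {
    HeapObj** slot = entry.second;
    if (*slot != nullptr && live.count(*slot) == 0) {
      *slot = nullptr;  // a dead target is dropped instead of being kept alive
    }
  }
  weak_refs.clear();
}
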
diff --git a/deps/v8/src/heap/marking.cc b/deps/v8/src/heap/marking.cc
index 5e631187d2..23fbdd3465 100644
--- a/deps/v8/src/heap/marking.cc
+++ b/deps/v8/src/heap/marking.cc
@@ -17,6 +17,16 @@ void Bitmap::Clear() {
base::SeqCst_MemoryFence();
}
+void Bitmap::MarkAllBits() {
+ base::Atomic32* cell_base = reinterpret_cast<base::Atomic32*>(cells());
+ for (int i = 0; i < CellsCount(); i++) {
+ base::Relaxed_Store(cell_base + i, 0xffffffff);
+ }
+ // This fence prevents re-ordering of publishing stores with the mark-bit
+  // setting stores.
+ base::SeqCst_MemoryFence();
+}
+
void Bitmap::SetRange(uint32_t start_index, uint32_t end_index) {
unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
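
Bitmap::MarkAllBits() above fills every cell with relaxed stores and then publishes the result with a sequentially consistent fence. A rough standalone equivalent using the standard <atomic> API (V8 uses its own base::Relaxed_Store / SeqCst_MemoryFence wrappers):

#include <atomic>
#include <cstdint>

void MarkAllBits(std::atomic<uint32_t>* cells, int cell_count) {
  for (int i = 0; i < cell_count; i++) {
    // Each store may be reordered freely with respect to the others...
    cells[i].store(0xffffffffu, std::memory_order_relaxed);
  }
  // ...but none of them may move past this fence, so a thread that observes a
  // later publishing store also observes the fully set bitmap.
  std::atomic_thread_fence(std::memory_order_seq_cst);
}
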
diff --git a/deps/v8/src/heap/marking.h b/deps/v8/src/heap/marking.h
index 58630c52f0..bfa813091e 100644
--- a/deps/v8/src/heap/marking.h
+++ b/deps/v8/src/heap/marking.h
@@ -142,6 +142,8 @@ class V8_EXPORT_PRIVATE Bitmap {
void Clear();
+ void MarkAllBits();
+
// Clears bits in the given cell. The mask specifies bits to clear: if a
// bit is set in the mask then the corresponding bit is cleared in the cell.
template <AccessMode mode = AccessMode::NON_ATOMIC>
diff --git a/deps/v8/src/heap/memory-reducer.cc b/deps/v8/src/heap/memory-reducer.cc
index 77317a7b8a..baa4d6c00b 100644
--- a/deps/v8/src/heap/memory-reducer.cc
+++ b/deps/v8/src/heap/memory-reducer.cc
@@ -201,8 +201,8 @@ MemoryReducer::State MemoryReducer::Step(const State& state,
void MemoryReducer::ScheduleTimer(double time_ms, double delay_ms) {
- if (!heap()->use_tasks()) return;
DCHECK_LT(0, delay_ms);
+ if (heap()->IsTearingDown()) return;
// Leave some room for precision error in task scheduler.
const double kSlackMs = 100;
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap()->isolate());
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index b854dabb2c..008ba7879d 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -277,7 +277,7 @@ ObjectStatsCollectorImpl::ObjectStatsCollectorImpl(Heap* heap,
bool ObjectStatsCollectorImpl::ShouldRecordObject(HeapObject* obj,
CowMode check_cow_array) {
- if (obj->IsFixedArray()) {
+ if (obj->IsFixedArrayExact()) {
FixedArray* fixed_array = FixedArray::cast(obj);
bool cow_check = check_cow_array == kIgnoreCow || !IsCowArray(fixed_array);
return CanRecordFixedArray(fixed_array) && cow_check;
@@ -480,7 +480,7 @@ void ObjectStatsCollectorImpl::RecordVirtualFeedbackVectorDetails(
Object* raw_object = vector->get(slot.ToInt() + i);
if (!raw_object->IsHeapObject()) continue;
HeapObject* object = HeapObject::cast(raw_object);
- if (object->IsCell() || object->IsFixedArray()) {
+ if (object->IsCell() || object->IsFixedArrayExact()) {
RecordSimpleVirtualObjectStats(
vector, object, ObjectStats::FEEDBACK_VECTOR_ENTRY_TYPE);
}
@@ -531,7 +531,7 @@ void ObjectStatsCollectorImpl::CollectStatistics(HeapObject* obj, Phase phase) {
RecordVirtualContext(Context::cast(obj));
} else if (obj->IsScript()) {
RecordVirtualScriptDetails(Script::cast(obj));
- } else if (obj->IsFixedArray()) {
+ } else if (obj->IsFixedArrayExact()) {
// Has to go last as it triggers too eagerly.
RecordVirtualFixedArrayDetails(FixedArray::cast(obj));
}
@@ -552,9 +552,6 @@ void ObjectStatsCollectorImpl::CollectGlobalStatistics() {
}
// FixedArray.
- RecordSimpleVirtualObjectStats(
- nullptr, heap_->weak_new_space_object_to_code_list(),
- ObjectStats::WEAK_NEW_SPACE_OBJECT_TO_CODE_TYPE);
RecordSimpleVirtualObjectStats(nullptr, heap_->serialized_objects(),
ObjectStats::SERIALIZED_OBJECTS_TYPE);
RecordSimpleVirtualObjectStats(nullptr, heap_->number_string_cache(),
@@ -569,23 +566,20 @@ void ObjectStatsCollectorImpl::CollectGlobalStatistics() {
RecordSimpleVirtualObjectStats(nullptr, heap_->retained_maps(),
ObjectStats::RETAINED_MAPS_TYPE);
- // WeakFixedArray.
+ // FixedArrayOfWeakCells.
RecordSimpleVirtualObjectStats(
- nullptr, WeakFixedArray::cast(heap_->noscript_shared_function_infos()),
+ nullptr,
+ FixedArrayOfWeakCells::cast(heap_->noscript_shared_function_infos()),
ObjectStats::NOSCRIPT_SHARED_FUNCTION_INFOS_TYPE);
- RecordSimpleVirtualObjectStats(nullptr,
- WeakFixedArray::cast(heap_->script_list()),
- ObjectStats::SCRIPT_LIST_TYPE);
+ RecordSimpleVirtualObjectStats(
+ nullptr, FixedArrayOfWeakCells::cast(heap_->script_list()),
+ ObjectStats::SCRIPT_LIST_TYPE);
// HashTable.
RecordHashTableVirtualObjectStats(nullptr, heap_->string_table(),
ObjectStats::STRING_TABLE_TYPE);
RecordHashTableVirtualObjectStats(nullptr, heap_->code_stubs(),
ObjectStats::CODE_STUBS_TABLE_TYPE);
-
- // WeakHashTable.
- RecordHashTableVirtualObjectStats(nullptr, heap_->weak_object_to_code_table(),
- ObjectStats::OBJECT_TO_CODE_TYPE);
}
void ObjectStatsCollectorImpl::RecordObjectStats(HeapObject* obj,
@@ -630,8 +624,8 @@ void ObjectStatsCollectorImpl::RecordVirtualMapDetails(Map* map) {
if (map->prototype_info()->IsPrototypeInfo()) {
PrototypeInfo* info = PrototypeInfo::cast(map->prototype_info());
Object* users = info->prototype_users();
- if (users->IsWeakFixedArray()) {
- RecordSimpleVirtualObjectStats(map, WeakFixedArray::cast(users),
+ if (users->IsFixedArrayOfWeakCells()) {
+ RecordSimpleVirtualObjectStats(map, FixedArrayOfWeakCells::cast(users),
ObjectStats::PROTOTYPE_USERS_TYPE);
}
}
@@ -639,18 +633,9 @@ void ObjectStatsCollectorImpl::RecordVirtualMapDetails(Map* map) {
}
void ObjectStatsCollectorImpl::RecordVirtualScriptDetails(Script* script) {
- FixedArray* infos = script->shared_function_infos();
RecordSimpleVirtualObjectStats(
script, script->shared_function_infos(),
ObjectStats::SCRIPT_SHARED_FUNCTION_INFOS_TYPE);
- // Split off weak cells from the regular weak cell type.
- for (int i = 0; i < infos->length(); i++) {
- if (infos->get(i)->IsWeakCell()) {
- RecordSimpleVirtualObjectStats(
- infos, WeakCell::cast(infos->get(i)),
- ObjectStats::SCRIPT_SHARED_FUNCTION_INFOS_TYPE);
- }
- }
// Log the size of external source code.
Object* source = script->source();
@@ -681,11 +666,6 @@ void ObjectStatsCollectorImpl::RecordVirtualSharedFunctionInfoDetails(
RecordSimpleVirtualObjectStats(
nullptr, info, ObjectStats::UNCOMPILED_SHARED_FUNCTION_INFO_TYPE);
}
- // SharedFunctonInfo::feedback_metadata() is a COW array.
- FeedbackMetadata* fm = FeedbackMetadata::cast(info->feedback_metadata());
- RecordVirtualObjectStats(info, fm, ObjectStats::FEEDBACK_METADATA_TYPE,
- fm->Size(), ObjectStats::kNoOverAllocation,
- kIgnoreCow);
}
void ObjectStatsCollectorImpl::RecordVirtualJSFunctionDetails(
@@ -702,7 +682,7 @@ namespace {
bool MatchesConstantElementsPair(Object* object) {
if (!object->IsTuple2()) return false;
Tuple2* tuple = Tuple2::cast(object);
- return tuple->value1()->IsSmi() && tuple->value2()->IsFixedArray();
+ return tuple->value1()->IsSmi() && tuple->value2()->IsFixedArrayExact();
}
} // namespace
@@ -711,20 +691,19 @@ void ObjectStatsCollectorImpl::
RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
HeapObject* parent, HeapObject* object,
ObjectStats::VirtualInstanceType type) {
- if (RecordSimpleVirtualObjectStats(parent, object, type)) {
- if (object->IsFixedArray()) {
- FixedArray* array = FixedArray::cast(object);
- for (int i = 0; i < array->length(); i++) {
- Object* entry = array->get(i);
- if (!entry->IsHeapObject()) continue;
- RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
- array, HeapObject::cast(entry), type);
- }
- } else if (MatchesConstantElementsPair(object)) {
- Tuple2* tuple = Tuple2::cast(object);
+ if (!RecordSimpleVirtualObjectStats(parent, object, type)) return;
+ if (object->IsFixedArrayExact()) {
+ FixedArray* array = FixedArray::cast(object);
+ for (int i = 0; i < array->length(); i++) {
+ Object* entry = array->get(i);
+ if (!entry->IsHeapObject()) continue;
RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
- tuple, HeapObject::cast(tuple->value2()), type);
+ array, HeapObject::cast(entry), type);
}
+ } else if (MatchesConstantElementsPair(object)) {
+ Tuple2* tuple = Tuple2::cast(object);
+ RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
+ tuple, HeapObject::cast(tuple->value2()), type);
}
}
@@ -738,7 +717,7 @@ void ObjectStatsCollectorImpl::RecordVirtualBytecodeArrayDetails(
FixedArray* constant_pool = FixedArray::cast(bytecode->constant_pool());
for (int i = 0; i < constant_pool->length(); i++) {
Object* entry = constant_pool->get(i);
- if (entry->IsFixedArray() || MatchesConstantElementsPair(entry)) {
+ if (entry->IsFixedArrayExact() || MatchesConstantElementsPair(entry)) {
RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
constant_pool, HeapObject::cast(entry),
ObjectStats::EMBEDDED_OBJECT_TYPE);
@@ -786,7 +765,7 @@ void ObjectStatsCollectorImpl::RecordVirtualCodeDetails(Code* code) {
RelocInfo::Mode mode = it.rinfo()->rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
Object* target = it.rinfo()->target_object();
- if (target->IsFixedArray() || MatchesConstantElementsPair(target)) {
+ if (target->IsFixedArrayExact() || MatchesConstantElementsPair(target)) {
RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
code, HeapObject::cast(target), ObjectStats::EMBEDDED_OBJECT_TYPE);
}
@@ -796,11 +775,9 @@ void ObjectStatsCollectorImpl::RecordVirtualCodeDetails(Code* code) {
void ObjectStatsCollectorImpl::RecordVirtualContext(Context* context) {
if (context->IsNativeContext()) {
- RecordSimpleVirtualObjectStats(nullptr, context,
- ObjectStats::NATIVE_CONTEXT_TYPE);
+ RecordObjectStats(context, NATIVE_CONTEXT_TYPE, context->Size());
} else if (context->IsFunctionContext()) {
- RecordSimpleVirtualObjectStats(nullptr, context,
- ObjectStats::FUNCTION_CONTEXT_TYPE);
+ RecordObjectStats(context, FUNCTION_CONTEXT_TYPE, context->Size());
} else {
RecordSimpleVirtualObjectStats(nullptr, context,
ObjectStats::OTHER_CONTEXT_TYPE);
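
Several sites in object-stats.cc above switch from IsFixedArray() to IsFixedArrayExact(). The distinction matters because an inclusive predicate also matches FixedArray subtypes (hash tables, descriptor arrays, and so on) that the stats collector wants to bucket separately. An illustrative, non-V8 comparison of the two kinds of predicate:

enum class Kind { kFixedArray, kHashTable, kDescriptorArray };

// Inclusive check: anything laid out as a fixed array.
bool IsFixedArrayLike(Kind k) {
  return k == Kind::kFixedArray || k == Kind::kHashTable ||
         k == Kind::kDescriptorArray;
}

// Exact check: only plain FixedArray instances, so subtypes fall into their
// own statistics categories.
bool IsFixedArrayExact(Kind k) { return k == Kind::kFixedArray; }
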
diff --git a/deps/v8/src/heap/object-stats.h b/deps/v8/src/heap/object-stats.h
index 723ae53fd5..3648b9985b 100644
--- a/deps/v8/src/heap/object-stats.h
+++ b/deps/v8/src/heap/object-stats.h
@@ -29,7 +29,6 @@
V(EMBEDDED_OBJECT_TYPE) \
V(ENUM_CACHE_TYPE) \
V(ENUM_INDICES_CACHE_TYPE) \
- V(FEEDBACK_METADATA_TYPE) \
V(FEEDBACK_VECTOR_ENTRY_TYPE) \
V(FEEDBACK_VECTOR_HEADER_TYPE) \
V(FEEDBACK_VECTOR_SLOT_CALL_TYPE) \
@@ -40,14 +39,12 @@
V(FEEDBACK_VECTOR_SLOT_OTHER_TYPE) \
V(FEEDBACK_VECTOR_SLOT_STORE_TYPE) \
V(FEEDBACK_VECTOR_SLOT_STORE_UNUSED_TYPE) \
- V(FUNCTION_CONTEXT_TYPE) \
V(FUNCTION_TEMPLATE_INFO_ENTRIES_TYPE) \
V(GLOBAL_ELEMENTS_TYPE) \
V(GLOBAL_PROPERTIES_TYPE) \
V(JS_ARRAY_BOILERPLATE_TYPE) \
V(JS_COLLETION_TABLE_TYPE) \
V(JS_OBJECT_BOILERPLATE_TYPE) \
- V(NATIVE_CONTEXT_TYPE) \
V(NOSCRIPT_SHARED_FUNCTION_INFOS_TYPE) \
V(NUMBER_STRING_CACHE_TYPE) \
V(OBJECT_PROPERTY_DICTIONARY_TYPE) \
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index 8384cead02..b47dba7830 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -71,17 +71,22 @@ void HeapVisitor<ResultType, ConcreteVisitor>::VisitMapPointer(
host, reinterpret_cast<Object**>(map));
}
-#define VISIT(type) \
- template <typename ResultType, typename ConcreteVisitor> \
- ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit##type( \
- Map* map, type* object) { \
- ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this); \
- if (!visitor->ShouldVisit(object)) return ResultType(); \
- int size = type::BodyDescriptor::SizeOf(map, object); \
- if (visitor->ShouldVisitMapPointer()) \
- visitor->VisitMapPointer(object, object->map_slot()); \
- type::BodyDescriptor::IterateBody(object, size, visitor); \
- return static_cast<ResultType>(size); \
+#define VISIT(type) \
+ template <typename ResultType, typename ConcreteVisitor> \
+ ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit##type( \
+ Map* map, type* object) { \
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this); \
+ if (!visitor->ShouldVisit(object)) return ResultType(); \
+ if (!visitor->AllowDefaultJSObjectVisit()) { \
+ DCHECK_WITH_MSG(!map->IsJSObjectMap(), \
+ "Implement custom visitor for new JSObject subclass in " \
+ "concurrent marker"); \
+ } \
+ int size = type::BodyDescriptor::SizeOf(map, object); \
+ if (visitor->ShouldVisitMapPointer()) \
+ visitor->VisitMapPointer(object, object->map_slot()); \
+ type::BodyDescriptor::IterateBody(map, object, size, visitor); \
+ return static_cast<ResultType>(size); \
}
TYPED_VISITOR_ID_LIST(VISIT)
#undef VISIT
@@ -100,7 +105,7 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitNativeContext(
int size = Context::BodyDescriptor::SizeOf(map, object);
if (visitor->ShouldVisitMapPointer())
visitor->VisitMapPointer(object, object->map_slot());
- Context::BodyDescriptor::IterateBody(object, size, visitor);
+ Context::BodyDescriptor::IterateBody(map, object, size, visitor);
return static_cast<ResultType>(size);
}
@@ -123,7 +128,7 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitJSObjectFast(
int size = JSObject::FastBodyDescriptor::SizeOf(map, object);
if (visitor->ShouldVisitMapPointer())
visitor->VisitMapPointer(object, object->map_slot());
- JSObject::FastBodyDescriptor::IterateBody(object, size, visitor);
+ JSObject::FastBodyDescriptor::IterateBody(map, object, size, visitor);
return static_cast<ResultType>(size);
}
@@ -135,7 +140,7 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitJSApiObject(
int size = JSObject::BodyDescriptor::SizeOf(map, object);
if (visitor->ShouldVisitMapPointer())
visitor->VisitMapPointer(object, object->map_slot());
- JSObject::BodyDescriptor::IterateBody(object, size, visitor);
+ JSObject::BodyDescriptor::IterateBody(map, object, size, visitor);
return static_cast<ResultType>(size);
}
@@ -147,7 +152,7 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitStruct(
int size = map->instance_size();
if (visitor->ShouldVisitMapPointer())
visitor->VisitMapPointer(object, object->map_slot());
- StructBodyDescriptor::IterateBody(object, size, visitor);
+ StructBodyDescriptor::IterateBody(map, object, size, visitor);
return static_cast<ResultType>(size);
}
@@ -166,7 +171,7 @@ int NewSpaceVisitor<ConcreteVisitor>::VisitJSFunction(Map* map,
JSFunction* object) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
int size = JSFunction::BodyDescriptorWeak::SizeOf(map, object);
- JSFunction::BodyDescriptorWeak::IterateBody(object, size, visitor);
+ JSFunction::BodyDescriptorWeak::IterateBody(map, object, size, visitor);
return size;
}
@@ -175,7 +180,7 @@ int NewSpaceVisitor<ConcreteVisitor>::VisitNativeContext(Map* map,
Context* object) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
int size = Context::BodyDescriptor::SizeOf(map, object);
- Context::BodyDescriptor::IterateBody(object, size, visitor);
+ Context::BodyDescriptor::IterateBody(map, object, size, visitor);
return size;
}
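
The VISIT macro above relies on the curiously recurring template pattern: HeapVisitor casts itself to the concrete visitor so the per-type hooks resolve statically. A bare-bones sketch of that dispatch, with illustrative names only (the body-descriptor plumbing is reduced to a Size()/VisitBody() pair):

template <typename ConcreteVisitor>
class HeapVisitorBase {
 public:
  template <typename T>
  int Visit(T* object) {
    ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
    if (!visitor->ShouldVisit(object)) return 0;
    int size = object->Size();
    visitor->VisitBody(object, size);  // resolved at compile time, no virtual call
    return size;
  }
};
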
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index 7746c91c71..88e38dcb0c 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -40,7 +40,6 @@ class JSWeakCollection;
V(JSArrayBuffer) \
V(JSFunction) \
V(JSObject) \
- V(JSRegExp) \
V(JSWeakCollection) \
V(Map) \
V(Oddball) \
@@ -55,7 +54,9 @@ class JSWeakCollection;
V(Symbol) \
V(ThinString) \
V(TransitionArray) \
- V(WeakCell)
+ V(WasmInstanceObject) \
+ V(WeakCell) \
+ V(WeakFixedArray)
// The base class for visitors that need to dispatch on object type. The default
// behavior of all visit functions is to iterate body of the given object using
@@ -83,6 +84,9 @@ class HeapVisitor : public ObjectVisitor {
V8_INLINE bool ShouldVisitMapPointer() { return true; }
// A callback for visiting the map pointer in the object header.
V8_INLINE void VisitMapPointer(HeapObject* host, HeapObject** map);
+  // If this predicate returns false, then the heap visitor will fail in the
+  // default Visit implementation for subclasses of JSObject.
+ V8_INLINE bool AllowDefaultJSObjectVisit() { return true; }
#define VISIT(type) V8_INLINE ResultType Visit##type(Map* map, type* object);
TYPED_VISITOR_ID_LIST(VISIT)
diff --git a/deps/v8/src/heap/remembered-set.h b/deps/v8/src/heap/remembered-set.h
index 4e0f259c00..45a6422204 100644
--- a/deps/v8/src/heap/remembered-set.h
+++ b/deps/v8/src/heap/remembered-set.h
@@ -280,7 +280,9 @@ class UpdateTypedSlotHelper {
Callback callback) {
Object* code = Code::GetObjectFromEntryAddress(entry_address);
Object* old_code = code;
- SlotCallbackResult result = callback(&code);
+ SlotCallbackResult result =
+ callback(reinterpret_cast<MaybeObject**>(&code));
+ DCHECK(!HasWeakHeapObjectTag(code));
if (code != old_code) {
Memory::Address_at(entry_address) =
reinterpret_cast<Code*>(code)->entry();
@@ -296,9 +298,12 @@ class UpdateTypedSlotHelper {
DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
Code* old_target = Code::GetCodeFromTargetAddress(rinfo->target_address());
Object* new_target = old_target;
- SlotCallbackResult result = callback(&new_target);
+ SlotCallbackResult result =
+ callback(reinterpret_cast<MaybeObject**>(&new_target));
+ DCHECK(!HasWeakHeapObjectTag(new_target));
if (new_target != old_target) {
- rinfo->set_target_address(Code::cast(new_target)->instruction_start());
+ rinfo->set_target_address(
+ Code::cast(new_target)->raw_instruction_start());
}
return result;
}
@@ -311,7 +316,9 @@ class UpdateTypedSlotHelper {
DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
HeapObject* old_target = rinfo->target_object();
Object* new_target = old_target;
- SlotCallbackResult result = callback(&new_target);
+ SlotCallbackResult result =
+ callback(reinterpret_cast<MaybeObject**>(&new_target));
+ DCHECK(!HasWeakHeapObjectTag(new_target));
if (new_target != old_target) {
rinfo->set_target_object(HeapObject::cast(new_target));
}
@@ -319,7 +326,7 @@ class UpdateTypedSlotHelper {
}
// Updates a typed slot using an untyped slot callback.
- // The callback accepts Object** and returns SlotCallbackResult.
+ // The callback accepts MaybeObject** and returns SlotCallbackResult.
template <typename Callback>
static SlotCallbackResult UpdateTypedSlot(Isolate* isolate,
SlotType slot_type, Address addr,
@@ -337,7 +344,7 @@ class UpdateTypedSlotHelper {
return UpdateEmbeddedPointer(&rinfo, callback);
}
case OBJECT_SLOT: {
- return callback(reinterpret_cast<Object**>(addr));
+ return callback(reinterpret_cast<MaybeObject**>(addr));
}
case CLEARED_SLOT:
break;
diff --git a/deps/v8/src/heap/scavenge-job.cc b/deps/v8/src/heap/scavenge-job.cc
index b649c010ae..9feebbf4d5 100644
--- a/deps/v8/src/heap/scavenge-job.cc
+++ b/deps/v8/src/heap/scavenge-job.cc
@@ -103,7 +103,7 @@ void ScavengeJob::ScheduleIdleTaskIfNeeded(Heap* heap, int bytes_allocated) {
void ScavengeJob::ScheduleIdleTask(Heap* heap) {
- if (!idle_task_pending_ && heap->use_tasks()) {
+ if (!idle_task_pending_ && !heap->IsTearingDown()) {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
if (V8::GetCurrentPlatform()->IdleTasksEnabled(isolate)) {
idle_task_pending_ = true;
diff --git a/deps/v8/src/heap/scavenger-inl.h b/deps/v8/src/heap/scavenger-inl.h
index 2971db98cc..4b07f16d11 100644
--- a/deps/v8/src/heap/scavenger-inl.h
+++ b/deps/v8/src/heap/scavenger-inl.h
@@ -6,6 +6,7 @@
#define V8_HEAP_SCAVENGER_INL_H_
#include "src/heap/scavenger.h"
+#include "src/objects-inl.h"
#include "src/objects/map.h"
namespace v8 {
@@ -30,13 +31,13 @@ bool Scavenger::ContainsOnlyData(VisitorId visitor_id) {
return false;
}
-void Scavenger::PageMemoryFence(Object* object) {
+void Scavenger::PageMemoryFence(MaybeObject* object) {
#ifdef THREAD_SANITIZER
// Perform a dummy acquire load to tell TSAN that there is no data race
// with page initialization.
- if (object->IsHeapObject()) {
- MemoryChunk* chunk =
- MemoryChunk::FromAddress(HeapObject::cast(object)->address());
+ HeapObject* heap_object;
+ if (object->ToStrongOrWeakHeapObject(&heap_object)) {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(heap_object->address());
CHECK_NOT_NULL(chunk->synchronized_heap());
}
#endif
@@ -68,7 +69,7 @@ bool Scavenger::MigrateObject(Map* map, HeapObject* source, HeapObject* target,
return true;
}
-bool Scavenger::SemiSpaceCopyObject(Map* map, HeapObject** slot,
+bool Scavenger::SemiSpaceCopyObject(Map* map, HeapObjectReference** slot,
HeapObject* object, int object_size) {
DCHECK(heap()->AllowedToBeMigrated(object, NEW_SPACE));
AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
@@ -83,10 +84,10 @@ bool Scavenger::SemiSpaceCopyObject(Map* map, HeapObject** slot,
if (!self_success) {
allocator_.FreeLast(NEW_SPACE, target, object_size);
MapWord map_word = object->map_word();
- *slot = map_word.ToForwardingAddress();
+ HeapObjectReference::Update(slot, map_word.ToForwardingAddress());
return true;
}
- *slot = target;
+ HeapObjectReference::Update(slot, target);
copied_list_.Push(ObjectAndSize(target, object_size));
copied_size_ += object_size;
@@ -95,8 +96,8 @@ bool Scavenger::SemiSpaceCopyObject(Map* map, HeapObject** slot,
return false;
}
-bool Scavenger::PromoteObject(Map* map, HeapObject** slot, HeapObject* object,
- int object_size) {
+bool Scavenger::PromoteObject(Map* map, HeapObjectReference** slot,
+ HeapObject* object, int object_size) {
AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
AllocationResult allocation =
allocator_.Allocate(OLD_SPACE, object_size, alignment);
@@ -109,11 +110,10 @@ bool Scavenger::PromoteObject(Map* map, HeapObject** slot, HeapObject* object,
if (!self_success) {
allocator_.FreeLast(OLD_SPACE, target, object_size);
MapWord map_word = object->map_word();
- *slot = map_word.ToForwardingAddress();
+ HeapObjectReference::Update(slot, map_word.ToForwardingAddress());
return true;
}
- *slot = target;
-
+ HeapObjectReference::Update(slot, target);
if (!ContainsOnlyData(map->visitor_id())) {
promotion_list_.Push(ObjectAndSize(target, object_size));
}
@@ -123,7 +123,7 @@ bool Scavenger::PromoteObject(Map* map, HeapObject** slot, HeapObject* object,
return false;
}
-void Scavenger::EvacuateObjectDefault(Map* map, HeapObject** slot,
+void Scavenger::EvacuateObjectDefault(Map* map, HeapObjectReference** slot,
HeapObject* object, int object_size) {
SLOW_DCHECK(object_size <= Page::kAllocatableMemory);
SLOW_DCHECK(object->SizeFromMap(map) == object_size);
@@ -139,7 +139,7 @@ void Scavenger::EvacuateObjectDefault(Map* map, HeapObject** slot,
// If promotion failed, we try to copy the object to the other semi-space
if (SemiSpaceCopyObject(map, slot, object, object_size)) return;
- FatalProcessOutOfMemory("Scavenger: semi-space copy\n");
+ heap()->FatalProcessOutOfMemory("Scavenger: semi-space copy");
}
void Scavenger::EvacuateThinString(Map* map, HeapObject** slot,
@@ -157,7 +157,8 @@ void Scavenger::EvacuateThinString(Map* map, HeapObject** slot,
return;
}
- EvacuateObjectDefault(map, slot, object, object_size);
+ EvacuateObjectDefault(map, reinterpret_cast<HeapObjectReference**>(slot),
+ object, object_size);
}
void Scavenger::EvacuateShortcutCandidate(Map* map, HeapObject** slot,
@@ -187,17 +188,19 @@ void Scavenger::EvacuateShortcutCandidate(Map* map, HeapObject** slot,
return;
}
Map* map = first_word.ToMap();
- EvacuateObjectDefault(map, slot, first, first->SizeFromMap(map));
+ EvacuateObjectDefault(map, reinterpret_cast<HeapObjectReference**>(slot),
+ first, first->SizeFromMap(map));
base::AsAtomicPointer::Relaxed_Store(
reinterpret_cast<Map**>(object->address()),
MapWord::FromForwardingAddress(*slot).ToMap());
return;
}
- EvacuateObjectDefault(map, slot, object, object_size);
+ EvacuateObjectDefault(map, reinterpret_cast<HeapObjectReference**>(slot),
+ object, object_size);
}
-void Scavenger::EvacuateObject(HeapObject** slot, Map* map,
+void Scavenger::EvacuateObject(HeapObjectReference** slot, Map* map,
HeapObject* source) {
SLOW_DCHECK(heap_->InFromSpace(source));
SLOW_DCHECK(!MapWord::FromMap(map).IsForwardingAddress());
@@ -206,11 +209,15 @@ void Scavenger::EvacuateObject(HeapObject** slot, Map* map,
// that require re-reading the map.
switch (map->visitor_id()) {
case kVisitThinString:
- EvacuateThinString(map, slot, reinterpret_cast<ThinString*>(source),
- size);
+ // At the moment we don't allow weak pointers to thin strings.
+ DCHECK(!(*slot)->IsWeakHeapObject());
+ EvacuateThinString(map, reinterpret_cast<HeapObject**>(slot),
+ reinterpret_cast<ThinString*>(source), size);
break;
case kVisitShortcutCandidate:
- EvacuateShortcutCandidate(map, slot,
+ DCHECK(!(*slot)->IsWeakHeapObject());
+ // At the moment we don't allow weak pointers to cons strings.
+ EvacuateShortcutCandidate(map, reinterpret_cast<HeapObject**>(slot),
reinterpret_cast<ConsString*>(source), size);
break;
default:
@@ -219,7 +226,7 @@ void Scavenger::EvacuateObject(HeapObject** slot, Map* map,
}
}
-void Scavenger::ScavengeObject(HeapObject** p, HeapObject* object) {
+void Scavenger::ScavengeObject(HeapObjectReference** p, HeapObject* object) {
DCHECK(heap()->InFromSpace(object));
// Synchronized load that consumes the publishing CAS of MigrateObject.
@@ -228,8 +235,14 @@ void Scavenger::ScavengeObject(HeapObject** p, HeapObject* object) {
// If the first word is a forwarding address, the object has already been
// copied.
if (first_word.IsForwardingAddress()) {
+ HeapObject* dest = first_word.ToForwardingAddress();
DCHECK(heap()->InFromSpace(*p));
- *p = first_word.ToForwardingAddress();
+ if ((*p)->IsWeakHeapObject()) {
+ *p = HeapObjectReference::Weak(dest);
+ } else {
+ DCHECK((*p)->IsStrongHeapObject());
+ *p = HeapObjectReference::Strong(dest);
+ }
return;
}
@@ -242,13 +255,16 @@ void Scavenger::ScavengeObject(HeapObject** p, HeapObject* object) {
SlotCallbackResult Scavenger::CheckAndScavengeObject(Heap* heap,
Address slot_address) {
- Object** slot = reinterpret_cast<Object**>(slot_address);
- Object* object = *slot;
+ MaybeObject** slot = reinterpret_cast<MaybeObject**>(slot_address);
+ MaybeObject* object = *slot;
if (heap->InFromSpace(object)) {
- HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
+ HeapObject* heap_object;
+ bool success = object->ToStrongOrWeakHeapObject(&heap_object);
+ USE(success);
+ DCHECK(success);
DCHECK(heap_object->IsHeapObject());
- ScavengeObject(reinterpret_cast<HeapObject**>(slot), heap_object);
+ ScavengeObject(reinterpret_cast<HeapObjectReference**>(slot), heap_object);
object = *slot;
// If the object was in from space before and is after executing the
@@ -274,11 +290,27 @@ void ScavengeVisitor::VisitPointers(HeapObject* host, Object** start,
for (Object** p = start; p < end; p++) {
Object* object = *p;
if (!heap_->InNewSpace(object)) continue;
- scavenger_->ScavengeObject(reinterpret_cast<HeapObject**>(p),
+ scavenger_->ScavengeObject(reinterpret_cast<HeapObjectReference**>(p),
reinterpret_cast<HeapObject*>(object));
}
}
+void ScavengeVisitor::VisitPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) {
+ for (MaybeObject** p = start; p < end; p++) {
+ MaybeObject* object = *p;
+ if (!heap_->InNewSpace(object)) continue;
+ // Treat the weak reference as strong.
+ HeapObject* heap_object;
+ if (object->ToStrongOrWeakHeapObject(&heap_object)) {
+ scavenger_->ScavengeObject(reinterpret_cast<HeapObjectReference**>(p),
+ heap_object);
+ } else {
+ UNREACHABLE();
+ }
+ }
+}
+
} // namespace internal
} // namespace v8
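
The scavenger changes above preserve the weak or strong flavour of a slot when it is rewritten with a forwarding address (HeapObjectReference::Weak vs. ::Strong). A toy tagging scheme that shows the idea; the tag value here is an assumption for this sketch, not V8's actual encoding:

#include <cstdint>

constexpr uintptr_t kWeakTag = 0x2;  // illustrative tag bit, not V8's

inline uintptr_t StrongRef(void* obj) {
  return reinterpret_cast<uintptr_t>(obj);
}
inline uintptr_t WeakRef(void* obj) {
  return reinterpret_cast<uintptr_t>(obj) | kWeakTag;
}
inline bool IsWeak(uintptr_t ref) { return (ref & kWeakTag) != 0; }

// After evacuation, the slot is rewritten with the forwarding address while
// keeping whether the reference was weak or strong.
inline void UpdateSlot(uintptr_t* slot, void* forwarded) {
  *slot = IsWeak(*slot) ? WeakRef(forwarded) : StrongRef(forwarded);
}
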
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index 3baba9521b..cd6c534704 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -23,34 +23,53 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
inline void VisitPointers(HeapObject* host, Object** start,
Object** end) final {
- for (Address slot_address = reinterpret_cast<Address>(start);
- slot_address < reinterpret_cast<Address>(end);
- slot_address += kPointerSize) {
- Object** slot = reinterpret_cast<Object**>(slot_address);
+ for (Object** slot = start; slot < end; ++slot) {
Object* target = *slot;
- scavenger_->PageMemoryFence(target);
-
+ DCHECK(!HasWeakHeapObjectTag(target));
if (target->IsHeapObject()) {
- if (heap_->InFromSpace(target)) {
- scavenger_->ScavengeObject(reinterpret_cast<HeapObject**>(slot),
- HeapObject::cast(target));
- target = *slot;
- scavenger_->PageMemoryFence(target);
-
- if (heap_->InNewSpace(target)) {
- SLOW_DCHECK(target->IsHeapObject());
- SLOW_DCHECK(heap_->InToSpace(target));
- RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot_address),
- slot_address);
- }
- SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(
- HeapObject::cast(target)));
- } else if (record_slots_ &&
- MarkCompactCollector::IsOnEvacuationCandidate(
- HeapObject::cast(target))) {
- heap_->mark_compact_collector()->RecordSlot(host, slot, target);
- }
+ HandleSlot(host, reinterpret_cast<Address>(slot),
+ HeapObject::cast(target));
+ }
+ }
+ }
+
+ inline void VisitPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) final {
+ // Treat weak references as strong. TODO(marja): Proper weakness handling in
+ // the young generation.
+ for (MaybeObject** slot = start; slot < end; ++slot) {
+ MaybeObject* target = *slot;
+ HeapObject* heap_object;
+ if (target->ToStrongOrWeakHeapObject(&heap_object)) {
+ HandleSlot(host, reinterpret_cast<Address>(slot), heap_object);
+ }
+ }
+ }
+
+ inline void HandleSlot(HeapObject* host, Address slot_address,
+ HeapObject* target) {
+ HeapObjectReference** slot =
+ reinterpret_cast<HeapObjectReference**>(slot_address);
+ scavenger_->PageMemoryFence(reinterpret_cast<MaybeObject*>(target));
+
+ if (heap_->InFromSpace(target)) {
+ scavenger_->ScavengeObject(slot, target);
+ bool success = (*slot)->ToStrongOrWeakHeapObject(&target);
+ USE(success);
+ DCHECK(success);
+ scavenger_->PageMemoryFence(reinterpret_cast<MaybeObject*>(target));
+
+ if (heap_->InNewSpace(target)) {
+ SLOW_DCHECK(target->IsHeapObject());
+ SLOW_DCHECK(heap_->InToSpace(target));
+ RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot_address),
+ slot_address);
}
+ SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(
+ HeapObject::cast(target)));
+ } else if (record_slots_ && MarkCompactCollector::IsOnEvacuationCandidate(
+ HeapObject::cast(target))) {
+ heap_->mark_compact_collector()->RecordSlot(host, slot, target);
}
}
@@ -84,7 +103,7 @@ void Scavenger::IterateAndScavengePromotedObject(HeapObject* target, int size) {
is_compacting_ &&
heap()->incremental_marking()->atomic_marking_state()->IsBlack(target);
IterateAndScavengePromotedObjectsVisitor visitor(heap(), this, record_slots);
- target->IterateBody(target->map()->instance_type(), size, &visitor);
+ target->IterateBodyFast(target->map(), size, &visitor);
}
void Scavenger::AddPageToSweeperIfNecessary(MemoryChunk* page) {
@@ -106,7 +125,7 @@ void Scavenger::ScavengePage(MemoryChunk* page) {
RememberedSet<OLD_TO_NEW>::IterateTyped(
page, [this](SlotType type, Address host_addr, Address addr) {
return UpdateTypedSlotHelper::UpdateTypedSlot(
- heap_->isolate(), type, addr, [this](Object** addr) {
+ heap_->isolate(), type, addr, [this](MaybeObject** addr) {
return CheckAndScavengeObject(heap(),
reinterpret_cast<Address>(addr));
});
@@ -164,6 +183,7 @@ void Scavenger::Finalize() {
void RootScavengeVisitor::VisitRootPointer(Root root, const char* description,
Object** p) {
+ DCHECK(!HasWeakHeapObjectTag(*p));
ScavengePointer(p);
}
@@ -175,9 +195,10 @@ void RootScavengeVisitor::VisitRootPointers(Root root, const char* description,
void RootScavengeVisitor::ScavengePointer(Object** p) {
Object* object = *p;
+ DCHECK(!HasWeakHeapObjectTag(object));
if (!heap_->InNewSpace(object)) return;
- scavenger_->ScavengeObject(reinterpret_cast<HeapObject**>(p),
+ scavenger_->ScavengeObject(reinterpret_cast<HeapObjectReference**>(p),
reinterpret_cast<HeapObject*>(object));
}
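
HandleSlot() above funnels both strong and weak slots of a promoted object through one path: scavenge the target, then, if it still lives in new space, record the slot in the OLD_TO_NEW remembered set. A much-reduced sketch of that bookkeeping decision, with placeholder types:

#include <unordered_set>

struct Slot {};

// A promoted (old-space) host whose field still points into the young
// generation must stay discoverable by the next scavenge.
void RecordIfStillYoung(Slot* slot, bool target_in_new_space,
                        std::unordered_set<Slot*>& old_to_new_set) {
  if (target_in_new_space) {
    old_to_new_set.insert(slot);
  }
}
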
diff --git a/deps/v8/src/heap/scavenger.h b/deps/v8/src/heap/scavenger.h
index e0008ae694..de2f49f0e2 100644
--- a/deps/v8/src/heap/scavenger.h
+++ b/deps/v8/src/heap/scavenger.h
@@ -50,7 +50,7 @@ class Scavenger {
inline Heap* heap() { return heap_; }
- inline void PageMemoryFence(Object* object);
+ inline void PageMemoryFence(MaybeObject* object);
void AddPageToSweeperIfNecessary(MemoryChunk* page);
@@ -61,24 +61,24 @@ class Scavenger {
// Scavenges an object |object| referenced from slot |p|. |object| is required
// to be in from space.
- inline void ScavengeObject(HeapObject** p, HeapObject* object);
+ inline void ScavengeObject(HeapObjectReference** p, HeapObject* object);
// Copies |source| to |target| and sets the forwarding pointer in |source|.
V8_INLINE bool MigrateObject(Map* map, HeapObject* source, HeapObject* target,
int size);
- V8_INLINE bool SemiSpaceCopyObject(Map* map, HeapObject** slot,
+ V8_INLINE bool SemiSpaceCopyObject(Map* map, HeapObjectReference** slot,
HeapObject* object, int object_size);
- V8_INLINE bool PromoteObject(Map* map, HeapObject** slot, HeapObject* object,
- int object_size);
+ V8_INLINE bool PromoteObject(Map* map, HeapObjectReference** slot,
+ HeapObject* object, int object_size);
- V8_INLINE void EvacuateObject(HeapObject** slot, Map* map,
+ V8_INLINE void EvacuateObject(HeapObjectReference** slot, Map* map,
HeapObject* source);
// Different cases for object evacuation.
- V8_INLINE void EvacuateObjectDefault(Map* map, HeapObject** slot,
+ V8_INLINE void EvacuateObjectDefault(Map* map, HeapObjectReference** slot,
HeapObject* object, int object_size);
V8_INLINE void EvacuateJSFunction(Map* map, HeapObject** slot,
@@ -135,6 +135,8 @@ class ScavengeVisitor final : public NewSpaceVisitor<ScavengeVisitor> {
V8_INLINE void VisitPointers(HeapObject* host, Object** start,
Object** end) final;
+ V8_INLINE void VisitPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) final;
private:
Heap* const heap_;
diff --git a/deps/v8/src/heap/setup-heap-internal.cc b/deps/v8/src/heap/setup-heap-internal.cc
index 8a7aca1694..ca6ce64e2f 100644
--- a/deps/v8/src/heap/setup-heap-internal.cc
+++ b/deps/v8/src/heap/setup-heap-internal.cc
@@ -8,8 +8,8 @@
#include "src/ast/context-slot-cache.h"
#include "src/compilation-cache.h"
#include "src/contexts.h"
-#include "src/factory.h"
#include "src/heap-symbols.h"
+#include "src/heap/factory.h"
#include "src/heap/heap.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate.h"
@@ -27,6 +27,7 @@
#include "src/objects/shared-function-info.h"
#include "src/objects/string.h"
#include "src/regexp/jsregexp.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
@@ -77,11 +78,63 @@ const Heap::StructTable Heap::struct_table[] = {
#undef DATA_HANDLER_ELEMENT
};
+AllocationResult Heap::AllocateMap(InstanceType instance_type,
+ int instance_size,
+ ElementsKind elements_kind,
+ int inobject_properties) {
+ STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
+ DCHECK_IMPLIES(instance_type >= FIRST_JS_OBJECT_TYPE &&
+ !Map::CanHaveFastTransitionableElementsKind(instance_type),
+ IsDictionaryElementsKind(elements_kind) ||
+ IsTerminalElementsKind(elements_kind));
+ HeapObject* result = nullptr;
+ AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
+ if (!allocation.To(&result)) return allocation;
+
+ result->set_map_after_allocation(meta_map(), SKIP_WRITE_BARRIER);
+ return isolate()->factory()->InitializeMap(Map::cast(result), instance_type,
+ instance_size, elements_kind,
+ inobject_properties);
+}
+
+AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
+ int instance_size) {
+ Object* result = nullptr;
+ AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
+ if (!allocation.To(&result)) return allocation;
+ // Map::cast cannot be used due to uninitialized map field.
+ Map* map = reinterpret_cast<Map*>(result);
+ map->set_map_after_allocation(reinterpret_cast<Map*>(root(kMetaMapRootIndex)),
+ SKIP_WRITE_BARRIER);
+ map->set_instance_type(instance_type);
+ map->set_instance_size(instance_size);
+  // Initialize the map to contain only tagged fields.
+ if (FLAG_unbox_double_fields) {
+ map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
+ }
+ // GetVisitorId requires a properly initialized LayoutDescriptor.
+ map->set_visitor_id(Map::GetVisitorId(map));
+ map->set_inobject_properties_start_or_constructor_function_index(0);
+ DCHECK(!map->IsJSObjectMap());
+ map->set_prototype_validity_cell(Smi::FromInt(Map::kPrototypeChainValid));
+ map->SetInObjectUnusedPropertyFields(0);
+ map->set_bit_field(0);
+ map->set_bit_field2(0);
+ DCHECK(!map->is_in_retained_map_list());
+ int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
+ Map::OwnsDescriptorsBit::encode(true) |
+ Map::ConstructionCounterBits::encode(Map::kNoSlackTracking);
+ map->set_bit_field3(bit_field3);
+ map->set_weak_cell_cache(Smi::kZero);
+ map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
+ return map;
+}
+
namespace {
void FinalizePartialMap(Heap* heap, Map* map) {
map->set_dependent_code(DependentCode::cast(heap->empty_fixed_array()));
- map->set_raw_transitions(Smi::kZero);
+ map->set_raw_transitions(MaybeObject::FromSmi(Smi::kZero));
map->set_instance_descriptors(heap->empty_descriptor_array());
if (FLAG_unbox_double_fields) {
map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
@@ -92,6 +145,41 @@ void FinalizePartialMap(Heap* heap, Map* map) {
} // namespace
+AllocationResult Heap::Allocate(Map* map, AllocationSpace space) {
+ DCHECK(map->instance_type() != MAP_TYPE);
+ int size = map->instance_size();
+ HeapObject* result = nullptr;
+ AllocationResult allocation = AllocateRaw(size, space);
+ if (!allocation.To(&result)) return allocation;
+ // New space objects are allocated white.
+ WriteBarrierMode write_barrier_mode =
+ space == NEW_SPACE ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
+ result->set_map_after_allocation(map, write_barrier_mode);
+ return result;
+}
+
+AllocationResult Heap::AllocateEmptyFixedTypedArray(
+ ExternalArrayType array_type) {
+ int size = OBJECT_POINTER_ALIGN(FixedTypedArrayBase::kDataOffset);
+
+ HeapObject* object = nullptr;
+ AllocationResult allocation = AllocateRaw(
+ size, OLD_SPACE,
+ array_type == kExternalFloat64Array ? kDoubleAligned : kWordAligned);
+ if (!allocation.To(&object)) return allocation;
+
+ object->set_map_after_allocation(MapForFixedTypedArray(array_type),
+ SKIP_WRITE_BARRIER);
+ FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(object);
+ elements->set_base_pointer(elements, SKIP_WRITE_BARRIER);
+ elements->set_external_pointer(
+ ExternalReference::fixed_typed_array_base_data_offset(isolate())
+ .address(),
+ SKIP_WRITE_BARRIER);
+ elements->set_length(0);
+ return elements;
+}
+
bool Heap::CreateInitialMaps() {
HeapObject* obj = nullptr;
{
@@ -112,6 +200,8 @@ bool Heap::CreateInitialMaps() {
}
ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array);
+ ALLOCATE_PARTIAL_MAP(WEAK_FIXED_ARRAY_TYPE, kVariableSizeSentinel,
+ weak_fixed_array);
ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel,
fixed_cow_array)
DCHECK_NE(fixed_array_map(), fixed_cow_array_map());
@@ -128,12 +218,22 @@ bool Heap::CreateInitialMaps() {
// Allocate the empty array.
{
- AllocationResult allocation = AllocateEmptyFixedArray();
- if (!allocation.To(&obj)) return false;
+ AllocationResult alloc = AllocateRaw(FixedArray::SizeFor(0), OLD_SPACE);
+ if (!alloc.To(&obj)) return false;
+ obj->set_map_after_allocation(fixed_array_map(), SKIP_WRITE_BARRIER);
+ FixedArray::cast(obj)->set_length(0);
}
set_empty_fixed_array(FixedArray::cast(obj));
{
+ AllocationResult alloc = AllocateRaw(WeakFixedArray::SizeFor(0), OLD_SPACE);
+ if (!alloc.To(&obj)) return false;
+ obj->set_map_after_allocation(weak_fixed_array_map(), SKIP_WRITE_BARRIER);
+ WeakFixedArray::cast(obj)->set_length(0);
+ }
+ set_empty_weak_fixed_array(WeakFixedArray::cast(obj));
+
+ {
AllocationResult allocation = Allocate(null_map(), OLD_SPACE);
if (!allocation.To(&obj)) return false;
}
@@ -177,11 +277,12 @@ bool Heap::CreateInitialMaps() {
// Allocate the empty descriptor array.
{
STATIC_ASSERT(DescriptorArray::kFirstIndex != 0);
- AllocationResult allocation =
- AllocateUninitializedFixedArray(DescriptorArray::kFirstIndex, TENURED);
- if (!allocation.To(&obj)) return false;
+ int length = DescriptorArray::kFirstIndex;
+ int size = FixedArray::SizeFor(length);
+ if (!AllocateRaw(size, OLD_SPACE).To(&obj)) return false;
+ obj->set_map_after_allocation(descriptor_array_map(), SKIP_WRITE_BARRIER);
+ DescriptorArray::cast(obj)->set_length(length);
}
- obj->set_map_no_write_barrier(descriptor_array_map());
set_empty_descriptor_array(DescriptorArray::cast(obj));
DescriptorArray::cast(obj)->set(DescriptorArray::kDescriptorLengthIndex,
Smi::kZero);
@@ -191,6 +292,7 @@ bool Heap::CreateInitialMaps() {
// Fix the instance_descriptors for the existing maps.
FinalizePartialMap(this, meta_map());
FinalizePartialMap(this, fixed_array_map());
+ FinalizePartialMap(this, weak_fixed_array_map());
FinalizePartialMap(this, fixed_cow_array_map());
FinalizePartialMap(this, descriptor_array_map());
FinalizePartialMap(this, undefined_map());
@@ -269,6 +371,7 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
fixed_double_array_map()->set_elements_kind(HOLEY_DOUBLE_ELEMENTS);
+ ALLOCATE_VARSIZE_MAP(FEEDBACK_METADATA_TYPE, feedback_metadata)
ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
ALLOCATE_VARSIZE_MAP(BYTECODE_ARRAY_TYPE, bytecode_array)
ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
@@ -286,7 +389,17 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)
- ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
+ ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell);
+ {
+ // The invalid_prototype_validity_cell is needed for JSObject maps.
+ Smi* value = Smi::FromInt(Map::kPrototypeChainInvalid);
+ AllocationResult alloc = AllocateRaw(Cell::kSize, OLD_SPACE);
+ if (!alloc.To(&obj)) return false;
+ obj->set_map_after_allocation(cell_map(), SKIP_WRITE_BARRIER);
+ Cell::cast(obj)->set_value(value);
+ set_invalid_prototype_validity_cell(Cell::cast(obj));
+ }
+
ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
ALLOCATE_MAP(WEAK_CELL_TYPE, WeakCell::kSize, weak_cell)
ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
@@ -310,23 +423,29 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, number_dictionary)
ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, simple_number_dictionary)
ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, string_table)
- ALLOCATE_VARSIZE_MAP(HASH_TABLE_TYPE, weak_hash_table)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, array_list)
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, debug_evaluate_context)
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, eval_context)
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context)
+ ALLOCATE_VARSIZE_MAP(FUNCTION_CONTEXT_TYPE, function_context)
+ ALLOCATE_VARSIZE_MAP(CATCH_CONTEXT_TYPE, catch_context)
+ ALLOCATE_VARSIZE_MAP(WITH_CONTEXT_TYPE, with_context)
+ ALLOCATE_VARSIZE_MAP(DEBUG_EVALUATE_CONTEXT_TYPE, debug_evaluate_context)
+ ALLOCATE_VARSIZE_MAP(BLOCK_CONTEXT_TYPE, block_context)
+ ALLOCATE_VARSIZE_MAP(MODULE_CONTEXT_TYPE, module_context)
+ ALLOCATE_VARSIZE_MAP(EVAL_CONTEXT_TYPE, eval_context)
+ ALLOCATE_VARSIZE_MAP(SCRIPT_CONTEXT_TYPE, script_context)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context_table)
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context)
+ ALLOCATE_VARSIZE_MAP(BOILERPLATE_DESCRIPTION_TYPE, boilerplate_description)
+
+ ALLOCATE_VARSIZE_MAP(NATIVE_CONTEXT_TYPE, native_context)
native_context_map()->set_visitor_id(kVisitNativeContext);
+ ALLOCATE_MAP(CALL_HANDLER_INFO_TYPE, CallHandlerInfo::kSize,
+ side_effect_call_handler_info)
+ ALLOCATE_MAP(CALL_HANDLER_INFO_TYPE, CallHandlerInfo::kSize,
+ side_effect_free_call_handler_info)
+
ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
shared_function_info)
@@ -342,11 +461,22 @@ bool Heap::CreateInitialMaps() {
}
{
- AllocationResult allocation = AllocateEmptyScopeInfo();
- if (!allocation.To(&obj)) return false;
+ AllocationResult alloc = AllocateRaw(FixedArray::SizeFor(0), OLD_SPACE);
+ if (!alloc.To(&obj)) return false;
+ obj->set_map_after_allocation(scope_info_map(), SKIP_WRITE_BARRIER);
+ FixedArray::cast(obj)->set_length(0);
}
-
set_empty_scope_info(ScopeInfo::cast(obj));
+
+ {
+ AllocationResult alloc = AllocateRaw(FixedArray::SizeFor(0), OLD_SPACE);
+ if (!alloc.To(&obj)) return false;
+ obj->set_map_after_allocation(boilerplate_description_map(),
+ SKIP_WRITE_BARRIER);
+ FixedArray::cast(obj)->set_length(0);
+ }
+ set_empty_boilerplate_description(BoilerplateDescription::cast(obj));
+
{
AllocationResult allocation = Allocate(boolean_map(), OLD_SPACE);
if (!allocation.To(&obj)) return false;
@@ -361,30 +491,34 @@ bool Heap::CreateInitialMaps() {
set_false_value(Oddball::cast(obj));
Oddball::cast(obj)->set_kind(Oddball::kFalse);
- { // Empty arrays
- {
- ByteArray * byte_array;
- if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false;
- set_empty_byte_array(byte_array);
- }
+ // Empty arrays.
+ {
+ if (!AllocateRaw(ByteArray::SizeFor(0), OLD_SPACE).To(&obj)) return false;
+ obj->set_map_after_allocation(byte_array_map(), SKIP_WRITE_BARRIER);
+ ByteArray::cast(obj)->set_length(0);
+ set_empty_byte_array(ByteArray::cast(obj));
+ }
- {
- PropertyArray* property_array;
- if (!AllocatePropertyArray(0, TENURED).To(&property_array)) return false;
- set_empty_property_array(property_array);
+ {
+ if (!AllocateRaw(FixedArray::SizeFor(0), OLD_SPACE).To(&obj)) {
+ return false;
}
+ obj->set_map_after_allocation(property_array_map(), SKIP_WRITE_BARRIER);
+ PropertyArray::cast(obj)->initialize_length(0);
+ set_empty_property_array(PropertyArray::cast(obj));
+ }
-#define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
- { \
- FixedTypedArrayBase* obj; \
- if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \
- return false; \
- set_empty_fixed_##type##_array(obj); \
- }
+#define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
+ { \
+ FixedTypedArrayBase* obj; \
+ if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \
+ return false; \
+ set_empty_fixed_##type##_array(obj); \
+ }
- TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
+ TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
#undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY
- }
+
DCHECK(!InNewSpace(empty_fixed_array()));
return true;
}
@@ -566,10 +700,6 @@ void Heap::CreateInitialObjects() {
set_retained_maps(ArrayList::cast(empty_fixed_array()));
set_retaining_path_targets(undefined_value());
- set_weak_object_to_code_table(*WeakHashTable::New(isolate(), 16, TENURED));
-
- set_weak_new_space_object_to_code_list(*ArrayList::New(isolate(), 16));
-
set_feedback_vectors_for_profiling_tools(undefined_value());
set_script_list(Smi::kZero);
@@ -584,6 +714,7 @@ void Heap::CreateInitialObjects() {
// Handling of script id generation is in Heap::NextScriptId().
set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
+ set_last_debugging_id(Smi::FromInt(SharedFunctionInfo::kNoDebuggingId));
set_next_template_serial_number(Smi::kZero);
// Allocate the empty OrderedHashMap.
@@ -606,6 +737,11 @@ void Heap::CreateInitialObjects() {
}
set_empty_ordered_hash_set(*empty_ordered_hash_set);
+ // Allocate the empty FeedbackMetadata.
+ Handle<FeedbackMetadata> empty_feedback_metadata =
+ factory->NewFeedbackMetadata(0);
+ set_empty_feedback_metadata(*empty_feedback_metadata);
+
// Allocate the empty script.
Handle<Script> script = factory->NewScript(factory->empty_string());
script->set_type(Script::TYPE_NATIVE);
@@ -633,16 +769,20 @@ void Heap::CreateInitialObjects() {
cell = factory->NewPropertyCell(factory->empty_string());
cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
- set_species_protector(*cell);
+ set_array_species_protector(*cell);
+
+ cell = factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
+ set_typed_array_species_protector(*cell);
+
+ cell = factory->NewPropertyCell(factory->empty_string());
+ cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
+ set_promise_species_protector(*cell);
Handle<Cell> string_length_overflow_cell = factory->NewCell(
handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
set_string_length_protector(*string_length_overflow_cell);
- Handle<Cell> fast_array_iteration_cell = factory->NewCell(
- handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
- set_fast_array_iteration_protector(*fast_array_iteration_cell);
-
cell = factory->NewPropertyCell(factory->empty_string());
cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
set_array_buffer_neutering_protector(*cell);
@@ -651,6 +791,10 @@ void Heap::CreateInitialObjects() {
cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
set_promise_hook_protector(*cell);
+ Handle<Cell> promise_resolve_cell = factory->NewCell(
+ handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
+ set_promise_resolve_protector(*promise_resolve_cell);
+
cell = factory->NewPropertyCell(factory->empty_string());
cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
set_promise_then_protector(*cell);
@@ -690,6 +834,12 @@ void Heap::CreateInternalAccessorInfoObjects() {
roots_[k##AccessorName##AccessorRootIndex] = *acessor_info;
ACCESSOR_INFO_LIST(INIT_ACCESSOR_INFO)
#undef INIT_ACCESSOR_INFO
+
+#define INIT_SIDE_EFFECT_FLAG(AccessorName) \
+ AccessorInfo::cast(roots_[k##AccessorName##AccessorRootIndex]) \
+ ->set_has_no_side_effect(true);
+ SIDE_EFFECT_FREE_ACCESSOR_INFO_LIST(INIT_SIDE_EFFECT_FLAG)
+#undef INIT_SIDE_EFFECT_FLAG
}
} // namespace internal
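Note on the INIT_SIDE_EFFECT_FLAG hunk above: it relies on V8's X-macro list pattern, where SIDE_EFFECT_FREE_ACCESSOR_INFO_LIST expands a caller-supplied macro once per accessor name. The sketch below is illustrative only; the list entries, AccessorInfo layout and lookup are made up and not V8's real definitions, but the expansion mechanics are the same.

    #include <cstdio>

    // Hypothetical X-macro list: expands V once per accessor name.
    #define SIDE_EFFECT_FREE_ACCESSORS(V) \
      V(ArrayLength)                      \
      V(StringLength)

    struct AccessorInfo {
      bool has_no_side_effect = false;
    };

    // Declare one info object per entry, reusing the same list.
    #define DECLARE_INFO(Name) AccessorInfo Name##_info;
    SIDE_EFFECT_FREE_ACCESSORS(DECLARE_INFO)
    #undef DECLARE_INFO

    int main() {
      // Same shape as INIT_SIDE_EFFECT_FLAG in the hunk above.
    #define INIT_SIDE_EFFECT_FLAG(Name) Name##_info.has_no_side_effect = true;
      SIDE_EFFECT_FREE_ACCESSORS(INIT_SIDE_EFFECT_FLAG)
    #undef INIT_SIDE_EFFECT_FLAG
      std::printf("ArrayLength side-effect free: %d\n",
                  ArrayLength_info.has_no_side_effect);
      return 0;
    }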
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index 498c34bd54..c9cd68d5c6 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -281,8 +281,9 @@ AllocationResult LocalAllocationBuffer::AllocateRawAligned(
}
bool PagedSpace::EnsureLinearAllocationArea(int size_in_bytes) {
- if (allocation_info_.top() + size_in_bytes <= allocation_info_.limit())
+ if (allocation_info_.top() + size_in_bytes <= allocation_info_.limit()) {
return true;
+ }
return SlowRefillLinearAllocationArea(size_in_bytes);
}
@@ -458,8 +459,7 @@ AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
#endif
}
-
-MUST_USE_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
+V8_WARN_UNUSED_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
int size_in_bytes, AllocationAlignment alignment) {
base::LockGuard<base::Mutex> guard(&mutex_);
return AllocateRaw(size_in_bytes, alignment);
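The spaces-inl.h hunk is part of the tree-wide rename of MUST_USE_RESULT to V8_WARN_UNUSED_RESULT. Both map to the compiler's warn-unused-result attribute, so silently dropping an AllocationResult still triggers a warning. The macro definition below is a simplified sketch for GCC/Clang; V8's real definition lives in its configuration headers and covers more toolchains.

    // Illustrative definition only.
    #if defined(__GNUC__) || defined(__clang__)
    #define V8_WARN_UNUSED_RESULT __attribute__((warn_unused_result))
    #else
    #define V8_WARN_UNUSED_RESULT
    #endif

    struct AllocationResult {
      bool retry;
    };

    V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int size_in_bytes) {
      return AllocationResult{size_in_bytes > 4096};
    }

    int main() {
      AllocateRaw(64);  // Warning: ignoring return value marked warn_unused_result.
      return 0;
    }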
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index d90cac90f2..5a94e1c3b9 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -9,6 +9,7 @@
#include "src/base/bits.h"
#include "src/base/macros.h"
#include "src/base/platform/semaphore.h"
+#include "src/base/template-utils.h"
#include "src/counters.h"
#include "src/heap/array-buffer-tracker.h"
#include "src/heap/concurrent-marking.h"
@@ -46,7 +47,8 @@ HeapObjectIterator::HeapObjectIterator(Page* page)
Space* owner = page->owner();
DCHECK(owner == page->heap()->old_space() ||
owner == page->heap()->map_space() ||
- owner == page->heap()->code_space());
+ owner == page->heap()->code_space() ||
+ owner == page->heap()->read_only_space());
#endif // DEBUG
}
@@ -59,10 +61,14 @@ bool HeapObjectIterator::AdvanceToNextPage() {
Heap* heap = space_->heap();
heap->mark_compact_collector()->sweeper()->EnsurePageIsIterable(cur_page);
+#ifdef ENABLE_MINOR_MC
if (cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE))
heap->minor_mark_compact_collector()->MakeIterable(
cur_page, MarkingTreatmentMode::CLEAR,
FreeSpaceTreatmentMode::IGNORE_FREE_SPACE);
+#else
+ DCHECK(!cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE));
+#endif // ENABLE_MINOR_MC
cur_addr_ = cur_page->area_start();
cur_end_ = cur_page->area_end();
DCHECK(cur_page->SweepingDone());
@@ -338,7 +344,7 @@ class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
};
void MemoryAllocator::Unmapper::FreeQueuedChunks() {
- if (heap_->use_tasks() && FLAG_concurrent_sweeping) {
+ if (!heap_->IsTearingDown() && FLAG_concurrent_sweeping) {
if (!MakeRoomForNewTasks()) {
// kMaxUnmapperTasks are already running. Avoid creating any more.
if (FLAG_trace_unmapper) {
@@ -348,7 +354,7 @@ void MemoryAllocator::Unmapper::FreeQueuedChunks() {
}
return;
}
- UnmapFreeMemoryTask* task = new UnmapFreeMemoryTask(heap_->isolate(), this);
+ auto task = base::make_unique<UnmapFreeMemoryTask>(heap_->isolate(), this);
if (FLAG_trace_unmapper) {
PrintIsolate(heap_->isolate(),
"Unmapper::FreeQueuedChunks: new task id=%" PRIu64 "\n",
@@ -359,8 +365,7 @@ void MemoryAllocator::Unmapper::FreeQueuedChunks() {
DCHECK_GE(active_unmapping_tasks_.Value(), 0);
active_unmapping_tasks_.Increment(1);
task_ids_[pending_unmapping_tasks_++] = task->id();
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- task, v8::Platform::kShortRunningTask);
+ V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
} else {
PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
}
@@ -631,7 +636,15 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->categories_[i] = nullptr;
}
- heap->incremental_marking()->non_atomic_marking_state()->ClearLiveness(chunk);
+ if (owner->identity() == RO_SPACE) {
+ heap->incremental_marking()
+ ->non_atomic_marking_state()
+ ->bitmap(chunk)
+ ->MarkAllBits();
+ } else {
+ heap->incremental_marking()->non_atomic_marking_state()->ClearLiveness(
+ chunk);
+ }
DCHECK_EQ(kFlagsOffset, OFFSET_OF(MemoryChunk, flags_));
@@ -678,6 +691,7 @@ Page* SemiSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
Page* page = static_cast<Page*>(chunk);
heap()->incremental_marking()->SetNewSpacePageFlags(page);
page->AllocateLocalTracker();
+#ifdef ENABLE_MINOR_MC
if (FLAG_minor_mc) {
page->AllocateYoungGenerationBitmap();
heap()
@@ -685,6 +699,7 @@ Page* SemiSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
->non_atomic_marking_state()
->ClearLiveness(page);
}
+#endif // ENABLE_MINOR_MC
page->InitializationMemoryFence();
return page;
}
@@ -1402,15 +1417,6 @@ void MemoryChunk::ReleaseYoungGenerationBitmap() {
// -----------------------------------------------------------------------------
// PagedSpace implementation
-STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::NEW_SPACE) ==
- ObjectSpace::kObjectSpaceNewSpace);
-STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::OLD_SPACE) ==
- ObjectSpace::kObjectSpaceOldSpace);
-STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::CODE_SPACE) ==
- ObjectSpace::kObjectSpaceCodeSpace);
-STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) ==
- ObjectSpace::kObjectSpaceMapSpace);
-
void Space::AddAllocationObserver(AllocationObserver* observer) {
allocation_observers_.push_back(observer);
StartNextInlineAllocationStep();
@@ -1472,7 +1478,6 @@ bool PagedSpace::HasBeenSetUp() { return true; }
void PagedSpace::TearDown() {
for (auto it = begin(); it != end();) {
Page* page = *(it++); // Will be erased.
- ArrayBufferTracker::FreeAll(page);
heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
}
anchor_.set_next_page(&anchor_);
@@ -1484,7 +1489,7 @@ void PagedSpace::RefillFreeList() {
// Any PagedSpace might invoke RefillFreeList. We filter all but our old
// generation spaces out.
if (identity() != OLD_SPACE && identity() != CODE_SPACE &&
- identity() != MAP_SPACE) {
+ identity() != MAP_SPACE && identity() != RO_SPACE) {
return;
}
MarkCompactCollector* collector = heap()->mark_compact_collector();
@@ -1784,6 +1789,13 @@ void PagedSpace::FreeLinearAllocationArea() {
InlineAllocationStep(current_top, nullptr, nullptr, 0);
SetTopAndLimit(nullptr, nullptr);
DCHECK_GE(current_limit, current_top);
+
+ // The code page of the linear allocation area needs to be unprotected
+ // because we are going to write a filler into that memory area below.
+ if (identity() == CODE_SPACE) {
+ heap_->UnprotectAndRegisterMemoryChunk(
+ MemoryChunk::FromAddress(current_top));
+ }
Free(current_top, current_limit - current_top,
SpaceAccountingMode::kSpaceAccounted);
}
@@ -1850,7 +1862,8 @@ bool PagedSpace::RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
if (!is_local()) {
heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
- Heap::kNoGCFlags, kGCCallbackScheduleIdleGarbageCollection);
+ heap()->GCFlagsForIncrementalMarking(),
+ kGCCallbackScheduleIdleGarbageCollection);
}
size_t new_node_size = 0;
@@ -1859,13 +1872,6 @@ bool PagedSpace::RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
DCHECK_GE(new_node_size, size_in_bytes);
-#ifdef DEBUG
- for (size_t i = 0; i < size_in_bytes / kPointerSize; i++) {
- reinterpret_cast<Object**>(new_node->address())[i] =
- Smi::FromInt(kCodeZapValue);
- }
-#endif
-
// The old-space-step might have finished sweeping and restarted marking.
// Verify that it did not turn the page of the new node into an evacuation
// candidate.
@@ -1873,7 +1879,8 @@ bool PagedSpace::RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
// Memory in the linear allocation area is counted as allocated. We may free
// a little of this again immediately - see below.
- IncreaseAllocatedBytes(new_node_size, Page::FromAddress(new_node->address()));
+ Page* page = Page::FromAddress(new_node->address());
+ IncreaseAllocatedBytes(new_node_size, page);
Address start = new_node->address();
Address end = new_node->address() + new_node_size;
@@ -1881,6 +1888,9 @@ bool PagedSpace::RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
DCHECK_LE(limit, end);
DCHECK_LE(size_in_bytes, limit - start);
if (limit != end) {
+ if (identity() == CODE_SPACE) {
+ heap_->UnprotectAndRegisterMemoryChunk(page);
+ }
Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
}
SetLinearAllocationArea(start, limit);
@@ -1927,7 +1937,7 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
// All the interior pointers should be contained in the heap.
int size = object->Size();
- object->IterateBody(map->instance_type(), size, visitor);
+ object->IterateBody(map, size, visitor);
CHECK(object->address() + size <= top);
end_of_previous_object = object->address() + size;
}
@@ -2379,7 +2389,7 @@ void NewSpace::Verify() {
// All the interior pointers should be contained in the heap.
VerifyPointersVisitor visitor;
int size = object->Size();
- object->IterateBody(map->instance_type(), size, &visitor);
+ object->IterateBody(map, size, &visitor);
current += size;
} else {
@@ -2414,9 +2424,6 @@ void SemiSpace::SetUp(size_t initial_capacity, size_t maximum_capacity) {
void SemiSpace::TearDown() {
// Properly uncommit memory to keep the allocator counters in sync.
if (is_committed()) {
- for (Page* p : *this) {
- ArrayBufferTracker::FreeAll(p);
- }
Uncommit();
}
current_capacity_ = maximum_capacity_ = 0;
@@ -2714,25 +2721,17 @@ void FreeListCategory::Reset() {
available_ = 0;
}
-FreeSpace* FreeListCategory::PickNodeFromList(size_t* node_size) {
+FreeSpace* FreeListCategory::PickNodeFromList(size_t minimum_size,
+ size_t* node_size) {
DCHECK(page()->CanAllocate());
FreeSpace* node = top();
- if (node == nullptr) return nullptr;
- set_top(node->next());
- *node_size = node->Size();
- available_ -= *node_size;
- return node;
-}
-
-FreeSpace* FreeListCategory::TryPickNodeFromList(size_t minimum_size,
- size_t* node_size) {
- DCHECK(page()->CanAllocate());
- FreeSpace* node = PickNodeFromList(node_size);
- if ((node != nullptr) && (*node_size < minimum_size)) {
- Free(node->address(), *node_size, kLinkCategory);
+ if (node == nullptr || static_cast<size_t>(node->Size()) < minimum_size) {
*node_size = 0;
return nullptr;
}
+ set_top(node->next());
+ *node_size = node->Size();
+ available_ -= *node_size;
return node;
}
@@ -2750,6 +2749,11 @@ FreeSpace* FreeListCategory::SearchForNodeInList(size_t minimum_size,
set_top(cur_node->next());
}
if (prev_non_evac_node != nullptr) {
+ MemoryChunk* chunk =
+ MemoryChunk::FromAddress(prev_non_evac_node->address());
+ if (chunk->owner()->identity() == CODE_SPACE) {
+ chunk->heap()->UnprotectAndRegisterMemoryChunk(chunk);
+ }
prev_non_evac_node->set_next(cur_node->next());
}
*node_size = size;
@@ -2829,12 +2833,13 @@ size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
return 0;
}
-FreeSpace* FreeList::FindNodeIn(FreeListCategoryType type, size_t* node_size) {
+FreeSpace* FreeList::FindNodeIn(FreeListCategoryType type, size_t minimum_size,
+ size_t* node_size) {
FreeListCategoryIterator it(this, type);
FreeSpace* node = nullptr;
while (it.HasNext()) {
FreeListCategory* current = it.Next();
- node = current->PickNodeFromList(node_size);
+ node = current->PickNodeFromList(minimum_size, node_size);
if (node != nullptr) {
DCHECK(IsVeryLong() || Available() == SumFreeLists());
return node;
@@ -2844,11 +2849,11 @@ FreeSpace* FreeList::FindNodeIn(FreeListCategoryType type, size_t* node_size) {
return node;
}
-FreeSpace* FreeList::TryFindNodeIn(FreeListCategoryType type, size_t* node_size,
- size_t minimum_size) {
+FreeSpace* FreeList::TryFindNodeIn(FreeListCategoryType type,
+ size_t minimum_size, size_t* node_size) {
if (categories_[type] == nullptr) return nullptr;
FreeSpace* node =
- categories_[type]->TryPickNodeFromList(minimum_size, node_size);
+ categories_[type]->PickNodeFromList(minimum_size, node_size);
if (node != nullptr) {
DCHECK(IsVeryLong() || Available() == SumFreeLists());
}
@@ -2882,7 +2887,8 @@ FreeSpace* FreeList::Allocate(size_t size_in_bytes, size_t* node_size) {
FreeListCategoryType type =
SelectFastAllocationFreeListCategoryType(size_in_bytes);
for (int i = type; i < kHuge && node == nullptr; i++) {
- node = FindNodeIn(static_cast<FreeListCategoryType>(i), node_size);
+ node = FindNodeIn(static_cast<FreeListCategoryType>(i), size_in_bytes,
+ node_size);
}
if (node == nullptr) {
@@ -2895,7 +2901,7 @@ FreeSpace* FreeList::Allocate(size_t size_in_bytes, size_t* node_size) {
// We didn't find anything in the huge list. Now search the best fitting
// free list for a node that has at least the requested size.
type = SelectFreeListCategoryType(size_in_bytes);
- node = TryFindNodeIn(type, node_size, size_in_bytes);
+ node = TryFindNodeIn(type, size_in_bytes, node_size);
}
if (node != nullptr) {
@@ -3276,7 +3282,8 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
}
heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
- Heap::kNoGCFlags, kGCCallbackScheduleIdleGarbageCollection);
+ heap()->GCFlagsForIncrementalMarking(),
+ kGCCallbackScheduleIdleGarbageCollection);
heap()->CreateFillerObjectAt(object->address(), object_size,
ClearRecordedSlots::kNo);
if (heap()->incremental_marking()->black_allocation()) {
@@ -3471,7 +3478,7 @@ void LargeObjectSpace::Verify() {
// Byte arrays and strings don't have interior pointers.
if (object->IsAbstractCode()) {
VerifyPointersVisitor code_visitor;
- object->IterateBody(map->instance_type(), object->Size(), &code_visitor);
+ object->IterateBody(map, object->Size(), &code_visitor);
} else if (object->IsFixedArray()) {
FixedArray* array = FixedArray::cast(object);
for (int j = 0; j < array->length(); j++) {
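Several hunks in spaces.cc (and later in store-buffer.cc and sweeper.cc) switch task dispatch from CallOnBackgroundThread with a raw Task* to CallOnWorkerThread taking ownership of a std::unique_ptr, so the platform unambiguously owns and deletes the task. The sketch below shows only that ownership shape with simplified stand-ins; it is not the real v8::Platform or v8::Task interface.

    #include <cstdio>
    #include <memory>

    struct Task {
      virtual ~Task() = default;
      virtual void Run() = 0;
    };

    struct Platform {
      // Ownership of the task moves to the platform, which runs and deletes it.
      void CallOnWorkerThread(std::unique_ptr<Task> task) {
        task->Run();  // A real platform would queue this on a worker thread.
      }
    };

    struct UnmapFreeMemoryTask : Task {
      void Run() override { std::printf("freeing queued chunks\n"); }
    };

    int main() {
      Platform platform;
      auto task = std::make_unique<UnmapFreeMemoryTask>();
      platform.CallOnWorkerThread(std::move(task));  // No raw new/delete at the call site.
      return 0;
    }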
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index 1c8bad8dc5..e5377b0336 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -179,14 +179,10 @@ class FreeListCategory {
void Free(Address address, size_t size_in_bytes, FreeMode mode);
- // Picks a node from the list and stores its size in |node_size|. Returns
- // nullptr if the category is empty.
- FreeSpace* PickNodeFromList(size_t* node_size);
-
// Performs a single try to pick a node of at least |minimum_size| from the
// category. Stores the actual size in |node_size|. Returns nullptr if no
// node is found.
- FreeSpace* TryPickNodeFromList(size_t minimum_size, size_t* node_size);
+ FreeSpace* PickNodeFromList(size_t minimum_size, size_t* node_size);
// Picks a node of at least |minimum_size| from the category. Stores the
// actual size in |node_size|. Returns nullptr if no node is found.
@@ -1052,9 +1048,9 @@ class CodeRange {
// Allocates a chunk of memory from the large-object portion of
// the code range. On platforms with no separate code range, should
// not be called.
- MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size,
- const size_t commit_size,
- size_t* allocated);
+ V8_WARN_UNUSED_RESULT Address AllocateRawMemory(const size_t requested_size,
+ const size_t commit_size,
+ size_t* allocated);
bool CommitRawMemory(Address start, size_t length);
bool UncommitRawMemory(Address start, size_t length);
void FreeRawMemory(Address buf, size_t length);
@@ -1389,9 +1385,10 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
// filling it up with a recognizable non-nullptr bit pattern.
void ZapBlock(Address start, size_t size);
- MUST_USE_RESULT bool CommitExecutableMemory(VirtualMemory* vm, Address start,
- size_t commit_size,
- size_t reserved_size);
+ V8_WARN_UNUSED_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
+ Address start,
+ size_t commit_size,
+ size_t reserved_size);
CodeRange* code_range() { return code_range_; }
Unmapper* unmapper() { return &unmapper_; }
@@ -1438,6 +1435,7 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
void UnregisterExecutableMemoryChunk(MemoryChunk* chunk) {
DCHECK_NE(executable_memory_.find(chunk), executable_memory_.end());
executable_memory_.erase(chunk);
+ chunk->heap()->UnregisterUnprotectedMemoryChunk(chunk);
}
Isolate* isolate_;
@@ -1779,7 +1777,8 @@ class V8_EXPORT_PRIVATE FreeList {
// bytes. Returns the actual node size in node_size which can be bigger than
// size_in_bytes. This method returns null if the allocation request cannot be
// handled by the free list.
- MUST_USE_RESULT FreeSpace* Allocate(size_t size_in_bytes, size_t* node_size);
+ V8_WARN_UNUSED_RESULT FreeSpace* Allocate(size_t size_in_bytes,
+ size_t* node_size);
// Clear the free list.
void Reset();
@@ -1879,12 +1878,14 @@ class V8_EXPORT_PRIVATE FreeList {
// Walks all available categories for a given |type| and tries to retrieve
// a node. Returns nullptr if the category is empty.
- FreeSpace* FindNodeIn(FreeListCategoryType type, size_t* node_size);
+ FreeSpace* FindNodeIn(FreeListCategoryType type, size_t minimum_size,
+ size_t* node_size);
// Tries to retrieve a node from the first category in a given |type|.
- // Returns nullptr if the category is empty.
- FreeSpace* TryFindNodeIn(FreeListCategoryType type, size_t* node_size,
- size_t minimum_size);
+ // Returns nullptr if the category is empty or the top entry is smaller
+ // than minimum_size.
+ FreeSpace* TryFindNodeIn(FreeListCategoryType type, size_t minimum_size,
+ size_t* node_size);
// Searches a given |type| for a node of at least |minimum_size|.
FreeSpace* SearchForNodeInList(FreeListCategoryType type, size_t* node_size,
@@ -1948,7 +1949,7 @@ class LocalAllocationBuffer {
LocalAllocationBuffer(const LocalAllocationBuffer& other);
LocalAllocationBuffer& operator=(const LocalAllocationBuffer& other);
- MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
+ V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
int size_in_bytes, AllocationAlignment alignment);
inline bool IsValid() { return allocation_info_.top() != nullptr; }
@@ -2103,17 +2104,17 @@ class V8_EXPORT_PRIVATE PagedSpace
// Allocate the requested number of bytes in the space if possible, return a
// failure object if not. Only use IGNORE_SKIP_LIST if the skip list is going
// to be manually updated later.
- MUST_USE_RESULT inline AllocationResult AllocateRawUnaligned(
+ V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawUnaligned(
int size_in_bytes, UpdateSkipList update_skip_list = UPDATE_SKIP_LIST);
// Allocate the requested number of bytes in the space double aligned if
// possible, return a failure object if not.
- MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
+ V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
int size_in_bytes, AllocationAlignment alignment);
// Allocate the requested number of bytes in the space and consider allocation
// alignment if needed.
- MUST_USE_RESULT inline AllocationResult AllocateRaw(
+ V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
int size_in_bytes, AllocationAlignment alignment);
size_t Free(Address start, size_t size_in_bytes, SpaceAccountingMode mode) {
@@ -2293,24 +2294,25 @@ class V8_EXPORT_PRIVATE PagedSpace
inline HeapObject* TryAllocateLinearlyAligned(int* size_in_bytes,
AllocationAlignment alignment);
- MUST_USE_RESULT bool RefillLinearAllocationAreaFromFreeList(
+ V8_WARN_UNUSED_RESULT bool RefillLinearAllocationAreaFromFreeList(
size_t size_in_bytes);
// If sweeping is still in progress try to sweep unswept pages. If that is
// not successful, wait for the sweeper threads and retry free-list
// allocation. Returns false if there is not enough space and the caller
// has to retry after collecting garbage.
- MUST_USE_RESULT virtual bool SweepAndRetryAllocation(int size_in_bytes);
+ V8_WARN_UNUSED_RESULT virtual bool SweepAndRetryAllocation(int size_in_bytes);
// Slow path of AllocateRaw. This function is space-dependent. Returns false
// if there is not enough space and the caller has to retry after
// collecting garbage.
- MUST_USE_RESULT virtual bool SlowRefillLinearAllocationArea(
+ V8_WARN_UNUSED_RESULT virtual bool SlowRefillLinearAllocationArea(
int size_in_bytes);
// Implementation of SlowAllocateRaw. Returns false if there is not enough
// space and the caller has to retry after collecting garbage.
- MUST_USE_RESULT bool RawSlowRefillLinearAllocationArea(int size_in_bytes);
+ V8_WARN_UNUSED_RESULT bool RawSlowRefillLinearAllocationArea(
+ int size_in_bytes);
size_t area_size_;
@@ -2681,16 +2683,16 @@ class NewSpace : public SpaceWithLinearArea {
// Set the age mark in the active semispace.
void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
- MUST_USE_RESULT INLINE(AllocationResult AllocateRawAligned(
+ V8_WARN_UNUSED_RESULT INLINE(AllocationResult AllocateRawAligned(
int size_in_bytes, AllocationAlignment alignment));
- MUST_USE_RESULT INLINE(
+ V8_WARN_UNUSED_RESULT INLINE(
AllocationResult AllocateRawUnaligned(int size_in_bytes));
- MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(
+ V8_WARN_UNUSED_RESULT INLINE(AllocationResult AllocateRaw(
int size_in_bytes, AllocationAlignment alignment));
- MUST_USE_RESULT inline AllocationResult AllocateRawSynchronized(
+ V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawSynchronized(
int size_in_bytes, AllocationAlignment alignment);
// Reset the allocation pointer to the beginning of the active semispace.
@@ -2806,9 +2808,10 @@ class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
// The space is temporary and not included in any snapshots.
bool snapshotable() override { return false; }
- MUST_USE_RESULT bool SweepAndRetryAllocation(int size_in_bytes) override;
+ V8_WARN_UNUSED_RESULT bool SweepAndRetryAllocation(
+ int size_in_bytes) override;
- MUST_USE_RESULT bool SlowRefillLinearAllocationArea(
+ V8_WARN_UNUSED_RESULT bool SlowRefillLinearAllocationArea(
int size_in_bytes) override;
};
@@ -2880,6 +2883,14 @@ class MapSpace : public PagedSpace {
#endif
};
+// -----------------------------------------------------------------------------
+// Read Only space for all Immortal Immovable and Immutable objects
+
+class ReadOnlySpace : public PagedSpace {
+ public:
+ ReadOnlySpace(Heap* heap, AllocationSpace id, Executability executable)
+ : PagedSpace(heap, id, executable) {}
+};
// -----------------------------------------------------------------------------
// Large objects ( > kMaxRegularHeapObjectSize ) are allocated and
@@ -2908,8 +2919,8 @@ class LargeObjectSpace : public Space {
// Shared implementation of AllocateRaw, AllocateRawCode and
// AllocateRawFixedArray.
- MUST_USE_RESULT AllocationResult
- AllocateRaw(int object_size, Executability executable);
+ V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size,
+ Executability executable);
// Available bytes for objects in this space.
inline size_t Available() override;
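In the free-list hunks above, PickNodeFromList and TryPickNodeFromList are merged: the surviving PickNodeFromList(minimum_size, node_size) inspects only the head of the category and leaves it linked when it is too small, instead of popping it and immediately re-freeing it. A minimal sketch of that behaviour on a plain singly linked list follows; FreeNode and the test in main are hypothetical, not V8's FreeSpace machinery.

    #include <cstddef>

    struct FreeNode {
      std::size_t size;
      FreeNode* next;
    };

    struct FreeListCategory {
      FreeNode* top = nullptr;
      std::size_t available = 0;

      // Mirrors the merged PickNodeFromList: the head stays in place if it
      // cannot satisfy |minimum_size|.
      FreeNode* PickNodeFromList(std::size_t minimum_size, std::size_t* node_size) {
        FreeNode* node = top;
        if (node == nullptr || node->size < minimum_size) {
          *node_size = 0;
          return nullptr;
        }
        top = node->next;
        *node_size = node->size;
        available -= *node_size;
        return node;
      }
    };

    int main() {
      FreeNode small_node{32, nullptr};
      FreeListCategory category;
      category.top = &small_node;
      category.available = 32;
      std::size_t got = 0;
      FreeNode* node = category.PickNodeFromList(64, &got);  // Too small: stays linked.
      return (node == nullptr && got == 0 && category.top == &small_node) ? 0 : 1;
    }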
diff --git a/deps/v8/src/heap/store-buffer.cc b/deps/v8/src/heap/store-buffer.cc
index 724edf5721..3df5a5a53f 100644
--- a/deps/v8/src/heap/store-buffer.cc
+++ b/deps/v8/src/heap/store-buffer.cc
@@ -7,6 +7,7 @@
#include <algorithm>
#include "src/base/macros.h"
+#include "src/base/template-utils.h"
#include "src/counters.h"
#include "src/heap/incremental-marking.h"
#include "src/isolate.h"
@@ -35,7 +36,7 @@ void StoreBuffer::SetUp() {
VirtualMemory reservation;
if (!AllocVirtualMemory(kStoreBufferSize * 3, heap_->GetRandomMmapAddr(),
&reservation)) {
- V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
+ heap_->FatalProcessOutOfMemory("StoreBuffer::SetUp");
}
uintptr_t start_as_int = reinterpret_cast<uintptr_t>(reservation.address());
start_[0] =
@@ -59,7 +60,7 @@ void StoreBuffer::SetUp() {
if (!reservation.SetPermissions(reinterpret_cast<Address>(start_[0]),
kStoreBufferSize * kStoreBuffers,
PageAllocator::kReadWrite)) {
- V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
+ heap_->FatalProcessOutOfMemory("StoreBuffer::SetUp");
}
current_ = 0;
top_ = start_[current_];
@@ -94,9 +95,8 @@ void StoreBuffer::FlipStoreBuffers() {
if (!task_running_ && FLAG_concurrent_store_buffer) {
task_running_ = true;
- Task* task = new Task(heap_->isolate(), this);
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- task, v8::Platform::kShortRunningTask);
+ V8::GetCurrentPlatform()->CallOnWorkerThread(
+ base::make_unique<Task>(heap_->isolate(), this));
}
}
diff --git a/deps/v8/src/heap/sweeper.cc b/deps/v8/src/heap/sweeper.cc
index 2072e407e9..f72f041c78 100644
--- a/deps/v8/src/heap/sweeper.cc
+++ b/deps/v8/src/heap/sweeper.cc
@@ -4,6 +4,7 @@
#include "src/heap/sweeper.h"
+#include "src/base/template-utils.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/mark-compact-inl.h"
@@ -47,15 +48,18 @@ Sweeper::FilterSweepingPagesScope::FilterSweepingPagesScope(
USE(pause_or_complete_scope_);
if (!sweeping_in_progress_) return;
- old_space_sweeping_list_ = std::move(sweeper_->sweeping_list_[OLD_SPACE]);
- sweeper_->sweeping_list_[OLD_SPACE].clear();
+ int old_space_index = GetSweepSpaceIndex(OLD_SPACE);
+ old_space_sweeping_list_ =
+ std::move(sweeper_->sweeping_list_[old_space_index]);
+ sweeper_->sweeping_list_[old_space_index].clear();
}
Sweeper::FilterSweepingPagesScope::~FilterSweepingPagesScope() {
DCHECK_EQ(sweeping_in_progress_, sweeper_->sweeping_in_progress());
if (!sweeping_in_progress_) return;
- sweeper_->sweeping_list_[OLD_SPACE] = std::move(old_space_sweeping_list_);
+ sweeper_->sweeping_list_[GetSweepSpaceIndex(OLD_SPACE)] =
+ std::move(old_space_sweeping_list_);
// old_space_sweeping_list_ does not need to be cleared as we don't use it.
}
@@ -78,17 +82,16 @@ class Sweeper::SweeperTask final : public CancelableTask {
void RunInternal() final {
TRACE_BACKGROUND_GC(tracer_,
GCTracer::BackgroundScope::MC_BACKGROUND_SWEEPING);
- DCHECK_GE(space_to_start_, FIRST_PAGED_SPACE);
- DCHECK_LE(space_to_start_, LAST_PAGED_SPACE);
- const int offset = space_to_start_ - FIRST_PAGED_SPACE;
- const int num_spaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
- for (int i = 0; i < num_spaces; i++) {
- const int space_id = FIRST_PAGED_SPACE + ((i + offset) % num_spaces);
+ DCHECK(IsValidSweepingSpace(space_to_start_));
+ const int offset = space_to_start_ - FIRST_GROWABLE_PAGED_SPACE;
+ for (int i = 0; i < kNumberOfSweepingSpaces; i++) {
+ const AllocationSpace space_id = static_cast<AllocationSpace>(
+ FIRST_GROWABLE_PAGED_SPACE +
+ ((i + offset) % kNumberOfSweepingSpaces));
// Do not sweep code space concurrently.
- if (static_cast<AllocationSpace>(space_id) == CODE_SPACE) continue;
- DCHECK_GE(space_id, FIRST_PAGED_SPACE);
- DCHECK_LE(space_id, LAST_PAGED_SPACE);
- sweeper_->SweepSpaceFromTask(static_cast<AllocationSpace>(space_id));
+ if (space_id == CODE_SPACE) continue;
+ DCHECK(IsValidSweepingSpace(space_id));
+ sweeper_->SweepSpaceFromTask(space_id);
}
num_sweeping_tasks_->Decrement(1);
pending_sweeper_tasks_->Signal();
@@ -136,7 +139,9 @@ void Sweeper::StartSweeping() {
MajorNonAtomicMarkingState* marking_state =
heap_->mark_compact_collector()->non_atomic_marking_state();
ForAllSweepingSpaces([this, marking_state](AllocationSpace space) {
- std::sort(sweeping_list_[space].begin(), sweeping_list_[space].end(),
+ int space_index = GetSweepSpaceIndex(space);
+ std::sort(sweeping_list_[space_index].begin(),
+ sweeping_list_[space_index].end(),
[marking_state](Page* a, Page* b) {
return marking_state->live_bytes(a) <
marking_state->live_bytes(b);
@@ -152,13 +157,12 @@ void Sweeper::StartSweeperTasks() {
ForAllSweepingSpaces([this](AllocationSpace space) {
DCHECK(IsValidSweepingSpace(space));
num_sweeping_tasks_.Increment(1);
- SweeperTask* task = new SweeperTask(heap_->isolate(), this,
- &pending_sweeper_tasks_semaphore_,
- &num_sweeping_tasks_, space);
+ auto task = base::make_unique<SweeperTask>(
+ heap_->isolate(), this, &pending_sweeper_tasks_semaphore_,
+ &num_sweeping_tasks_, space);
DCHECK_LT(num_tasks_, kMaxSweeperTasks);
task_ids_[num_tasks_++] = task->id();
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- task, v8::Platform::kShortRunningTask);
+ V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
});
ScheduleIncrementalSweepingTask();
}
@@ -178,7 +182,7 @@ void Sweeper::SweepOrWaitUntilSweepingCompleted(Page* page) {
Page* Sweeper::GetSweptPageSafe(PagedSpace* space) {
base::LockGuard<base::Mutex> guard(&mutex_);
- SweptList& list = swept_list_[space->identity()];
+ SweptList& list = swept_list_[GetSweepSpaceIndex(space->identity())];
if (!list.empty()) {
auto last_page = list.back();
list.pop_back();
@@ -215,8 +219,9 @@ void Sweeper::EnsureCompleted() {
AbortAndWaitForTasks();
- ForAllSweepingSpaces(
- [this](AllocationSpace space) { CHECK(sweeping_list_[space].empty()); });
+ ForAllSweepingSpaces([this](AllocationSpace space) {
+ CHECK(sweeping_list_[GetSweepSpaceIndex(space)].empty());
+ });
sweeping_in_progress_ = false;
}
@@ -283,8 +288,9 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
free_start, size, SpaceAccountingMode::kSpaceUnaccounted);
max_freed_bytes = Max(freed_bytes, max_freed_bytes);
} else {
- p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
- ClearRecordedSlots::kNo);
+ p->heap()->CreateFillerObjectAt(
+ free_start, static_cast<int>(size), ClearRecordedSlots::kNo,
+ ClearFreedMemoryMode::kClearFreedMemory);
}
RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, free_end,
SlotSet::KEEP_EMPTY_BUCKETS);
@@ -323,7 +329,8 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
max_freed_bytes = Max(freed_bytes, max_freed_bytes);
} else {
p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
- ClearRecordedSlots::kNo);
+ ClearRecordedSlots::kNo,
+ ClearFreedMemoryMode::kClearFreedMemory);
}
RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, p->area_end(),
@@ -378,7 +385,7 @@ bool Sweeper::SweepSpaceIncrementallyFromTask(AllocationSpace identity) {
if (Page* page = GetSweepingPageSafe(identity)) {
ParallelSweepPage(page, identity);
}
- return sweeping_list_[identity].empty();
+ return sweeping_list_[GetSweepSpaceIndex(identity)].empty();
}
int Sweeper::ParallelSweepSpace(AllocationSpace identity,
@@ -435,7 +442,7 @@ int Sweeper::ParallelSweepPage(Page* page, AllocationSpace identity) {
{
base::LockGuard<base::Mutex> guard(&mutex_);
- swept_list_[identity].push_back(page);
+ swept_list_[GetSweepSpaceIndex(identity)].push_back(page);
}
return max_freed;
}
@@ -463,7 +470,7 @@ void Sweeper::AddPage(AllocationSpace space, Page* page,
DCHECK_EQ(Sweeper::READD_TEMPORARY_REMOVED_PAGE, mode);
}
DCHECK_EQ(Page::kSweepingPending, page->concurrent_sweeping_state().Value());
- sweeping_list_[space].push_back(page);
+ sweeping_list_[GetSweepSpaceIndex(space)].push_back(page);
}
void Sweeper::PrepareToBeSweptPage(AllocationSpace space, Page* page) {
@@ -480,10 +487,11 @@ void Sweeper::PrepareToBeSweptPage(AllocationSpace space, Page* page) {
Page* Sweeper::GetSweepingPageSafe(AllocationSpace space) {
base::LockGuard<base::Mutex> guard(&mutex_);
DCHECK(IsValidSweepingSpace(space));
+ int space_index = GetSweepSpaceIndex(space);
Page* page = nullptr;
- if (!sweeping_list_[space].empty()) {
- page = sweeping_list_[space].front();
- sweeping_list_[space].pop_front();
+ if (!sweeping_list_[space_index].empty()) {
+ page = sweeping_list_[space_index].front();
+ sweeping_list_[space_index].pop_front();
}
return page;
}
@@ -550,12 +558,11 @@ void Sweeper::StartIterabilityTasks() {
DCHECK(!iterability_task_started_);
if (FLAG_concurrent_sweeping && !iterability_list_.empty()) {
- IterabilityTask* task = new IterabilityTask(heap_->isolate(), this,
- &iterability_task_semaphore_);
+ auto task = base::make_unique<IterabilityTask>(
+ heap_->isolate(), this, &iterability_task_semaphore_);
iterability_task_id_ = task->id();
iterability_task_started_ = true;
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- task, v8::Platform::kShortRunningTask);
+ V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
}
}
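SweeperTask::RunInternal above still visits every sweeping space, but each task starts at a different one and walks them round-robin via (i + offset) % kNumberOfSweepingSpaces, now expressed over the growable paged spaces. The sketch below reproduces only that indexing; the enum values are placeholders, not V8's real AllocationSpace layout.

    #include <cstdio>

    enum AllocationSpace { OLD_SPACE = 0, CODE_SPACE = 1, MAP_SPACE = 2 };
    constexpr int kNumberOfSweepingSpaces = 3;

    // Each task starts at a different space; all tasks still visit every space.
    void SweepAllSpacesStartingAt(AllocationSpace space_to_start) {
      const int offset = space_to_start;  // First sweeping space is index 0 here.
      for (int i = 0; i < kNumberOfSweepingSpaces; i++) {
        int space_id = (i + offset) % kNumberOfSweepingSpaces;
        if (space_id == CODE_SPACE) continue;  // Code space is not swept concurrently.
        std::printf("sweeping space %d\n", space_id);
      }
    }

    int main() {
      SweepAllSpacesStartingAt(MAP_SPACE);  // Visits MAP, then OLD; skips CODE.
      return 0;
    }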
diff --git a/deps/v8/src/heap/sweeper.h b/deps/v8/src/heap/sweeper.h
index 6eee902bcc..ecf1f8d4d2 100644
--- a/deps/v8/src/heap/sweeper.h
+++ b/deps/v8/src/heap/sweeper.h
@@ -51,7 +51,8 @@ class Sweeper {
void FilterOldSpaceSweepingPages(Callback callback) {
if (!sweeping_in_progress_) return;
- SweepingList* sweeper_list = &sweeper_->sweeping_list_[OLD_SPACE];
+ SweepingList* sweeper_list =
+ &sweeper_->sweeping_list_[GetSweepSpaceIndex(OLD_SPACE)];
// Iteration here is from most free space to least free space.
for (auto it = old_space_sweeping_list_.begin();
it != old_space_sweeping_list_.end(); it++) {
@@ -123,7 +124,8 @@ class Sweeper {
class IterabilityTask;
class SweeperTask;
- static const int kNumberOfSweepingSpaces = LAST_PAGED_SPACE + 1;
+ static const int kNumberOfSweepingSpaces =
+ LAST_GROWABLE_PAGED_SPACE - FIRST_GROWABLE_PAGED_SPACE + 1;
static const int kMaxSweeperTasks = 3;
template <typename Callback>
@@ -137,7 +139,7 @@ class Sweeper {
bool IsDoneSweeping() const {
bool is_done = true;
ForAllSweepingSpaces([this, &is_done](AllocationSpace space) {
- if (!sweeping_list_[space].empty()) is_done = false;
+ if (!sweeping_list_[GetSweepSpaceIndex(space)].empty()) is_done = false;
});
return is_done;
}
@@ -159,11 +161,17 @@ class Sweeper {
void MakeIterable(Page* page);
bool IsValidIterabilitySpace(AllocationSpace space) {
- return space == NEW_SPACE;
+ return space == NEW_SPACE || space == RO_SPACE;
}
- bool IsValidSweepingSpace(AllocationSpace space) {
- return space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE;
+ static bool IsValidSweepingSpace(AllocationSpace space) {
+ return space >= FIRST_GROWABLE_PAGED_SPACE &&
+ space <= LAST_GROWABLE_PAGED_SPACE;
+ }
+
+ static int GetSweepSpaceIndex(AllocationSpace space) {
+ DCHECK(IsValidSweepingSpace(space));
+ return space - FIRST_GROWABLE_PAGED_SPACE;
}
Heap* const heap_;
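The sweeper's sweeping_list_ and swept_list_ arrays are now sized kNumberOfSweepingSpaces and indexed through GetSweepSpaceIndex, which rebases the space enum onto a dense 0-based range, so spaces outside the growable paged range (such as the new RO_SPACE) cost no slots. The enum layout in the sketch below is assumed for illustration; only the rebasing arithmetic matches the header above.

    #include <array>
    #include <cassert>
    #include <vector>

    enum AllocationSpace { RO_SPACE, NEW_SPACE, OLD_SPACE, CODE_SPACE, MAP_SPACE };
    constexpr AllocationSpace FIRST_GROWABLE_PAGED_SPACE = OLD_SPACE;
    constexpr AllocationSpace LAST_GROWABLE_PAGED_SPACE = MAP_SPACE;
    constexpr int kNumberOfSweepingSpaces =
        LAST_GROWABLE_PAGED_SPACE - FIRST_GROWABLE_PAGED_SPACE + 1;

    bool IsValidSweepingSpace(AllocationSpace space) {
      return space >= FIRST_GROWABLE_PAGED_SPACE &&
             space <= LAST_GROWABLE_PAGED_SPACE;
    }

    int GetSweepSpaceIndex(AllocationSpace space) {
      assert(IsValidSweepingSpace(space));
      return space - FIRST_GROWABLE_PAGED_SPACE;
    }

    int main() {
      std::array<std::vector<int>, kNumberOfSweepingSpaces> sweeping_list;
      sweeping_list[GetSweepSpaceIndex(MAP_SPACE)].push_back(42);  // Dense slot 2.
      return sweeping_list[2].size() == 1 ? 0 : 1;
    }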
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index b89dceb786..14589b8577 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -72,7 +72,8 @@ Address RelocInfo::target_address() {
Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_) ||
- rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE);
+ IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
+ IsOffHeapTarget(rmode_));
return reinterpret_cast<Address>(pc_);
}
@@ -117,6 +118,14 @@ Address RelocInfo::target_external_reference() {
return Memory::Address_at(pc_);
}
+void RelocInfo::set_target_external_reference(
+ Address target, ICacheFlushMode icache_flush_mode) {
+ DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
+ Memory::Address_at(pc_) = target;
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ Assembler::FlushICache(pc_, sizeof(Address));
+ }
+}
Address RelocInfo::target_internal_reference() {
DCHECK(rmode_ == INTERNAL_REFERENCE);
@@ -129,6 +138,14 @@ Address RelocInfo::target_internal_reference_address() {
return reinterpret_cast<Address>(pc_);
}
+void RelocInfo::set_wasm_code_table_entry(Address target,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(rmode_ == RelocInfo::WASM_CODE_TABLE_ENTRY);
+ Memory::Address_at(pc_) = target;
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ Assembler::FlushICache(pc_, sizeof(Address));
+ }
+}
Address RelocInfo::target_runtime_entry(Assembler* origin) {
DCHECK(IsRuntimeEntry(rmode_));
@@ -144,6 +161,11 @@ void RelocInfo::set_target_runtime_entry(Address target,
}
}
+Address RelocInfo::target_off_heap_target() {
+ DCHECK(IsOffHeapTarget(rmode_));
+ return Memory::Address_at(pc_);
+}
+
void RelocInfo::WipeOut() {
if (IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_)) {
@@ -171,6 +193,8 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
visitor->VisitInternalReference(host(), this);
} else if (IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(host(), this);
+ } else if (RelocInfo::IsOffHeapTarget(mode)) {
+ visitor->VisitOffHeapTarget(host(), this);
}
}
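The new RelocInfo setters in assembler-ia32-inl.h follow the file's existing pattern: the target is stored as a pointer-sized value directly at the relocated pc, then the instruction cache is flushed unless the caller opts out (effectively a no-op on IA-32, but kept for the shared interface). The helper below is a rough stand-in for what Memory::Address_at does, using memcpy to sidestep alignment concerns; names and the toy buffer are hypothetical.

    #include <cstring>

    using Address = unsigned char*;

    // Store a pointer-sized value into the instruction stream at |pc|.
    void StoreAddressAt(Address pc, Address target) {
      std::memcpy(pc, &target, sizeof(target));
    }

    Address LoadAddressAt(Address pc) {
      Address target;
      std::memcpy(&target, pc, sizeof(target));
      return target;
    }

    int main() {
      unsigned char code[16] = {0};
      unsigned char external[1] = {0};
      StoreAddressAt(code + 4, external);   // What set_target_external_reference does.
      return LoadAddressAt(code + 4) == external ? 0 : 1;
    }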
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index a1b8dada6e..8277b4bd4b 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -2364,6 +2364,7 @@ void Assembler::sqrtsd(XMMRegister dst, Operand src) {
}
void Assembler::haddps(XMMRegister dst, Operand src) {
+ DCHECK(IsEnabled(SSE3));
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
@@ -2371,7 +2372,7 @@ void Assembler::haddps(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-void Assembler::andpd(XMMRegister dst, XMMRegister src) {
+void Assembler::andpd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2379,8 +2380,7 @@ void Assembler::andpd(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
-void Assembler::orpd(XMMRegister dst, XMMRegister src) {
+void Assembler::orpd(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
@@ -2725,6 +2725,17 @@ void Assembler::pshufd(XMMRegister dst, Operand src, uint8_t shuffle) {
EMIT(shuffle);
}
+void Assembler::pblendw(XMMRegister dst, Operand src, uint8_t mask) {
+ DCHECK(IsEnabled(SSE4_1));
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x3A);
+ EMIT(0x0E);
+ emit_sse_operand(dst, src);
+ EMIT(mask);
+}
+
void Assembler::pextrb(Operand dst, XMMRegister src, int8_t offset) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
@@ -2959,6 +2970,12 @@ void Assembler::vpshufd(XMMRegister dst, Operand src, uint8_t shuffle) {
EMIT(shuffle);
}
+void Assembler::vpblendw(XMMRegister dst, XMMRegister src1, Operand src2,
+ uint8_t mask) {
+ vinstr(0x0E, dst, src1, src2, k66, k0F3A, kWIG);
+ EMIT(mask);
+}
+
void Assembler::vpextrb(Operand dst, XMMRegister src, int8_t offset) {
vinstr(0x14, src, xmm0, dst, k66, k0F3A, kWIG);
EMIT(offset);
@@ -3146,7 +3163,7 @@ void Assembler::GrowBuffer() {
// Some internal data structures overflow for very large buffers,
// they must ensure that kMaximalBufferSize is not too large.
if (desc.buffer_size > kMaximalBufferSize) {
- V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
+ V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
}
// Set up new buffer.
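The new pblendw emitter above writes the SSE4.1 encoding 66 0F 3A 0E /r ib. The sketch below hand-assembles the register-direct form of that instruction to make the byte layout concrete; the helper name and register-code convention (0..7 for xmm0..xmm7) are assumptions for illustration.

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // "pblendw xmm_dst, xmm_src, imm8": 66 0F 3A 0E /r ib, register-direct form.
    std::vector<uint8_t> AssemblePblendw(int dst, int src, uint8_t mask) {
      uint8_t modrm = static_cast<uint8_t>(0xC0 | (dst << 3) | src);  // mod = 11
      return {0x66, 0x0F, 0x3A, 0x0E, modrm, mask};
    }

    int main() {
      // pblendw xmm1, xmm2, 0x55  ->  66 0F 3A 0E CA 55
      for (uint8_t b : AssemblePblendw(1, 2, 0x55)) std::printf("%02X ", b);
      std::printf("\n");
      return 0;
    }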
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index f4e495c36b..76594f7bd4 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -103,8 +103,8 @@ class Register : public RegisterBase<Register, kRegAfterLast> {
explicit constexpr Register(int code) : RegisterBase(code) {}
};
-static_assert(IS_TRIVIALLY_COPYABLE(Register) &&
- sizeof(Register) == sizeof(int),
+ASSERT_TRIVIALLY_COPYABLE(Register);
+static_assert(sizeof(Register) == sizeof(int),
"Register can efficiently be passed by value");
#define DEFINE_REGISTER(R) \
@@ -422,10 +422,9 @@ class Operand {
// TODO(clemensh): Get rid of this friendship, or make Operand immutable.
friend class Assembler;
};
+ASSERT_TRIVIALLY_COPYABLE(Operand);
static_assert(sizeof(Operand) <= 2 * kPointerSize,
"Operand must be small enough to pass it by value");
-static_assert(IS_TRIVIALLY_COPYABLE(Operand),
- "Operand must be trivially copyable to pass it by value");
// -----------------------------------------------------------------------------
// A Displacement describes the 32bit immediate field of an instruction which
@@ -677,6 +676,7 @@ class Assembler : public AssemblerBase {
// Arithmetics
void adc(Register dst, int32_t imm32);
+ void adc(Register dst, Register src) { adc(dst, Operand(src)); }
void adc(Register dst, Operand src);
void add(Register dst, Register src) { add(dst, Operand(src)); }
@@ -767,6 +767,7 @@ class Assembler : public AssemblerBase {
void sar_cl(Register dst) { sar_cl(Operand(dst)); }
void sar_cl(Operand dst);
+ void sbb(Register dst, Register src) { sbb(dst, Operand(src)); }
void sbb(Register dst, Operand src);
void shl(Register dst, uint8_t imm8) { shl(Operand(dst), imm8); }
@@ -1064,8 +1065,10 @@ class Assembler : public AssemblerBase {
void sqrtsd(XMMRegister dst, XMMRegister src) { sqrtsd(dst, Operand(src)); }
void sqrtsd(XMMRegister dst, Operand src);
- void andpd(XMMRegister dst, XMMRegister src);
- void orpd(XMMRegister dst, XMMRegister src);
+ void andpd(XMMRegister dst, XMMRegister src) { andpd(dst, Operand(src)); }
+ void andpd(XMMRegister dst, Operand src);
+ void orpd(XMMRegister dst, XMMRegister src) { orpd(dst, Operand(src)); }
+ void orpd(XMMRegister dst, Operand src);
void ucomisd(XMMRegister dst, XMMRegister src) { ucomisd(dst, Operand(src)); }
void ucomisd(XMMRegister dst, Operand src);
@@ -1130,6 +1133,11 @@ class Assembler : public AssemblerBase {
}
void pshufd(XMMRegister dst, Operand src, uint8_t shuffle);
+ void pblendw(XMMRegister dst, XMMRegister src, uint8_t mask) {
+ pblendw(dst, Operand(src), mask);
+ }
+ void pblendw(XMMRegister dst, Operand src, uint8_t mask);
+
void pextrb(Register dst, XMMRegister src, int8_t offset) {
pextrb(Operand(dst), src, offset);
}
@@ -1346,6 +1354,12 @@ class Assembler : public AssemblerBase {
void vminsd(XMMRegister dst, XMMRegister src1, Operand src2) {
vsd(0x5d, dst, src1, src2);
}
+ void vsqrtsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+ vsqrtsd(dst, src1, Operand(src2));
+ }
+ void vsqrtsd(XMMRegister dst, XMMRegister src1, Operand src2) {
+ vsd(0x51, dst, src1, src2);
+ }
void vsd(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
void vaddss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
@@ -1384,6 +1398,12 @@ class Assembler : public AssemblerBase {
void vminss(XMMRegister dst, XMMRegister src1, Operand src2) {
vss(0x5d, dst, src1, src2);
}
+ void vsqrtss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+ vsqrtss(dst, src1, Operand(src2));
+ }
+ void vsqrtss(XMMRegister dst, XMMRegister src1, Operand src2) {
+ vss(0x51, dst, src1, src2);
+ }
void vss(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
void vrcpps(XMMRegister dst, XMMRegister src) { vrcpps(dst, Operand(src)); }
@@ -1396,6 +1416,12 @@ class Assembler : public AssemblerBase {
void vrsqrtps(XMMRegister dst, Operand src) {
vinstr(0x52, dst, xmm0, src, kNone, k0F, kWIG);
}
+ void vhaddps(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+ vhaddps(dst, src1, Operand(src2));
+ }
+ void vhaddps(XMMRegister dst, XMMRegister src1, Operand src2) {
+ vinstr(0x7C, dst, src1, src2, kF2, k0F, kWIG);
+ }
void vmovaps(XMMRegister dst, XMMRegister src) {
vps(0x28, dst, xmm0, Operand(src));
}
@@ -1420,6 +1446,12 @@ class Assembler : public AssemblerBase {
}
void vpshufd(XMMRegister dst, Operand src, uint8_t shuffle);
+ void vpblendw(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ uint8_t mask) {
+ vpblendw(dst, src1, Operand(src2), mask);
+ }
+ void vpblendw(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t mask);
+
void vpextrb(Register dst, XMMRegister src, int8_t offset) {
vpextrb(Operand(dst), src, offset);
}
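The header additions above (adc, sbb, andpd, orpd, pblendw, vsqrtsd, vhaddps, vpblendw, ...) all follow the file's convenience-overload idiom: the register-source form is an inline one-liner that wraps the register in Operand and forwards to the Operand overload, so only one real emitter exists per instruction. A minimal sketch of the pattern with placeholder types:

    #include <cstdio>

    struct XMMRegister {
      int code;
    };

    struct Operand {
      explicit Operand(XMMRegister reg) : reg_code(reg.code) {}
      int reg_code;
    };

    struct Assembler {
      // The single real emitter takes an Operand...
      void andpd(XMMRegister dst, Operand src) {
        std::printf("andpd xmm%d, xmm%d\n", dst.code, src.reg_code);
      }
      // ...and the register-register form just forwards, as in assembler-ia32.h.
      void andpd(XMMRegister dst, XMMRegister src) { andpd(dst, Operand(src)); }
    };

    int main() {
      Assembler masm;
      masm.andpd(XMMRegister{0}, XMMRegister{1});
      return 0;
    }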
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index bdae590078..48ce8ac29e 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -150,70 +150,56 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ mov(scratch, Immediate(1));
__ Cvtsi2sd(double_result, scratch);
- if (exponent_type() == TAGGED) {
- __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
- __ SmiUntag(exponent);
- __ jmp(&int_exponent);
-
- __ bind(&exponent_not_smi);
- __ movsd(double_exponent,
- FieldOperand(exponent, HeapNumber::kValueOffset));
- }
-
- if (exponent_type() != INTEGER) {
- Label fast_power, try_arithmetic_simplification;
- __ DoubleToI(exponent, double_exponent, double_scratch,
- &try_arithmetic_simplification,
- &try_arithmetic_simplification);
- __ jmp(&int_exponent);
-
- __ bind(&try_arithmetic_simplification);
- // Skip to runtime if possibly NaN (indicated by the indefinite integer).
- __ cvttsd2si(exponent, Operand(double_exponent));
- __ cmp(exponent, Immediate(0x1));
- __ j(overflow, &call_runtime);
-
- // Using FPU instructions to calculate power.
- Label fast_power_failed;
- __ bind(&fast_power);
- __ fnclex(); // Clear flags to catch exceptions later.
- // Transfer (B)ase and (E)xponent onto the FPU register stack.
- __ sub(esp, Immediate(kDoubleSize));
- __ movsd(Operand(esp, 0), double_exponent);
- __ fld_d(Operand(esp, 0)); // E
- __ movsd(Operand(esp, 0), double_base);
- __ fld_d(Operand(esp, 0)); // B, E
-
- // Exponent is in st(1) and base is in st(0)
- // B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
- // FYL2X calculates st(1) * log2(st(0))
- __ fyl2x(); // X
- __ fld(0); // X, X
- __ frndint(); // rnd(X), X
- __ fsub(1); // rnd(X), X-rnd(X)
- __ fxch(1); // X - rnd(X), rnd(X)
- // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
- __ f2xm1(); // 2^(X-rnd(X)) - 1, rnd(X)
- __ fld1(); // 1, 2^(X-rnd(X)) - 1, rnd(X)
- __ faddp(1); // 2^(X-rnd(X)), rnd(X)
- // FSCALE calculates st(0) * 2^st(1)
- __ fscale(); // 2^X, rnd(X)
- __ fstp(1); // 2^X
- // Bail out to runtime in case of exceptions in the status word.
- __ fnstsw_ax();
- __ test_b(eax,
- Immediate(0x5F)); // We check for all but precision exception.
- __ j(not_zero, &fast_power_failed, Label::kNear);
- __ fstp_d(Operand(esp, 0));
- __ movsd(double_result, Operand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
- __ jmp(&done);
+ Label fast_power, try_arithmetic_simplification;
+ __ DoubleToI(exponent, double_exponent, double_scratch,
+ &try_arithmetic_simplification, &try_arithmetic_simplification);
+ __ jmp(&int_exponent);
+
+ __ bind(&try_arithmetic_simplification);
+ // Skip to runtime if possibly NaN (indicated by the indefinite integer).
+ __ cvttsd2si(exponent, Operand(double_exponent));
+ __ cmp(exponent, Immediate(0x1));
+ __ j(overflow, &call_runtime);
+
+ // Using FPU instructions to calculate power.
+ Label fast_power_failed;
+ __ bind(&fast_power);
+ __ fnclex(); // Clear flags to catch exceptions later.
+ // Transfer (B)ase and (E)xponent onto the FPU register stack.
+ __ sub(esp, Immediate(kDoubleSize));
+ __ movsd(Operand(esp, 0), double_exponent);
+ __ fld_d(Operand(esp, 0)); // E
+ __ movsd(Operand(esp, 0), double_base);
+ __ fld_d(Operand(esp, 0)); // B, E
+
+ // Exponent is in st(1) and base is in st(0)
+ // B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
+ // FYL2X calculates st(1) * log2(st(0))
+ __ fyl2x(); // X
+ __ fld(0); // X, X
+ __ frndint(); // rnd(X), X
+ __ fsub(1); // rnd(X), X-rnd(X)
+ __ fxch(1); // X - rnd(X), rnd(X)
+ // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
+ __ f2xm1(); // 2^(X-rnd(X)) - 1, rnd(X)
+ __ fld1(); // 1, 2^(X-rnd(X)) - 1, rnd(X)
+ __ faddp(1); // 2^(X-rnd(X)), rnd(X)
+ // FSCALE calculates st(0) * 2^st(1)
+ __ fscale(); // 2^X, rnd(X)
+ __ fstp(1); // 2^X
+ // Bail out to runtime in case of exceptions in the status word.
+ __ fnstsw_ax();
+ __ test_b(eax, Immediate(0x5F)); // We check for all but precision exception.
+ __ j(not_zero, &fast_power_failed, Label::kNear);
+ __ fstp_d(Operand(esp, 0));
+ __ movsd(double_result, Operand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
+ __ jmp(&done);
- __ bind(&fast_power_failed);
- __ fninit();
- __ add(esp, Immediate(kDoubleSize));
- __ jmp(&call_runtime);
- }
+ __ bind(&fast_power_failed);
+ __ fninit();
+ __ add(esp, Immediate(kDoubleSize));
+ __ jmp(&call_runtime);
// Calculate power with integer exponent.
__ bind(&int_exponent);
@@ -423,6 +409,12 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
__ bind(&skip);
+ // Reset the masking register. This is done independent of the underlying
+ // feature flag {FLAG_branch_load_poisoning} to make the snapshot work with
+ // both configurations. It is safe to always do this, because the underlying
+ // register is caller-saved and can be arbitrarily clobbered.
+ __ ResetSpeculationPoisonRegister();
+
// Compute the handler entry address and jump to it.
__ mov(edi, Operand::StaticVariable(pending_handler_entrypoint_address));
__ jmp(edi);
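For reference, the FPU fast path that MathPowStub keeps above computes B^E through the identity spelled out in its comments: X = E * log2(B), with 2^X reconstructed as (2^(X - rnd(X)) - 1) + 1 scaled by 2^rnd(X), because F2XM1 only accepts arguments in (-1, 1). The C++ sketch below mirrors that sequence with libm calls for positive finite bases; it is a model of the arithmetic, not the stub itself.

    #include <cmath>
    #include <cstdio>

    // fyl2x -> x, frndint -> r, f2xm1 -> frac, fld1/faddp/fscale -> result.
    double FastPower(double base, double exponent) {
      double x = exponent * std::log2(base);              // valid for base > 0
      double r = std::nearbyint(x);
      double frac = std::exp2(x - r) - 1.0;                // argument in [-0.5, 0.5]
      return std::scalbn(frac + 1.0, static_cast<int>(r)); // (frac + 1) * 2^r
    }

    int main() {
      std::printf("%f vs %f\n", FastPower(2.0, 10.0), std::pow(2.0, 10.0));
      return 0;
    }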
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index ffe3c9150b..308943e923 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -5,8 +5,9 @@
#if V8_TARGET_ARCH_IA32
#include "src/codegen.h"
-#include "src/factory-inl.h"
+#include "src/heap/factory-inl.h"
#include "src/heap/heap.h"
+#include "src/isolate.h"
#include "src/macro-assembler.h"
namespace v8 {
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index ad394020e5..5f07f0803d 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -819,6 +819,13 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
int mod, regop, rm, vvvv = vex_vreg();
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
+ case 0x0E:
+ AppendToBuffer("vpblendw %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",%d", *reinterpret_cast<uint8_t*>(current));
+ current++;
+ break;
case 0x14:
AppendToBuffer("vpextrb ");
current += PrintRightOperand(current);
@@ -868,6 +875,11 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
int mod, regop, rm, vvvv = vex_vreg();
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
+ case 0x51:
+ AppendToBuffer("vsqrtsd %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x58:
AppendToBuffer("vaddsd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
@@ -904,6 +916,11 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
AppendToBuffer(",%d", *reinterpret_cast<int8_t*>(current));
current++;
break;
+ case 0x7C:
+ AppendToBuffer("vhaddps %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ break;
default:
UnimplementedInstruction();
}
@@ -911,6 +928,11 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
int mod, regop, rm, vvvv = vex_vreg();
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
+ case 0x51:
+ AppendToBuffer("vsqrtss %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x58:
AppendToBuffer("vaddss %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
@@ -1946,6 +1968,14 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(rm),
static_cast<int>(imm8));
data += 2;
+ } else if (*data == 0x0E) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("pblendw %s,", NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ AppendToBuffer(",%d", *reinterpret_cast<uint8_t*>(data));
+ data++;
} else if (*data == 0x14) {
data++;
int mod, regop, rm;
diff --git a/deps/v8/src/ia32/frame-constants-ia32.h b/deps/v8/src/ia32/frame-constants-ia32.h
index a262b92af9..f0c1dc835f 100644
--- a/deps/v8/src/ia32/frame-constants-ia32.h
+++ b/deps/v8/src/ia32/frame-constants-ia32.h
@@ -10,41 +10,43 @@ namespace internal {
class EntryFrameConstants : public AllStatic {
public:
- static const int kCallerFPOffset = -6 * kPointerSize;
+ static constexpr int kCallerFPOffset = -6 * kPointerSize;
- static const int kNewTargetArgOffset = +2 * kPointerSize;
- static const int kFunctionArgOffset = +3 * kPointerSize;
- static const int kReceiverArgOffset = +4 * kPointerSize;
- static const int kArgcOffset = +5 * kPointerSize;
- static const int kArgvOffset = +6 * kPointerSize;
+ static constexpr int kNewTargetArgOffset = +2 * kPointerSize;
+ static constexpr int kFunctionArgOffset = +3 * kPointerSize;
+ static constexpr int kReceiverArgOffset = +4 * kPointerSize;
+ static constexpr int kArgcOffset = +5 * kPointerSize;
+ static constexpr int kArgvOffset = +6 * kPointerSize;
};
class ExitFrameConstants : public TypedFrameConstants {
public:
- static const int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
- static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+ static constexpr int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+ static constexpr int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
DEFINE_TYPED_FRAME_SIZES(2);
- static const int kCallerFPOffset = 0 * kPointerSize;
- static const int kCallerPCOffset = +1 * kPointerSize;
+ static constexpr int kCallerFPOffset = 0 * kPointerSize;
+ static constexpr int kCallerPCOffset = +1 * kPointerSize;
// FP-relative displacement of the caller's SP. It points just
// below the saved PC.
- static const int kCallerSPDisplacement = +2 * kPointerSize;
+ static constexpr int kCallerSPDisplacement = +2 * kPointerSize;
- static const int kConstantPoolOffset = 0; // Not used
+ static constexpr int kConstantPoolOffset = 0; // Not used
};
class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
- static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
- static const int kLastParameterOffset = +2 * kPointerSize;
- static const int kFunctionOffset = StandardFrameConstants::kFunctionOffset;
+ static constexpr int kLocal0Offset =
+ StandardFrameConstants::kExpressionsOffset;
+ static constexpr int kLastParameterOffset = +2 * kPointerSize;
+ static constexpr int kFunctionOffset =
+ StandardFrameConstants::kFunctionOffset;
// Caller SP-relative.
- static const int kParam0Offset = -2 * kPointerSize;
- static const int kReceiverOffset = -1 * kPointerSize;
+ static constexpr int kParam0Offset = -2 * kPointerSize;
+ static constexpr int kReceiverOffset = -1 * kPointerSize;
};
} // namespace internal
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 81e91f1e4f..e0f3f73804 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -195,7 +195,8 @@ void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
void TurboAssembler::LoadUint32(XMMRegister dst, Operand src) {
Label done;
cmp(src, Immediate(0));
- ExternalReference uint32_bias = ExternalReference::address_of_uint32_bias();
+ ExternalReference uint32_bias =
+ ExternalReference::address_of_uint32_bias(isolate());
Cvtsi2sd(dst, src);
j(not_sign, &done, Label::kNear);
addsd(dst, Operand::StaticVariable(uint32_bias));
@@ -487,6 +488,19 @@ void MacroAssembler::AssertFixedArray(Register object) {
}
}
+void MacroAssembler::AssertConstructor(Register object) {
+ if (emit_debug_code()) {
+ test(object, Immediate(kSmiTagMask));
+ Check(not_equal, AbortReason::kOperandIsASmiAndNotAConstructor);
+ Push(object);
+ mov(object, FieldOperand(object, HeapObject::kMapOffset));
+ test_b(FieldOperand(object, Map::kBitFieldOffset),
+ Immediate(Map::IsConstructorBit::kMask));
+ Pop(object);
+ Check(not_zero, AbortReason::kOperandIsNotAConstructor);
+ }
+}
+
void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
test(object, Immediate(kSmiTagMask));
@@ -846,9 +860,8 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
}
-void MacroAssembler::JumpToInstructionStream(const InstructionStream* stream) {
- Address bytes_address = reinterpret_cast<Address>(stream->bytes());
- mov(kOffHeapTrampolineRegister, Immediate(bytes_address, RelocInfo::NONE));
+void MacroAssembler::JumpToInstructionStream(Address entry) {
+ mov(kOffHeapTrampolineRegister, Immediate(entry, RelocInfo::OFF_HEAP_TARGET));
jmp(kOffHeapTrampolineRegister);
}
@@ -991,27 +1004,13 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
- Label skip_hook, call_hook;
-
- ExternalReference debug_is_active =
- ExternalReference::debug_is_active_address(isolate());
- cmpb(Operand::StaticVariable(debug_is_active), Immediate(0));
- j(equal, &skip_hook);
+ Label skip_hook;
ExternalReference debug_hook_active =
ExternalReference::debug_hook_on_function_call_address(isolate());
cmpb(Operand::StaticVariable(debug_hook_active), Immediate(0));
- j(not_equal, &call_hook);
-
- Register scratch = ecx;
- mov(scratch, FieldOperand(fun, JSFunction::kSharedFunctionInfoOffset));
- mov(scratch, FieldOperand(scratch, SharedFunctionInfo::kDebugInfoOffset));
- JumpIfSmi(scratch, &skip_hook);
- mov(scratch, FieldOperand(scratch, DebugInfo::kFlagsOffset));
- test(scratch, Immediate(Smi::FromInt(DebugInfo::kBreakAtEntry)));
- j(zero, &skip_hook);
+ j(equal, &skip_hook);
- bind(&call_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -1420,6 +1419,12 @@ void TurboAssembler::Popcnt(Register dst, Operand src) {
UNREACHABLE();
}
+void MacroAssembler::LoadWeakValue(Register in_out, Label* target_if_cleared) {
+ cmp(in_out, Immediate(kClearedWeakHeapObject));
+ j(equal, target_if_cleared);
+
+ and_(in_out, Immediate(~kWeakHeapObjectMask));
+}
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
DCHECK_GT(value, 0);
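The new LoadWeakValue helper assumes the in-place weak reference scheme: a weak pointer carries a tag bit, and a dedicated sentinel value marks a reference whose target has been collected. A standalone sketch of that idea (the constants are invented for illustration and are not V8's actual encoding):

#include <cstdint>
#include <optional>

constexpr uintptr_t kWeakTagMask = 0x2;     // assumed weak-tag bit
constexpr uintptr_t kClearedWeakRef = 0x3;  // assumed "cleared" sentinel

std::optional<uintptr_t> LoadWeakValueSketch(uintptr_t in) {
  if (in == kClearedWeakRef) return std::nullopt;  // i.e. jump to target_if_cleared
  return in & ~kWeakTagMask;                       // strip the weak tag bit
}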
@@ -1659,7 +1664,9 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
}
}
-void TurboAssembler::ResetSpeculationPoisonRegister() { UNREACHABLE(); }
+void TurboAssembler::ResetSpeculationPoisonRegister() {
+ mov(kSpeculationPoisonRegister, Immediate(-1));
+}
} // namespace internal
} // namespace v8
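The new ResetSpeculationPoisonRegister body materializes an all-ones mask. The poisoning scheme ANDs speculatively loaded values with this register, so -1 is the identity (poisoning effectively disabled) while 0 wipes loaded data on a mispredicted path. A tiny illustration of the masking idea (not V8 code):

#include <cassert>
#include <cstdint>

int main() {
  uint32_t loaded = 0xDEADBEEF;
  uint32_t mask_ok = ~0u;  // branch resolved as predicted
  uint32_t mask_bad = 0u;  // mispredicted: neutralize the loaded value
  assert((loaded & mask_ok) == loaded);
  assert((loaded & mask_bad) == 0u);
}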
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index ce299ba5a7..c51e6b9d2d 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -179,7 +179,7 @@ class TurboAssembler : public Assembler {
void ShlPair(Register high, Register low, uint8_t imm8);
void ShlPair_cl(Register high, Register low);
void ShrPair(Register high, Register low, uint8_t imm8);
- void ShrPair_cl(Register high, Register src);
+ void ShrPair_cl(Register high, Register low);
void SarPair(Register high, Register low, uint8_t imm8);
void SarPair_cl(Register high, Register low);
@@ -222,12 +222,15 @@ class TurboAssembler : public Assembler {
} \
}
+ AVX_OP2_WITH_TYPE(Rcpps, rcpps, XMMRegister, const Operand&)
+ AVX_OP2_WITH_TYPE(Rsqrtps, rsqrtps, XMMRegister, const Operand&)
AVX_OP2_WITH_TYPE(Movdqu, movdqu, XMMRegister, Operand)
AVX_OP2_WITH_TYPE(Movdqu, movdqu, Operand, XMMRegister)
AVX_OP2_WITH_TYPE(Movd, movd, XMMRegister, Register)
AVX_OP2_WITH_TYPE(Movd, movd, XMMRegister, Operand)
AVX_OP2_WITH_TYPE(Movd, movd, Register, XMMRegister)
AVX_OP2_WITH_TYPE(Movd, movd, Operand, XMMRegister)
+ AVX_OP2_WITH_TYPE(Cvtdq2ps, cvtdq2ps, XMMRegister, Operand)
#undef AVX_OP2_WITH_TYPE
@@ -251,8 +254,12 @@ class TurboAssembler : public Assembler {
AVX_OP3_XO(Psubw, psubw)
AVX_OP3_XO(Psubd, psubd)
AVX_OP3_XO(Pxor, pxor)
+ AVX_OP3_XO(Andps, andps)
+ AVX_OP3_XO(Andpd, andpd)
AVX_OP3_XO(Xorps, xorps)
AVX_OP3_XO(Xorpd, xorpd)
+ AVX_OP3_XO(Sqrtss, sqrtss)
+ AVX_OP3_XO(Sqrtsd, sqrtsd)
#undef AVX_OP3_XO
#undef AVX_OP3_WITH_TYPE
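The new AVX_OP3_XO entries rely on the wrapper pattern used throughout this header: emit the VEX-encoded form when AVX is available, otherwise fall back to the legacy SSE encoding. A standalone sketch of that token-pasting dispatch (plain functions stand in for the assembler; this is not the real macro body):

#include <cstdio>

bool HasAVX() { return true; }  // stand-in for the CPU feature check

void sqrtsd(int dst, int src) { std::printf("sqrtsd %d, %d\n", dst, src); }
void vsqrtsd(int dst, int src1, int src2) {
  std::printf("vsqrtsd %d, %d, %d\n", dst, src1, src2);
}

#define AVX_OP3_XO_SKETCH(macro_name, name) \
  void macro_name(int dst, int src) {       \
    if (HasAVX()) {                         \
      v##name(dst, dst, src);               \
    } else {                                \
      name(dst, src);                       \
    }                                       \
  }

AVX_OP3_XO_SKETCH(Sqrtsd, sqrtsd)

int main() { Sqrtsd(0, 1); }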
@@ -532,6 +539,9 @@ class MacroAssembler : public TurboAssembler {
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
+ // Abort execution if argument is not a Constructor, enabled via --debug-code.
+ void AssertConstructor(Register object);
+
// Abort execution if argument is not a JSBoundFunction,
// enabled via --debug-code.
void AssertBoundFunction(Register object);
@@ -587,7 +597,7 @@ class MacroAssembler : public TurboAssembler {
bool builtin_exit_frame = false);
// Generates a trampoline to jump to the off-heap instruction stream.
- void JumpToInstructionStream(const InstructionStream* stream);
+ void JumpToInstructionStream(Address entry);
// ---------------------------------------------------------------------------
// Utilities
@@ -603,6 +613,10 @@ class MacroAssembler : public TurboAssembler {
void PopReturnAddressTo(Register dst) { pop(dst); }
// ---------------------------------------------------------------------------
+ // In-place weak references.
+ void LoadWeakValue(Register in_out, Label* target_if_cleared);
+
+ // ---------------------------------------------------------------------------
// StatsCounter support
void IncrementCounter(StatsCounter* counter, int value);
diff --git a/deps/v8/src/ia32/sse-instr.h b/deps/v8/src/ia32/sse-instr.h
index 7996ee50be..cb4cba3ad0 100644
--- a/deps/v8/src/ia32/sse-instr.h
+++ b/deps/v8/src/ia32/sse-instr.h
@@ -44,6 +44,8 @@
V(pxor, 66, 0F, EF)
#define SSSE3_INSTRUCTION_LIST(V) \
+ V(phaddd, 66, 0F, 38, 02) \
+ V(phaddw, 66, 0F, 38, 01) \
V(pshufb, 66, 0F, 38, 00) \
V(psignb, 66, 0F, 38, 08) \
V(psignw, 66, 0F, 38, 09) \
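These instruction tables are X-macro lists: each V(mnemonic, opcode bytes...) row is expanded by whatever macro the including file supplies, e.g. to declare assembler methods or disassembler entries. A self-contained sketch of consuming such a list (the list and consumer below are illustrative, not the real ones):

#include <cstdio>

#define DEMO_INSTRUCTION_LIST(V) \
  V(phaddd, 0x02)                \
  V(phaddw, 0x01)                \
  V(pshufb, 0x00)

#define PRINT_ENTRY(name, opcode) \
  std::printf("%-8s last opcode byte 0x%02X\n", #name, static_cast<unsigned>(opcode));

int main() { DEMO_INSTRUCTION_LIST(PRINT_ENTRY) }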
diff --git a/deps/v8/src/ic/accessor-assembler.cc b/deps/v8/src/ic/accessor-assembler.cc
index 9800149ae1..7ffa263410 100644
--- a/deps/v8/src/ic/accessor-assembler.cc
+++ b/deps/v8/src/ic/accessor-assembler.cc
@@ -17,14 +17,19 @@ namespace internal {
using compiler::CodeAssemblerState;
using compiler::Node;
+template <typename T>
+using TNode = compiler::TNode<T>;
+template <typename T>
+using SloppyTNode = compiler::SloppyTNode<T>;
//////////////////// Private helpers.
// Loads dataX field from the DataHandler object.
-Node* AccessorAssembler::LoadHandlerDataField(Node* handler, int data_index) {
+TNode<Object> AccessorAssembler::LoadHandlerDataField(
+ SloppyTNode<DataHandler> handler, int data_index) {
#ifdef DEBUG
- Node* handler_map = LoadMap(handler);
- Node* instance_type = LoadMapInstanceType(handler_map);
+ TNode<Map> handler_map = LoadMap(handler);
+ TNode<Int32T> instance_type = LoadMapInstanceType(handler_map);
#endif
CSA_ASSERT(this,
Word32Or(InstanceTypeEqual(instance_type, LOAD_HANDLER_TYPE),
@@ -124,7 +129,7 @@ void AccessorAssembler::HandlePolymorphicCase(Node* receiver_map,
Label next_entry(this);
Node* cached_map =
- LoadWeakCellValue(LoadFixedArrayElement(feedback, map_index));
+ LoadWeakCellValue(CAST(LoadFixedArrayElement(feedback, map_index)));
GotoIf(WordNotEqual(receiver_map, cached_map), &next_entry);
// Found, now call handler.
@@ -144,7 +149,7 @@ void AccessorAssembler::HandlePolymorphicCase(Node* receiver_map,
start_index, end_index,
[this, receiver_map, feedback, if_handler, var_handler](Node* index) {
Node* cached_map =
- LoadWeakCellValue(LoadFixedArrayElement(feedback, index));
+ LoadWeakCellValue(CAST(LoadFixedArrayElement(feedback, index)));
Label next_entry(this);
GotoIf(WordNotEqual(receiver_map, cached_map), &next_entry);
@@ -339,7 +344,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
Node* length = LoadStringLengthAsWord(holder);
GotoIf(UintPtrGreaterThanOrEqual(intptr_index, length), &if_oob);
TNode<Int32T> code = StringCharCodeAt(holder, intptr_index);
- TNode<String> result = StringFromCharCode(code);
+ TNode<String> result = StringFromSingleCharCode(code);
Return(result);
BIND(&if_oob);
@@ -467,11 +472,10 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
// Context is stored either in data2 or data3 field depending on whether
// the access check is enabled for this handler or not.
- Node* context_cell = Select(
+ TNode<Object> context_cell = Select<Object>(
IsSetWord<LoadHandler::DoAccessCheckOnReceiverBits>(handler_word),
[=] { return LoadHandlerDataField(handler, 3); },
- [=] { return LoadHandlerDataField(handler, 2); },
- MachineRepresentation::kTagged);
+ [=] { return LoadHandlerDataField(handler, 2); });
Node* context = LoadWeakCellValueUnchecked(context_cell);
Node* foreign =
@@ -609,15 +613,9 @@ Node* AccessorAssembler::HandleProtoHandler(
// Check prototype validity cell.
//
{
- Label done(this);
- Node* validity_cell =
+ Node* maybe_validity_cell =
LoadObjectField(handler, ICHandler::kValidityCellOffset);
- GotoIf(WordEqual(validity_cell, SmiConstant(0)), &done);
- Node* cell_value = LoadObjectField(validity_cell, Cell::kValueOffset);
- GotoIf(WordNotEqual(cell_value, SmiConstant(Map::kPrototypeChainValid)),
- miss);
- Goto(&done);
- BIND(&done);
+ CheckPrototypeValidityCell(maybe_validity_cell, miss);
}
//
@@ -790,7 +788,7 @@ void AccessorAssembler::HandleStoreICHandlerCase(
ElementSupport support_elements) {
Label if_smi_handler(this), if_nonsmi_handler(this);
Label if_proto_handler(this), if_element_handler(this), call_handler(this),
- store_global(this);
+ store_transition_or_global(this);
Branch(TaggedIsSmi(handler), &if_smi_handler, &if_nonsmi_handler);
@@ -855,8 +853,7 @@ void AccessorAssembler::HandleStoreICHandlerCase(
BIND(&data);
// Handle non-transitioning field stores.
- HandleStoreICSmiHandlerCase(handler_word, holder, p->value, nullptr,
- miss);
+ HandleStoreICSmiHandlerCase(handler_word, holder, p->value, miss);
}
BIND(&if_proxy);
@@ -866,7 +863,7 @@ void AccessorAssembler::HandleStoreICHandlerCase(
BIND(&if_nonsmi_handler);
{
Node* handler_map = LoadMap(handler);
- GotoIf(IsWeakCellMap(handler_map), &store_global);
+ GotoIf(IsWeakCellMap(handler_map), &store_transition_or_global);
Branch(IsCodeMap(handler_map), &call_handler, &if_proto_handler);
}
@@ -881,14 +878,299 @@ void AccessorAssembler::HandleStoreICHandlerCase(
p->value, p->slot, p->vector);
}
- BIND(&store_global);
+ BIND(&store_transition_or_global);
{
// Load value or miss if the {handler} weak cell is cleared.
- Node* cell = LoadWeakCellValue(handler, miss);
+ Node* map_or_property_cell = LoadWeakCellValue(handler, miss);
- ExitPoint direct_exit(this);
- StoreGlobalIC_PropertyCellCase(cell, p->value, &direct_exit, miss);
+ Label store_global(this), store_transition(this);
+ Branch(IsMap(map_or_property_cell), &store_transition, &store_global);
+
+ BIND(&store_global);
+ {
+ ExitPoint direct_exit(this);
+ StoreGlobalIC_PropertyCellCase(map_or_property_cell, p->value,
+ &direct_exit, miss);
+ }
+ BIND(&store_transition);
+ HandleStoreICTransitionMapHandlerCase(p, map_or_property_cell, miss, false);
+ Return(p->value);
+ }
+}
+
+void AccessorAssembler::HandleStoreICTransitionMapHandlerCase(
+ const StoreICParameters* p, Node* transition_map, Label* miss,
+ bool validate_transition_handler) {
+ Node* maybe_validity_cell =
+ LoadObjectField(transition_map, Map::kPrototypeValidityCellOffset);
+ CheckPrototypeValidityCell(maybe_validity_cell, miss);
+
+ TNode<Uint32T> bitfield3 = LoadMapBitField3(transition_map);
+ CSA_ASSERT(this, IsClearWord32<Map::IsDictionaryMapBit>(bitfield3));
+ GotoIf(IsSetWord32<Map::IsDeprecatedBit>(bitfield3), miss);
+
+ // Load last descriptor details.
+ Node* nof = DecodeWordFromWord32<Map::NumberOfOwnDescriptorsBits>(bitfield3);
+ CSA_ASSERT(this, WordNotEqual(nof, IntPtrConstant(0)));
+ Node* descriptors = LoadMapDescriptors(transition_map);
+
+ Node* factor = IntPtrConstant(DescriptorArray::kEntrySize);
+ Node* last_key_index = IntPtrAdd(
+ IntPtrConstant(DescriptorArray::ToKeyIndex(-1)), IntPtrMul(nof, factor));
+ if (validate_transition_handler) {
+ Node* key = LoadFixedArrayElement(descriptors, last_key_index);
+ GotoIf(WordNotEqual(key, p->name), miss);
+ } else {
+ CSA_ASSERT(
+ this,
+ WordEqual(LoadFixedArrayElement(descriptors, last_key_index), p->name));
+ }
+ Node* details =
+ LoadDetailsByKeyIndex<DescriptorArray>(descriptors, last_key_index);
+ if (validate_transition_handler) {
+ // Follow transitions only in the following cases:
+ // 1) name is a non-private symbol and attributes equal to NONE,
+ // 2) name is a private symbol and attributes equal to DONT_ENUM.
+ Label attributes_ok(this);
+ const int kAttributesDontDeleteReadOnlyMask =
+ PropertyDetails::kAttributesDontDeleteMask |
+ PropertyDetails::kAttributesReadOnlyMask;
+ // Both DontDelete and ReadOnly attributes must not be set.
+ GotoIf(IsSetWord32(details, kAttributesDontDeleteReadOnlyMask), miss);
+
+ // DontEnum attribute is allowed only for private symbols and vice versa.
+ Branch(Word32Equal(
+ IsSetWord32(details, PropertyDetails::kAttributesDontEnumMask),
+ IsPrivateSymbol(p->name)),
+ &attributes_ok, miss);
+
+ BIND(&attributes_ok);
+ }
+
+ OverwriteExistingFastDataProperty(p->receiver, transition_map, descriptors,
+ last_key_index, details, p->value, miss,
+ true);
+}
+
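The attribute check in HandleStoreICTransitionMapHandlerCase reduces to: DontDelete and ReadOnly must be clear, and DontEnum must be set exactly when the stored name is a private symbol. As a plain predicate (the attribute bit values below are assumed for illustration, not V8's):

#include <cassert>

constexpr int kReadOnly = 1 << 0;  // assumed bit layout
constexpr int kDontEnum = 1 << 1;
constexpr int kDontDelete = 1 << 2;

bool TransitionAttributesOk(int attrs, bool name_is_private_symbol) {
  if (attrs & (kDontDelete | kReadOnly)) return false;  // never allowed
  return ((attrs & kDontEnum) != 0) == name_is_private_symbol;
}

int main() {
  assert(TransitionAttributesOk(0, false));          // ordinary name, NONE
  assert(TransitionAttributesOk(kDontEnum, true));   // private symbol, DONT_ENUM
  assert(!TransitionAttributesOk(kDontEnum, false));
  assert(!TransitionAttributesOk(kReadOnly, false));
}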
+void AccessorAssembler::CheckFieldType(Node* descriptors, Node* name_index,
+ Node* representation, Node* value,
+ Label* bailout) {
+ Label r_smi(this), r_double(this), r_heapobject(this), all_fine(this);
+ // Ignore FLAG_track_fields etc. and always emit code for all checks,
+ // because this builtin is part of the snapshot and therefore should
+ // be flag independent.
+ GotoIf(Word32Equal(representation, Int32Constant(Representation::kSmi)),
+ &r_smi);
+ GotoIf(Word32Equal(representation, Int32Constant(Representation::kDouble)),
+ &r_double);
+ GotoIf(
+ Word32Equal(representation, Int32Constant(Representation::kHeapObject)),
+ &r_heapobject);
+ GotoIf(Word32Equal(representation, Int32Constant(Representation::kNone)),
+ bailout);
+ CSA_ASSERT(this, Word32Equal(representation,
+ Int32Constant(Representation::kTagged)));
+ Goto(&all_fine);
+
+ BIND(&r_smi);
+ { Branch(TaggedIsSmi(value), &all_fine, bailout); }
+
+ BIND(&r_double);
+ {
+ GotoIf(TaggedIsSmi(value), &all_fine);
+ Node* value_map = LoadMap(value);
+ // While supporting mutable HeapNumbers would be straightforward, such
+ // objects should not end up here anyway.
+ CSA_ASSERT(this,
+ WordNotEqual(value_map,
+ LoadRoot(Heap::kMutableHeapNumberMapRootIndex)));
+ Branch(IsHeapNumberMap(value_map), &all_fine, bailout);
+ }
+
+ BIND(&r_heapobject);
+ {
+ GotoIf(TaggedIsSmi(value), bailout);
+ Node* field_type =
+ LoadValueByKeyIndex<DescriptorArray>(descriptors, name_index);
+ intptr_t kNoneType = reinterpret_cast<intptr_t>(FieldType::None());
+ intptr_t kAnyType = reinterpret_cast<intptr_t>(FieldType::Any());
+ // FieldType::None can't hold any value.
+ GotoIf(WordEqual(field_type, IntPtrConstant(kNoneType)), bailout);
+ // FieldType::Any can hold any value.
+ GotoIf(WordEqual(field_type, IntPtrConstant(kAnyType)), &all_fine);
+ CSA_ASSERT(this, IsWeakCell(field_type));
+ // Cleared WeakCells count as FieldType::None, which can't hold any value.
+ field_type = LoadWeakCellValue(field_type, bailout);
+ // FieldType::Class(...) performs a map check.
+ CSA_ASSERT(this, IsMap(field_type));
+ Branch(WordEqual(LoadMap(value), field_type), &all_fine, bailout);
+ }
+
+ BIND(&all_fine);
+}
+
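CheckFieldType above mirrors the runtime representation check: Smi fields only take Smis, Double fields take Smis or heap numbers, HeapObject fields consult the descriptor's FieldType (None rejects everything, Any accepts everything, a class type is an exact map check), and Tagged accepts anything. A simplified restatement of that decision table (stand-in types, not V8's):

#include <cassert>

enum class Representation { kSmi, kDouble, kHeapObject, kTagged, kNone };
enum class FieldType { kNone, kAny, kClass };  // kClass stands for FieldType::Class(map)

struct Value {
  bool is_smi;
  bool is_heap_number;
  bool map_matches_field_type;  // only meaningful for FieldType::kClass
};

bool FieldAcceptsValue(Representation rep, FieldType type, const Value& v) {
  switch (rep) {
    case Representation::kSmi:    return v.is_smi;
    case Representation::kDouble: return v.is_smi || v.is_heap_number;
    case Representation::kHeapObject:
      if (v.is_smi || type == FieldType::kNone) return false;
      if (type == FieldType::kAny) return true;
      return v.map_matches_field_type;  // the map check
    case Representation::kTagged: return true;
    case Representation::kNone:   return false;  // always bail out
  }
  return false;
}

int main() {
  assert(FieldAcceptsValue(Representation::kDouble, FieldType::kAny, {true, false, false}));
  assert(!FieldAcceptsValue(Representation::kHeapObject, FieldType::kNone, {false, false, true}));
}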
+void AccessorAssembler::OverwriteExistingFastDataProperty(
+ Node* object, Node* object_map, Node* descriptors,
+ Node* descriptor_name_index, Node* details, Node* value, Label* slow,
+ bool do_transitioning_store) {
+ Label done(this), if_field(this), if_descriptor(this);
+
+ CSA_ASSERT(this,
+ Word32Equal(DecodeWord32<PropertyDetails::KindField>(details),
+ Int32Constant(kData)));
+
+ Branch(Word32Equal(DecodeWord32<PropertyDetails::LocationField>(details),
+ Int32Constant(kField)),
+ &if_field, &if_descriptor);
+
+ BIND(&if_field);
+ {
+ if (FLAG_track_constant_fields && !do_transitioning_store) {
+ // TODO(ishell): Taking the slow path is not necessary if new and old
+ // values are identical.
+ GotoIf(Word32Equal(DecodeWord32<PropertyDetails::ConstnessField>(details),
+ Int32Constant(kConst)),
+ slow);
+ }
+
+ Node* representation =
+ DecodeWord32<PropertyDetails::RepresentationField>(details);
+
+ CheckFieldType(descriptors, descriptor_name_index, representation, value,
+ slow);
+
+ Node* field_index =
+ DecodeWordFromWord32<PropertyDetails::FieldIndexField>(details);
+ field_index = IntPtrAdd(field_index,
+ LoadMapInobjectPropertiesStartInWords(object_map));
+ Node* instance_size_in_words = LoadMapInstanceSizeInWords(object_map);
+
+ Label inobject(this), backing_store(this);
+ Branch(UintPtrLessThan(field_index, instance_size_in_words), &inobject,
+ &backing_store);
+
+ BIND(&inobject);
+ {
+ Node* field_offset = TimesPointerSize(field_index);
+ Label tagged_rep(this), double_rep(this);
+ Branch(
+ Word32Equal(representation, Int32Constant(Representation::kDouble)),
+ &double_rep, &tagged_rep);
+ BIND(&double_rep);
+ {
+ Node* double_value = ChangeNumberToFloat64(value);
+ if (FLAG_unbox_double_fields) {
+ if (do_transitioning_store) {
+ StoreMap(object, object_map);
+ }
+ StoreObjectFieldNoWriteBarrier(object, field_offset, double_value,
+ MachineRepresentation::kFloat64);
+ } else {
+ if (do_transitioning_store) {
+ Node* mutable_heap_number =
+ AllocateHeapNumberWithValue(double_value, MUTABLE);
+ StoreMap(object, object_map);
+ StoreObjectField(object, field_offset, mutable_heap_number);
+ } else {
+ Node* mutable_heap_number = LoadObjectField(object, field_offset);
+ StoreHeapNumberValue(mutable_heap_number, double_value);
+ }
+ }
+ Goto(&done);
+ }
+
+ BIND(&tagged_rep);
+ {
+ if (do_transitioning_store) {
+ StoreMap(object, object_map);
+ }
+ StoreObjectField(object, field_offset, value);
+ Goto(&done);
+ }
+ }
+
+ BIND(&backing_store);
+ {
+ Node* backing_store_index =
+ IntPtrSub(field_index, instance_size_in_words);
+
+ if (do_transitioning_store) {
+      // Allocate the mutable heap number before extending the properties
+      // backing store, so that the heap verifier never sees the heap in an
+      // inconsistent state.
+ VARIABLE(var_value, MachineRepresentation::kTagged, value);
+ {
+ Label cont(this);
+ GotoIf(Word32NotEqual(representation,
+ Int32Constant(Representation::kDouble)),
+ &cont);
+ {
+ Node* double_value = ChangeNumberToFloat64(value);
+ Node* mutable_heap_number =
+ AllocateHeapNumberWithValue(double_value, MUTABLE);
+ var_value.Bind(mutable_heap_number);
+ Goto(&cont);
+ }
+ BIND(&cont);
+ }
+
+ Node* properties =
+ ExtendPropertiesBackingStore(object, backing_store_index);
+ StoreFixedArrayElement(properties, backing_store_index,
+ var_value.value());
+ StoreMap(object, object_map);
+ Goto(&done);
+
+ } else {
+ Label tagged_rep(this), double_rep(this);
+ Node* properties = LoadFastProperties(object);
+ Branch(
+ Word32Equal(representation, Int32Constant(Representation::kDouble)),
+ &double_rep, &tagged_rep);
+ BIND(&double_rep);
+ {
+ Node* mutable_heap_number =
+ LoadFixedArrayElement(properties, backing_store_index);
+ Node* double_value = ChangeNumberToFloat64(value);
+ StoreHeapNumberValue(mutable_heap_number, double_value);
+ Goto(&done);
+ }
+ BIND(&tagged_rep);
+ {
+ StoreFixedArrayElement(properties, backing_store_index, value);
+ Goto(&done);
+ }
+ }
+ }
+ }
+
+ BIND(&if_descriptor);
+ {
+ // Check that constant matches value.
+ Node* constant = LoadValueByKeyIndex<DescriptorArray>(
+ descriptors, descriptor_name_index);
+ GotoIf(WordNotEqual(value, constant), slow);
+
+ if (do_transitioning_store) {
+ StoreMap(object, object_map);
+ }
+ Goto(&done);
}
+ BIND(&done);
+}
+
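OverwriteExistingFastDataProperty picks between an in-object slot and the out-of-object property array by index arithmetic alone: the descriptor's field index is shifted by the start of in-object properties, compared against the instance size, and anything past the instance size goes to the backing store at (field_index - instance_size). A numeric sketch (the word counts below are invented):

#include <cstdio>

int main() {
  const int inobject_properties_start = 3;  // assumed map layout
  const int instance_size_in_words = 5;     // leaves two in-object property slots

  for (int descriptor_field = 0; descriptor_field < 4; ++descriptor_field) {
    int field_index = descriptor_field + inobject_properties_start;
    if (field_index < instance_size_in_words) {
      std::printf("field %d -> in-object word %d\n", descriptor_field, field_index);
    } else {
      std::printf("field %d -> backing store slot %d\n", descriptor_field,
                  field_index - instance_size_in_words);
    }
  }
}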
+void AccessorAssembler::CheckPrototypeValidityCell(Node* maybe_validity_cell,
+ Label* miss) {
+ Label done(this);
+ GotoIf(WordEqual(maybe_validity_cell, SmiConstant(Map::kPrototypeChainValid)),
+ &done);
+ CSA_ASSERT(this, TaggedIsNotSmi(maybe_validity_cell));
+
+ Node* cell_value = LoadObjectField(maybe_validity_cell, Cell::kValueOffset);
+ Branch(WordEqual(cell_value, SmiConstant(Map::kPrototypeChainValid)), &done,
+ miss);
+
+ BIND(&done);
}
void AccessorAssembler::HandleStoreAccessor(const StoreICParameters* p,
@@ -943,7 +1225,6 @@ void AccessorAssembler::HandleStoreICProtoHandler(
p, handler, on_code_handler,
// on_found_on_receiver
[=](Node* properties, Node* name_index) {
- // TODO(ishell): combine with |found| case inside |if_store_normal|.
Node* details =
LoadDetailsByKeyIndex<NameDictionary>(properties, name_index);
// Check that the property is a writable data property (no accessor).
@@ -958,99 +1239,20 @@ void AccessorAssembler::HandleStoreICProtoHandler(
},
miss, ic_mode);
- Label if_transition_map(this), if_holder_object(this);
-
- Node* maybe_transition_or_holder_cell = LoadHandlerDataField(handler, 1);
- Node* maybe_transition_or_holder =
- LoadWeakCellValue(maybe_transition_or_holder_cell, miss);
- Branch(IsMap(maybe_transition_or_holder), &if_transition_map,
- &if_holder_object);
-
- BIND(&if_transition_map);
{
- Label if_transition_to_constant(this), if_store_normal(this);
-
- Node* holder = p->receiver;
- Node* transition_map = maybe_transition_or_holder;
+ Label if_add_normal(this), if_store_global_proxy(this), if_api_setter(this),
+ if_accessor(this), if_native_data_property(this);
- GotoIf(IsDeprecatedMap(transition_map), miss);
+ CSA_ASSERT(this, TaggedIsSmi(smi_handler));
Node* handler_word = SmiUntag(smi_handler);
Node* handler_kind = DecodeWord<StoreHandler::KindBits>(handler_word);
GotoIf(WordEqual(handler_kind, IntPtrConstant(StoreHandler::kNormal)),
- &if_store_normal);
- GotoIf(WordEqual(handler_kind,
- IntPtrConstant(StoreHandler::kTransitionToConstant)),
- &if_transition_to_constant);
-
- CSA_ASSERT(this,
- WordEqual(handler_kind,
- IntPtrConstant(StoreHandler::kTransitionToField)));
-
- // Handle transitioning field stores.
- HandleStoreICSmiHandlerCase(handler_word, holder, p->value, transition_map,
- miss);
-
- BIND(&if_transition_to_constant);
- {
- // Check that constant matches value.
- Node* descriptor = DecodeWord<StoreHandler::DescriptorBits>(handler_word);
- Node* constant = LoadDescriptorValue(transition_map, descriptor);
- GotoIf(WordNotEqual(p->value, constant), miss);
-
- StoreMap(p->receiver, transition_map);
- Return(p->value);
- }
-
- BIND(&if_store_normal);
- {
- Node* properties = LoadSlowProperties(p->receiver);
-
- VARIABLE(var_name_index, MachineType::PointerRepresentation());
- Label found(this, &var_name_index), not_found(this);
- NameDictionaryLookup<NameDictionary>(properties, p->name, &found,
- &var_name_index, &not_found);
- BIND(&found);
- {
- Node* details = LoadDetailsByKeyIndex<NameDictionary>(
- properties, var_name_index.value());
- // Check that the property is a writable data property (no accessor).
- const int kTypeAndReadOnlyMask =
- PropertyDetails::KindField::kMask |
- PropertyDetails::kAttributesReadOnlyMask;
- STATIC_ASSERT(kData == 0);
- GotoIf(IsSetWord32(details, kTypeAndReadOnlyMask), miss);
-
- StoreValueByKeyIndex<NameDictionary>(properties, var_name_index.value(),
- p->value);
- Return(p->value);
- }
-
- BIND(&not_found);
- {
- Label slow(this);
- Node* receiver_map = LoadMap(p->receiver);
- InvalidateValidityCellIfPrototype(receiver_map);
+ &if_add_normal);
- Add<NameDictionary>(properties, p->name, p->value, &slow);
- Return(p->value);
-
- BIND(&slow);
- TailCallRuntime(Runtime::kAddDictionaryProperty, p->context,
- p->receiver, p->name, p->value);
- }
- }
- }
- BIND(&if_holder_object);
- {
- Label if_store_global_proxy(this), if_api_setter(this), if_accessor(this),
- if_native_data_property(this);
- Node* holder = maybe_transition_or_holder;
-
- CSA_ASSERT(this, TaggedIsSmi(smi_handler));
- Node* handler_word = SmiUntag(smi_handler);
+ Node* holder_cell = LoadHandlerDataField(handler, 1);
+ Node* holder = LoadWeakCellValue(holder_cell, miss);
- Node* handler_kind = DecodeWord<StoreHandler::KindBits>(handler_word);
GotoIf(WordEqual(handler_kind, IntPtrConstant(StoreHandler::kGlobalProxy)),
&if_store_global_proxy);
@@ -1072,6 +1274,25 @@ void AccessorAssembler::HandleStoreICProtoHandler(
WordEqual(handler_kind, IntPtrConstant(StoreHandler::kProxy)));
HandleStoreToProxy(p, holder, miss, support_elements);
+ BIND(&if_add_normal);
+ {
+ // This is a case of "transitioning store" to a dictionary mode object
+      // when the property does not exist yet. The "existing property"
+ // case is covered above by LookupOnReceiver bit handling of the smi
+ // handler.
+ Label slow(this);
+ Node* receiver_map = LoadMap(p->receiver);
+ InvalidateValidityCellIfPrototype(receiver_map);
+
+ Node* properties = LoadSlowProperties(p->receiver);
+ Add<NameDictionary>(properties, p->name, p->value, &slow);
+ Return(p->value);
+
+ BIND(&slow);
+ TailCallRuntime(Runtime::kAddDictionaryProperty, p->context, p->receiver,
+ p->name, p->value);
+ }
+
BIND(&if_accessor);
HandleStoreAccessor(p, holder, handler_word);
@@ -1086,11 +1307,10 @@ void AccessorAssembler::HandleStoreICProtoHandler(
// Context is stored either in data2 or data3 field depending on whether
// the access check is enabled for this handler or not.
- Node* context_cell = Select(
+ TNode<Object> context_cell = Select<Object>(
IsSetWord<LoadHandler::DoAccessCheckOnReceiverBits>(handler_word),
[=] { return LoadHandlerDataField(handler, 3); },
- [=] { return LoadHandlerDataField(handler, 2); },
- MachineRepresentation::kTagged);
+ [=] { return LoadHandlerDataField(handler, 2); });
Node* context = LoadWeakCellValueUnchecked(context_cell);
@@ -1177,30 +1397,19 @@ void AccessorAssembler::HandleStoreToProxy(const StoreICParameters* p,
void AccessorAssembler::HandleStoreICSmiHandlerCase(Node* handler_word,
Node* holder, Node* value,
- Node* transition,
Label* miss) {
- Comment(transition ? "transitioning field store" : "field store");
+ Comment("field store");
#ifdef DEBUG
Node* handler_kind = DecodeWord<StoreHandler::KindBits>(handler_word);
- if (transition) {
+ if (FLAG_track_constant_fields) {
CSA_ASSERT(
this,
- Word32Or(
- WordEqual(handler_kind,
- IntPtrConstant(StoreHandler::kTransitionToField)),
- WordEqual(handler_kind,
- IntPtrConstant(StoreHandler::kTransitionToConstant))));
+ Word32Or(WordEqual(handler_kind, IntPtrConstant(StoreHandler::kField)),
+ WordEqual(handler_kind,
+ IntPtrConstant(StoreHandler::kConstField))));
} else {
- if (FLAG_track_constant_fields) {
- CSA_ASSERT(
- this, Word32Or(WordEqual(handler_kind,
- IntPtrConstant(StoreHandler::kField)),
- WordEqual(handler_kind,
- IntPtrConstant(StoreHandler::kConstField))));
- } else {
- CSA_ASSERT(this,
- WordEqual(handler_kind, IntPtrConstant(StoreHandler::kField)));
- }
+ CSA_ASSERT(this,
+ WordEqual(handler_kind, IntPtrConstant(StoreHandler::kField)));
}
#endif
@@ -1225,40 +1434,37 @@ void AccessorAssembler::HandleStoreICSmiHandlerCase(Node* handler_word,
{
Comment("store tagged field");
HandleStoreFieldAndReturn(handler_word, holder, Representation::Tagged(),
- value, transition, miss);
+ value, miss);
}
BIND(&if_double_field);
{
Comment("store double field");
HandleStoreFieldAndReturn(handler_word, holder, Representation::Double(),
- value, transition, miss);
+ value, miss);
}
BIND(&if_heap_object_field);
{
Comment("store heap object field");
HandleStoreFieldAndReturn(handler_word, holder,
- Representation::HeapObject(), value, transition,
- miss);
+ Representation::HeapObject(), value, miss);
}
BIND(&if_smi_field);
{
Comment("store smi field");
HandleStoreFieldAndReturn(handler_word, holder, Representation::Smi(),
- value, transition, miss);
+ value, miss);
}
}
void AccessorAssembler::HandleStoreFieldAndReturn(Node* handler_word,
Node* holder,
Representation representation,
- Node* value, Node* transition,
- Label* miss) {
- bool transition_to_field = transition != nullptr;
- Node* prepared_value = PrepareValueForStore(
- handler_word, holder, representation, transition, value, miss);
+ Node* value, Label* miss) {
+ Node* prepared_value =
+ PrepareValueForStore(handler_word, holder, representation, value, miss);
Label if_inobject(this), if_out_of_object(this);
Branch(IsSetWord<StoreHandler::IsInobjectBits>(handler_word), &if_inobject,
@@ -1267,32 +1473,21 @@ void AccessorAssembler::HandleStoreFieldAndReturn(Node* handler_word,
BIND(&if_inobject);
{
StoreNamedField(handler_word, holder, true, representation, prepared_value,
- transition_to_field, miss);
- if (transition_to_field) {
- StoreMap(holder, transition);
- }
+ miss);
Return(value);
}
BIND(&if_out_of_object);
{
- if (transition_to_field) {
- ExtendPropertiesBackingStore(holder, handler_word);
- }
-
StoreNamedField(handler_word, holder, false, representation, prepared_value,
- transition_to_field, miss);
- if (transition_to_field) {
- StoreMap(holder, transition);
- }
+ miss);
Return(value);
}
}
Node* AccessorAssembler::PrepareValueForStore(Node* handler_word, Node* holder,
Representation representation,
- Node* transition, Node* value,
- Label* bailout) {
+ Node* value, Label* bailout) {
if (representation.IsDouble()) {
value = TryTaggedToFloat64(value, bailout);
@@ -1300,7 +1495,7 @@ Node* AccessorAssembler::PrepareValueForStore(Node* handler_word, Node* holder,
GotoIf(TaggedIsSmi(value), bailout);
Label done(this);
- if (FLAG_track_constant_fields && !transition) {
+ if (FLAG_track_constant_fields) {
// Skip field type check in favor of constant value check when storing
// to constant field.
GotoIf(WordEqual(DecodeWord<StoreHandler::KindBits>(handler_word),
@@ -1308,8 +1503,7 @@ Node* AccessorAssembler::PrepareValueForStore(Node* handler_word, Node* holder,
&done);
}
Node* descriptor = DecodeWord<StoreHandler::DescriptorBits>(handler_word);
- Node* maybe_field_type = LoadDescriptorValue(
- transition ? transition : LoadMap(holder), descriptor);
+ Node* maybe_field_type = LoadDescriptorValue(LoadMap(holder), descriptor);
GotoIf(TaggedIsSmi(maybe_field_type), &done);
// Check that value type matches the field type.
@@ -1328,10 +1522,8 @@ Node* AccessorAssembler::PrepareValueForStore(Node* handler_word, Node* holder,
return value;
}
-void AccessorAssembler::ExtendPropertiesBackingStore(Node* object,
- Node* handler_word) {
- Label done(this);
- GotoIfNot(IsSetWord<StoreHandler::ExtendStorageBits>(handler_word), &done);
+Node* AccessorAssembler::ExtendPropertiesBackingStore(Node* object,
+ Node* index) {
Comment("[ Extend storage");
ParameterMode mode = OptimalParameterMode();
@@ -1375,14 +1567,14 @@ void AccessorAssembler::ExtendPropertiesBackingStore(Node* object,
BIND(&extend_store);
{
+ VARIABLE(var_new_properties, MachineRepresentation::kTaggedPointer,
+ var_properties.value());
+ Label done(this);
// Previous property deletion could have left behind unused backing store
// capacity even for a map that thinks it doesn't have any unused fields.
// Perform a bounds check to see if we actually have to grow the array.
- Node* index = DecodeWord<StoreHandler::FieldIndexBits>(handler_word);
- Node* offset = IntPtrMul(index, IntPtrConstant(kPointerSize));
- Node* size = ElementOffsetFromIndex(var_length.value(), PACKED_ELEMENTS,
- mode, FixedArray::kHeaderSize);
- GotoIf(UintPtrLessThan(offset, size), &done);
+ GotoIf(UintPtrLessThan(index, ParameterToIntPtr(var_length.value(), mode)),
+ &done);
Node* delta = IntPtrOrSmiConstant(JSObject::kFieldsAdded, mode);
Node* new_capacity = IntPtrOrSmiAdd(var_length.value(), delta, mode);
@@ -1400,6 +1592,7 @@ void AccessorAssembler::ExtendPropertiesBackingStore(Node* object,
mode));
Node* new_properties = AllocatePropertyArray(new_capacity, mode);
+ var_new_properties.Bind(new_properties);
FillPropertyArrayWithUndefined(new_properties, var_length.value(),
new_capacity, mode);
@@ -1420,15 +1613,15 @@ void AccessorAssembler::ExtendPropertiesBackingStore(Node* object,
StoreObjectField(object, JSObject::kPropertiesOrHashOffset, new_properties);
Comment("] Extend storage");
Goto(&done);
+ BIND(&done);
+ return var_new_properties.value();
}
- BIND(&done);
}
void AccessorAssembler::StoreNamedField(Node* handler_word, Node* object,
bool is_inobject,
Representation representation,
- Node* value, bool transition_to_field,
- Label* bailout) {
+ Node* value, Label* bailout) {
bool store_value_as_double = representation.IsDouble();
Node* property_storage = object;
if (!is_inobject) {
@@ -1439,22 +1632,15 @@ void AccessorAssembler::StoreNamedField(Node* handler_word, Node* object,
Node* offset = IntPtrMul(index, IntPtrConstant(kPointerSize));
if (representation.IsDouble()) {
if (!FLAG_unbox_double_fields || !is_inobject) {
- if (transition_to_field) {
- Node* heap_number = AllocateHeapNumberWithValue(value, MUTABLE);
- // Store the new mutable heap number into the object.
- value = heap_number;
- store_value_as_double = false;
- } else {
- // Load the heap number.
- property_storage = LoadObjectField(property_storage, offset);
- // Store the double value into it.
- offset = IntPtrConstant(HeapNumber::kValueOffset);
- }
+ // Load the mutable heap number.
+ property_storage = LoadObjectField(property_storage, offset);
+ // Store the double value into it.
+ offset = IntPtrConstant(HeapNumber::kValueOffset);
}
}
// Do constant value check if necessary.
- if (FLAG_track_constant_fields && !transition_to_field) {
+ if (FLAG_track_constant_fields) {
Label done(this);
GotoIfNot(WordEqual(DecodeWord<StoreHandler::KindBits>(handler_word),
IntPtrConstant(StoreHandler::kConstField)),
@@ -1724,17 +1910,22 @@ void AccessorAssembler::BranchIfStrictMode(Node* vector, Node* slot,
Label* if_strict) {
Node* sfi =
LoadObjectField(vector, FeedbackVector::kSharedFunctionInfoOffset);
- Node* metadata =
- LoadObjectField(sfi, SharedFunctionInfo::kFeedbackMetadataOffset);
+ TNode<FeedbackMetadata> metadata = CAST(LoadObjectField(
+ sfi, SharedFunctionInfo::kOuterScopeInfoOrFeedbackMetadataOffset));
Node* slot_int = SmiToInt32(slot);
// See VectorICComputer::index().
const int kItemsPerWord = FeedbackMetadata::VectorICComputer::kItemsPerWord;
Node* word_index = Int32Div(slot_int, Int32Constant(kItemsPerWord));
Node* word_offset = Int32Mod(slot_int, Int32Constant(kItemsPerWord));
- Node* data = SmiToInt32(LoadFixedArrayElement(
- metadata, ChangeInt32ToIntPtr(word_index),
- FeedbackMetadata::kReservedIndexCount * kPointerSize, INTPTR_PARAMETERS));
+
+ int32_t first_item = FeedbackMetadata::kHeaderSize - kHeapObjectTag;
+ Node* offset =
+ ElementOffsetFromIndex(ChangeInt32ToIntPtr(word_index), UINT32_ELEMENTS,
+ INTPTR_PARAMETERS, first_item);
+
+ Node* data = Load(MachineType::Int32(), metadata, offset);
+
// See VectorICComputer::decode().
const int kBitsPerItem = FeedbackMetadata::kFeedbackSlotKindBits;
Node* shift = Int32Mul(word_offset, Int32Constant(kBitsPerItem));
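The rewritten lookup reads the slot kind straight out of the packed FeedbackMetadata words: each 32-bit word holds kItemsPerWord kinds of kBitsPerItem bits apiece, so a slot's kind is word[slot / kItemsPerWord] shifted right by (slot % kItemsPerWord) * kBitsPerItem and masked. A standalone decode of that packing (the constants and data are assumed for the sketch):

#include <cstdint>
#include <cstdio>

int main() {
  const int kBitsPerItem = 5;                   // assumed width of a slot kind
  const int kItemsPerWord = 32 / kBitsPerItem;  // 6 kinds per 32-bit word
  const uint32_t metadata[] = {0x0842108Fu, 0x00000013u};  // made-up packed data

  int slot = 7;
  int word_index = slot / kItemsPerWord;
  int word_offset = slot % kItemsPerWord;
  uint32_t kind = (metadata[word_index] >> (word_offset * kBitsPerItem)) &
                  ((1u << kBitsPerItem) - 1u);
  std::printf("slot %d -> kind %u\n", slot, kind);
}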
@@ -1857,9 +2048,7 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
// Receivers requiring non-standard accesses (interceptors, access
// checks, strings and string wrappers) are handled in the runtime.
- GotoIf(Int32LessThanOrEqual(instance_type,
- Int32Constant(LAST_SPECIAL_RECEIVER_TYPE)),
- &special_receiver);
+ GotoIf(IsSpecialReceiverInstanceType(instance_type), &special_receiver);
// Check if the receiver has fast or slow properties.
Node* bitfield3 = LoadMapBitField3(receiver_map);
@@ -1871,7 +2060,7 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
Node* descriptors = LoadMapDescriptors(receiver_map);
Label if_descriptor_found(this), stub_cache(this);
- VARIABLE(var_name_index, MachineType::PointerRepresentation());
+ TVARIABLE(IntPtrT, var_name_index);
Label* notfound =
use_stub_cache == kUseStubCache ? &stub_cache : &lookup_prototype_chain;
DescriptorLookup(p->name, descriptors, bitfield3, &if_descriptor_found,
@@ -2433,9 +2622,8 @@ void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p) {
GotoIfNot(WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
&try_polymorphic_name);
// TODO(jkummerow): Inline this? Or some of it?
- TailCallStub(
- Builtins::CallableFor(isolate(), Builtins::kKeyedLoadIC_Megamorphic),
- p->context, p->receiver, p->name, p->slot, p->vector);
+ TailCallBuiltin(Builtins::kKeyedLoadIC_Megamorphic, p->context, p->receiver,
+ p->name, p->slot, p->vector);
}
BIND(&try_polymorphic_name);
{
@@ -2641,10 +2829,8 @@ void AccessorAssembler::StoreIC(const StoreICParameters* p) {
GotoIfNot(
WordEqual(feedback, LoadRoot(Heap::kuninitialized_symbolRootIndex)),
&miss);
- Callable stub =
- Builtins::CallableFor(isolate(), Builtins::kStoreIC_Uninitialized);
- TailCallStub(stub, p->context, p->receiver, p->name, p->value, p->slot,
- p->vector);
+ TailCallBuiltin(Builtins::kStoreIC_Uninitialized, p->context, p->receiver,
+ p->name, p->value, p->slot, p->vector);
}
BIND(&miss);
{
@@ -2814,9 +3000,8 @@ void AccessorAssembler::KeyedStoreIC(const StoreICParameters* p) {
GotoIfNot(
WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
&try_polymorphic_name);
- TailCallStub(
- Builtins::CallableFor(isolate(), Builtins::kKeyedStoreIC_Megamorphic),
- p->context, p->receiver, p->name, p->value, p->slot, p->vector);
+ TailCallBuiltin(Builtins::kKeyedStoreIC_Megamorphic, p->context,
+ p->receiver, p->name, p->value, p->slot, p->vector);
}
BIND(&try_polymorphic_name);
@@ -2840,6 +3025,86 @@ void AccessorAssembler::KeyedStoreIC(const StoreICParameters* p) {
}
}
+void AccessorAssembler::StoreInArrayLiteralIC(const StoreICParameters* p) {
+ Label miss(this, Label::kDeferred);
+ {
+ VARIABLE(var_handler, MachineRepresentation::kTagged);
+
+ Label if_handler(this, &var_handler),
+ try_polymorphic(this, Label::kDeferred),
+ try_megamorphic(this, Label::kDeferred);
+
+ Node* array_map = LoadReceiverMap(p->receiver);
+ GotoIf(IsDeprecatedMap(array_map), &miss);
+ Node* feedback =
+ TryMonomorphicCase(p->slot, p->vector, array_map, &if_handler,
+ &var_handler, &try_polymorphic);
+
+ BIND(&if_handler);
+ {
+ Comment("StoreInArrayLiteralIC_if_handler");
+ // This is a stripped-down version of HandleStoreICHandlerCase.
+
+ Node* handler = var_handler.value();
+ Label if_transitioning_element_store(this);
+ GotoIfNot(IsCode(handler), &if_transitioning_element_store);
+ StoreWithVectorDescriptor descriptor(isolate());
+ TailCallStub(descriptor, handler, p->context, p->receiver, p->name,
+ p->value, p->slot, p->vector);
+
+ BIND(&if_transitioning_element_store);
+ {
+ Node* transition_map_cell = LoadHandlerDataField(handler, 1);
+ Node* transition_map = LoadWeakCellValue(transition_map_cell, &miss);
+ CSA_ASSERT(this, IsMap(transition_map));
+ GotoIf(IsDeprecatedMap(transition_map), &miss);
+ Node* code = LoadObjectField(handler, StoreHandler::kSmiHandlerOffset);
+ CSA_ASSERT(this, IsCode(code));
+ StoreTransitionDescriptor descriptor(isolate());
+ TailCallStub(descriptor, code, p->context, p->receiver, p->name,
+ transition_map, p->value, p->slot, p->vector);
+ }
+ }
+
+ BIND(&try_polymorphic);
+ {
+ Comment("StoreInArrayLiteralIC_try_polymorphic");
+ GotoIfNot(
+ WordEqual(LoadMap(feedback), LoadRoot(Heap::kFixedArrayMapRootIndex)),
+ &try_megamorphic);
+ HandlePolymorphicCase(array_map, feedback, &if_handler, &var_handler,
+ &miss, 2);
+ }
+
+ BIND(&try_megamorphic);
+ {
+ Comment("StoreInArrayLiteralIC_try_megamorphic");
+ CSA_ASSERT(
+ this,
+ Word32Or(
+ Word32Or(
+ IsWeakCellMap(LoadMap(feedback)),
+ WordEqual(feedback,
+ LoadRoot(Heap::kuninitialized_symbolRootIndex))),
+ WordEqual(feedback,
+ LoadRoot(Heap::kmegamorphic_symbolRootIndex))));
+ GotoIfNot(
+ WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
+ &miss);
+ TailCallRuntime(Runtime::kStoreInArrayLiteralIC_Slow, p->context,
+ p->value, p->receiver, p->name);
+ }
+ }
+
+ BIND(&miss);
+ {
+ Comment("StoreInArrayLiteralIC_miss");
+ // TODO(neis): Introduce Runtime::kStoreInArrayLiteralIC_Miss.
+ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, p->context, p->value, p->slot,
+ p->vector, p->receiver, p->name);
+ }
+}
+
//////////////////// Public methods.
void AccessorAssembler::GenerateLoadIC() {
@@ -3035,9 +3300,7 @@ void AccessorAssembler::GenerateStoreGlobalICTrampoline() {
Node* context = Parameter(Descriptor::kContext);
Node* vector = LoadFeedbackVectorForStub();
- Callable callable =
- Builtins::CallableFor(isolate(), Builtins::kStoreGlobalIC);
- TailCallStub(callable, context, name, value, slot, vector);
+ TailCallBuiltin(Builtins::kStoreGlobalIC, context, name, value, slot, vector);
}
void AccessorAssembler::GenerateStoreIC() {
@@ -3064,8 +3327,8 @@ void AccessorAssembler::GenerateStoreICTrampoline() {
Node* context = Parameter(Descriptor::kContext);
Node* vector = LoadFeedbackVectorForStub();
- Callable callable = Builtins::CallableFor(isolate(), Builtins::kStoreIC);
- TailCallStub(callable, context, receiver, name, value, slot, vector);
+ TailCallBuiltin(Builtins::kStoreIC, context, receiver, name, value, slot,
+ vector);
}
void AccessorAssembler::GenerateKeyedStoreIC() {
@@ -3092,8 +3355,22 @@ void AccessorAssembler::GenerateKeyedStoreICTrampoline() {
Node* context = Parameter(Descriptor::kContext);
Node* vector = LoadFeedbackVectorForStub();
- Callable callable = Builtins::CallableFor(isolate(), Builtins::kKeyedStoreIC);
- TailCallStub(callable, context, receiver, name, value, slot, vector);
+ TailCallBuiltin(Builtins::kKeyedStoreIC, context, receiver, name, value, slot,
+ vector);
+}
+
+void AccessorAssembler::GenerateStoreInArrayLiteralIC() {
+ typedef StoreWithVectorDescriptor Descriptor;
+
+ Node* array = Parameter(Descriptor::kReceiver);
+ Node* index = Parameter(Descriptor::kName);
+ Node* value = Parameter(Descriptor::kValue);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* vector = Parameter(Descriptor::kVector);
+ Node* context = Parameter(Descriptor::kContext);
+
+ StoreICParameters p(context, array, index, value, slot, vector);
+ StoreInArrayLiteralIC(&p);
}
} // namespace internal
diff --git a/deps/v8/src/ic/accessor-assembler.h b/deps/v8/src/ic/accessor-assembler.h
index 3e4f551c14..7428eeec4d 100644
--- a/deps/v8/src/ic/accessor-assembler.h
+++ b/deps/v8/src/ic/accessor-assembler.h
@@ -18,7 +18,11 @@ class ExitPoint;
class AccessorAssembler : public CodeStubAssembler {
public:
- typedef compiler::Node Node;
+ using Node = compiler::Node;
+ template <class T>
+ using TNode = compiler::TNode<T>;
+ template <class T>
+ using SloppyTNode = compiler::SloppyTNode<T>;
explicit AccessorAssembler(compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
@@ -43,6 +47,8 @@ class AccessorAssembler : public CodeStubAssembler {
void GenerateKeyedStoreIC();
void GenerateKeyedStoreICTrampoline();
+ void GenerateStoreInArrayLiteralIC();
+
void TryProbeStubCache(StubCache* stub_cache, Node* receiver, Node* name,
Label* if_handler, Variable* var_handler,
Label* if_miss);
@@ -83,7 +89,8 @@ class AccessorAssembler : public CodeStubAssembler {
void LoadIC_BytecodeHandler(const LoadICParameters* p, ExitPoint* exit_point);
// Loads dataX field from the DataHandler object.
- Node* LoadHandlerDataField(Node* handler, int data_index);
+ TNode<Object> LoadHandlerDataField(SloppyTNode<DataHandler> handler,
+ int data_index);
protected:
struct StoreICParameters : public LoadICParameters {
@@ -99,12 +106,26 @@ class AccessorAssembler : public CodeStubAssembler {
void HandleStoreICHandlerCase(
const StoreICParameters* p, Node* handler, Label* miss, ICMode ic_mode,
ElementSupport support_elements = kOnlyProperties);
+ void HandleStoreICTransitionMapHandlerCase(const StoreICParameters* p,
+ Node* transition_map, Label* miss,
+ bool validate_transition_handler);
+
void JumpIfDataProperty(Node* details, Label* writable, Label* readonly);
void BranchIfStrictMode(Node* vector, Node* slot, Label* if_strict);
void InvalidateValidityCellIfPrototype(Node* map, Node* bitfield2 = nullptr);
+ void OverwriteExistingFastDataProperty(Node* object, Node* object_map,
+ Node* descriptors,
+ Node* descriptor_name_index,
+ Node* details, Node* value,
+ Label* slow,
+ bool do_transitioning_store);
+
+ void CheckFieldType(Node* descriptors, Node* name_index, Node* representation,
+ Node* value, Label* bailout);
+
private:
// Stub generation entry points.
@@ -127,6 +148,7 @@ class AccessorAssembler : public CodeStubAssembler {
void StoreGlobalIC_PropertyCellCase(Node* property_cell, Node* value,
ExitPoint* exit_point, Label* miss);
void KeyedStoreIC(const StoreICParameters* p);
+ void StoreInArrayLiteralIC(const StoreICParameters* p);
// IC dispatcher behavior.
@@ -184,16 +206,13 @@ class AccessorAssembler : public CodeStubAssembler {
void HandleStoreICProtoHandler(const StoreICParameters* p, Node* handler,
Label* miss, ICMode ic_mode,
ElementSupport support_elements);
- // If |transition| is nullptr then the normal field store is generated or
- // transitioning store otherwise.
void HandleStoreICSmiHandlerCase(Node* handler_word, Node* holder,
- Node* value, Node* transition, Label* miss);
- // If |transition| is nullptr then the normal field store is generated or
- // transitioning store otherwise.
+ Node* value, Label* miss);
void HandleStoreFieldAndReturn(Node* handler_word, Node* holder,
Representation representation, Node* value,
- Node* transition, Label* miss);
+ Label* miss);
+ void CheckPrototypeValidityCell(Node* maybe_validity_cell, Label* miss);
void HandleStoreICNativeDataProperty(const StoreICParameters* p, Node* holder,
Node* handler_word);
@@ -229,15 +248,16 @@ class AccessorAssembler : public CodeStubAssembler {
Node* GetLanguageMode(Node* vector, Node* slot);
Node* PrepareValueForStore(Node* handler_word, Node* holder,
- Representation representation, Node* transition,
- Node* value, Label* bailout);
+ Representation representation, Node* value,
+ Label* bailout);
- // Extends properties backing store by JSObject::kFieldsAdded elements.
- void ExtendPropertiesBackingStore(Node* object, Node* handler_word);
+ // Extends properties backing store by JSObject::kFieldsAdded elements,
+ // returns updated properties backing store.
+ Node* ExtendPropertiesBackingStore(Node* object, Node* index);
void StoreNamedField(Node* handler_word, Node* object, bool is_inobject,
Representation representation, Node* value,
- bool transition_to_field, Label* bailout);
+ Label* bailout);
void EmitFastElementsBoundsCheck(Node* object, Node* elements,
Node* intptr_index,
@@ -274,11 +294,21 @@ class ExitPoint {
typedef compiler::CodeAssemblerVariable CodeAssemblerVariable;
public:
+ typedef std::function<void(Node* result)> IndirectReturnHandler;
+
explicit ExitPoint(CodeStubAssembler* assembler)
- : ExitPoint(assembler, nullptr, nullptr) {}
+ : ExitPoint(assembler, nullptr) {}
+
+ ExitPoint(CodeStubAssembler* assembler,
+ const IndirectReturnHandler& indirect_return_handler)
+ : asm_(assembler), indirect_return_handler_(indirect_return_handler) {}
+
ExitPoint(CodeStubAssembler* assembler, CodeAssemblerLabel* out,
CodeAssemblerVariable* var_result)
- : out_(out), var_result_(var_result), asm_(assembler) {
+ : ExitPoint(assembler, [=](Node* result) {
+ var_result->Bind(result);
+ assembler->Goto(out);
+ }) {
DCHECK_EQ(out != nullptr, var_result != nullptr);
}
@@ -288,7 +318,7 @@ class ExitPoint {
if (IsDirect()) {
asm_->TailCallRuntime(function, context, args...);
} else {
- IndirectReturn(asm_->CallRuntime(function, context, args...));
+ indirect_return_handler_(asm_->CallRuntime(function, context, args...));
}
}
@@ -297,7 +327,7 @@ class ExitPoint {
if (IsDirect()) {
asm_->TailCallStub(callable, context, args...);
} else {
- IndirectReturn(asm_->CallStub(callable, context, args...));
+ indirect_return_handler_(asm_->CallStub(callable, context, args...));
}
}
@@ -307,7 +337,8 @@ class ExitPoint {
if (IsDirect()) {
asm_->TailCallStub(descriptor, target, context, args...);
} else {
- IndirectReturn(asm_->CallStub(descriptor, target, context, args...));
+ indirect_return_handler_(
+ asm_->CallStub(descriptor, target, context, args...));
}
}
@@ -315,21 +346,15 @@ class ExitPoint {
if (IsDirect()) {
asm_->Return(result);
} else {
- IndirectReturn(result);
+ indirect_return_handler_(result);
}
}
- bool IsDirect() const { return out_ == nullptr; }
+ bool IsDirect() const { return !indirect_return_handler_; }
private:
- void IndirectReturn(Node* const result) {
- var_result_->Bind(result);
- asm_->Goto(out_);
- }
-
- CodeAssemblerLabel* const out_;
- CodeAssemblerVariable* const var_result_;
CodeStubAssembler* const asm_;
+ IndirectReturnHandler indirect_return_handler_;
};
} // namespace internal
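The ExitPoint rewrite replaces the out-label/result-variable pair with a std::function, so an indirect exit is now "hand the result to a callback". A rough standalone model of the two construction modes (a plain int stands in for the CSA Node* result type; this is not the real class):

#include <cstdio>
#include <functional>

class ExitPointSketch {
 public:
  using Handler = std::function<void(int result)>;
  ExitPointSketch() = default;  // direct exit: return or tail call
  explicit ExitPointSketch(Handler h) : handler_(std::move(h)) {}
  bool IsDirect() const { return !handler_; }
  void Return(int result) {
    if (IsDirect()) std::printf("tail-call/return %d\n", result);
    else handler_(result);  // e.g. bind a variable and Goto an out label
  }
 private:
  Handler handler_;
};

int main() {
  ExitPointSketch direct;
  direct.Return(1);
  ExitPointSketch indirect([](int r) { std::printf("bound %d, goto out\n", r); });
  indirect.Return(2);
}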
diff --git a/deps/v8/src/ic/handler-configuration-inl.h b/deps/v8/src/ic/handler-configuration-inl.h
index cf2577a01f..08054ef498 100644
--- a/deps/v8/src/ic/handler-configuration-inl.h
+++ b/deps/v8/src/ic/handler-configuration-inl.h
@@ -130,8 +130,7 @@ Handle<Smi> StoreHandler::StoreProxy(Isolate* isolate) {
Handle<Smi> StoreHandler::StoreField(Isolate* isolate, Kind kind,
int descriptor, FieldIndex field_index,
- Representation representation,
- bool extend_storage) {
+ Representation representation) {
FieldRepresentation field_rep;
switch (representation.kind()) {
case Representation::kSmi:
@@ -150,13 +149,9 @@ Handle<Smi> StoreHandler::StoreField(Isolate* isolate, Kind kind,
UNREACHABLE();
}
- DCHECK(kind == kField || kind == kTransitionToField ||
- (kind == kConstField && FLAG_track_constant_fields));
- DCHECK_IMPLIES(extend_storage, kind == kTransitionToField);
- DCHECK_IMPLIES(field_index.is_inobject(), !extend_storage);
+ DCHECK(kind == kField || (kind == kConstField && FLAG_track_constant_fields));
int config = KindBits::encode(kind) |
- ExtendStorageBits::encode(extend_storage) |
IsInobjectBits::encode(field_index.is_inobject()) |
FieldRepresentationBits::encode(field_rep) |
DescriptorBits::encode(descriptor) |
@@ -170,24 +165,7 @@ Handle<Smi> StoreHandler::StoreField(Isolate* isolate, int descriptor,
Representation representation) {
DCHECK_IMPLIES(!FLAG_track_constant_fields, constness == kMutable);
Kind kind = constness == kMutable ? kField : kConstField;
- return StoreField(isolate, kind, descriptor, field_index, representation,
- false);
-}
-
-Handle<Smi> StoreHandler::TransitionToField(Isolate* isolate, int descriptor,
- FieldIndex field_index,
- Representation representation,
- bool extend_storage) {
- return StoreField(isolate, kTransitionToField, descriptor, field_index,
- representation, extend_storage);
-}
-
-Handle<Smi> StoreHandler::TransitionToConstant(Isolate* isolate,
- int descriptor) {
- DCHECK(!FLAG_track_constant_fields);
- int config = KindBits::encode(kTransitionToConstant) |
- DescriptorBits::encode(descriptor);
- return handle(Smi::FromInt(config), isolate);
+ return StoreField(isolate, kind, descriptor, field_index, representation);
}
Handle<Smi> StoreHandler::StoreNativeDataProperty(Isolate* isolate,
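StoreField above packs the whole handler into a Smi by OR-ing BitField-encoded pieces together (kind, in-object flag, field representation, descriptor and field index). A self-contained sketch of that encode/decode style (the field widths here are invented, not the real StoreHandler layout):

#include <cassert>
#include <cstdint>

template <typename T, int kShift, int kSize>
struct BitFieldSketch {
  static constexpr uint32_t kMask = ((1u << kSize) - 1u) << kShift;
  static uint32_t encode(T v) { return static_cast<uint32_t>(v) << kShift; }
  static T decode(uint32_t word) { return static_cast<T>((word & kMask) >> kShift); }
};

using KindBits = BitFieldSketch<int, 0, 4>;
using IsInobjectBits = BitFieldSketch<bool, 4, 1>;
using DescriptorBits = BitFieldSketch<int, 5, 10>;

int main() {
  uint32_t config = KindBits::encode(2) | IsInobjectBits::encode(true) |
                    DescriptorBits::encode(7);
  assert(KindBits::decode(config) == 2);
  assert(IsInobjectBits::decode(config));
  assert(DescriptorBits::decode(config) == 7);
}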
diff --git a/deps/v8/src/ic/handler-configuration.cc b/deps/v8/src/ic/handler-configuration.cc
index 19614a4322..7a5d6a46f7 100644
--- a/deps/v8/src/ic/handler-configuration.cc
+++ b/deps/v8/src/ic/handler-configuration.cc
@@ -123,13 +123,6 @@ Handle<Object> LoadHandler::LoadFromPrototype(Isolate* isolate,
Handle<Object> validity_cell =
Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate);
- if (validity_cell.is_null()) {
- // Although in case of kApiGetter we load from receiver we still have to
- // use the "prototype" shape of a handler in order to provide additional
- // data to the dispatcher.
- DCHECK_EQ(kApiGetter, GetHandlerKind(*smi_handler));
- validity_cell = handle(Smi::kZero, isolate);
- }
int data_count = 1 + checks_count;
Handle<LoadHandler> handler = isolate->factory()->NewLoadHandler(data_count);
@@ -153,11 +146,10 @@ Handle<Object> LoadHandler::LoadFullChain(Isolate* isolate,
Handle<Object> validity_cell =
Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate);
- if (validity_cell.is_null()) {
+ if (validity_cell->IsSmi()) {
DCHECK_EQ(0, checks_count);
// Lookup on receiver isn't supported in case of a simple smi handler.
if (!LookupOnReceiverBits::decode(smi_handler->value())) return smi_handler;
- validity_cell = handle(Smi::kZero, isolate);
}
int data_count = 1 + checks_count;
@@ -195,9 +187,6 @@ Handle<Object> StoreHandler::StoreElementTransition(
.GetCode();
Handle<Object> validity_cell =
Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate);
- if (validity_cell.is_null()) {
- validity_cell = handle(Smi::kZero, isolate);
- }
Handle<WeakCell> cell = Map::WeakCellForMap(transition);
Handle<StoreHandler> handler = isolate->factory()->NewStoreHandler(1);
handler->set_smi_handler(*stub);
@@ -206,28 +195,50 @@ Handle<Object> StoreHandler::StoreElementTransition(
return handler;
}
-Handle<Smi> StoreHandler::StoreTransition(Isolate* isolate,
- Handle<Map> transition_map) {
- int descriptor = transition_map->LastAdded();
- Handle<DescriptorArray> descriptors(transition_map->instance_descriptors());
- PropertyDetails details = descriptors->GetDetails(descriptor);
- Representation representation = details.representation();
- DCHECK(!representation.IsNone());
-
+Handle<Object> StoreHandler::StoreTransition(Isolate* isolate,
+ Handle<Map> transition_map) {
+ bool is_dictionary_map = transition_map->is_dictionary_map();
+#ifdef DEBUG
+ if (!is_dictionary_map) {
+ int descriptor = transition_map->LastAdded();
+ Handle<DescriptorArray> descriptors(transition_map->instance_descriptors());
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+ if (descriptors->GetKey(descriptor)->IsPrivate()) {
+ DCHECK_EQ(DONT_ENUM, details.attributes());
+ } else {
+ DCHECK_EQ(NONE, details.attributes());
+ }
+ Representation representation = details.representation();
+ DCHECK(!representation.IsNone());
+ }
+#endif
// Declarative handlers don't support access checks.
DCHECK(!transition_map->is_access_check_needed());
- DCHECK_EQ(kData, details.kind());
- if (details.location() == PropertyLocation::kDescriptor) {
- return TransitionToConstant(isolate, descriptor);
+ // Get validity cell value if it is necessary for the handler.
+ Handle<Object> validity_cell;
+ if (is_dictionary_map || !transition_map->IsPrototypeValidityCellValid()) {
+ validity_cell =
+ Map::GetOrCreatePrototypeChainValidityCell(transition_map, isolate);
}
- DCHECK_EQ(PropertyLocation::kField, details.location());
- bool extend_storage =
- Map::cast(transition_map->GetBackPointer())->UnusedPropertyFields() == 0;
- FieldIndex index = FieldIndex::ForDescriptor(*transition_map, descriptor);
- return TransitionToField(isolate, descriptor, index, representation,
- extend_storage);
+ if (is_dictionary_map) {
+ DCHECK(!transition_map->IsJSGlobalObjectMap());
+ Handle<StoreHandler> handler = isolate->factory()->NewStoreHandler(0);
+ // Store normal with enabled lookup on receiver.
+ int config = KindBits::encode(kNormal) | LookupOnReceiverBits::encode(true);
+ handler->set_smi_handler(Smi::FromInt(config));
+ handler->set_validity_cell(*validity_cell);
+ return handler;
+
+ } else {
+ // Ensure the transition map contains a valid prototype validity cell.
+ if (!validity_cell.is_null()) {
+ transition_map->set_prototype_validity_cell(*validity_cell);
+ }
+ Handle<WeakCell> cell = Map::WeakCellForMap(transition_map);
+ return cell;
+ }
}
// static
@@ -245,10 +256,7 @@ Handle<Object> StoreHandler::StoreThroughPrototype(
Handle<Object> validity_cell =
Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate);
- if (validity_cell.is_null()) {
- DCHECK_EQ(0, checks_count);
- validity_cell = handle(Smi::kZero, isolate);
- }
+ DCHECK_IMPLIES(validity_cell->IsSmi(), checks_count == 0);
int data_count = 1 + checks_count;
Handle<StoreHandler> handler =
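
Editorial note: the rewritten StoreTransition above produces one of two handler shapes: a normal-store data handler plus validity cell when the target is a dictionary map, or a weak reference to the transition map itself for fast maps. The following is a compressed sketch of that two-way dispatch using illustrative stand-in types, not the real factory or handle machinery.

    #include <iostream>
    #include <memory>
    #include <variant>

    struct Map { bool is_dictionary_map; };

    // Dictionary-map case: a smi-encoded "normal store" config plus a
    // validity cell guarding the prototype chain shape.
    struct NormalStoreHandler {
      int smi_config;
      int validity_cell_value;
    };

    using TransitionHandler =
        std::variant<NormalStoreHandler, std::weak_ptr<Map>>;

    TransitionHandler StoreTransitionSketch(const std::shared_ptr<Map>& target) {
      if (target->is_dictionary_map) {
        return NormalStoreHandler{/*smi_config=*/1, /*validity_cell_value=*/0};
      }
      // Fast-map case: the transition map itself acts as the handler; holding
      // it weakly lets the map be collected if the transition dies.
      return std::weak_ptr<Map>(target);
    }

    int main() {
      auto fast = std::make_shared<Map>(Map{false});
      auto handler = StoreTransitionSketch(fast);
      std::cout << "fast map handled via weak map reference: "
                << std::holds_alternative<std::weak_ptr<Map>>(handler) << "\n";
    }
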
diff --git a/deps/v8/src/ic/handler-configuration.h b/deps/v8/src/ic/handler-configuration.h
index 514a5ed5fa..67f7a1d08e 100644
--- a/deps/v8/src/ic/handler-configuration.h
+++ b/deps/v8/src/ic/handler-configuration.h
@@ -192,9 +192,6 @@ class StoreHandler final : public DataHandler {
kElement,
kField,
kConstField,
- // TODO(ishell): remove once constant field tracking is done.
- kTransitionToConstant = kConstField,
- kTransitionToField,
kAccessor,
kNativeDataProperty,
kApiSetter,
@@ -236,8 +233,7 @@ class StoreHandler final : public DataHandler {
//
// Encoding when KindBits contains kField or kTransitionToField.
//
- class ExtendStorageBits : public BitField<bool, DescriptorBits::kNext, 1> {};
- class IsInobjectBits : public BitField<bool, ExtendStorageBits::kNext, 1> {};
+ class IsInobjectBits : public BitField<bool, DescriptorBits::kNext, 1> {};
class FieldRepresentationBits
: public BitField<FieldRepresentation, IsInobjectBits::kNext, 2> {};
// +1 here is to cover all possible JSObject header sizes.
@@ -257,8 +253,8 @@ class StoreHandler final : public DataHandler {
PropertyConstness constness,
Representation representation);
- static Handle<Smi> StoreTransition(Isolate* isolate,
- Handle<Map> transition_map);
+ static Handle<Object> StoreTransition(Isolate* isolate,
+ Handle<Map> transition_map);
// Creates a Smi-handler for storing a native data property on a fast object.
static inline Handle<Smi> StoreNativeDataProperty(Isolate* isolate,
@@ -303,19 +299,7 @@ class StoreHandler final : public DataHandler {
private:
static inline Handle<Smi> StoreField(Isolate* isolate, Kind kind,
int descriptor, FieldIndex field_index,
- Representation representation,
- bool extend_storage);
-
- // Creates a Smi-handler for transitioning store to a field.
- static inline Handle<Smi> TransitionToField(Isolate* isolate, int descriptor,
- FieldIndex field_index,
- Representation representation,
- bool extend_storage);
-
- // Creates a Smi-handler for transitioning store to a constant field (in this
- // case the only thing that needs to be done is an update of a map).
- static inline Handle<Smi> TransitionToConstant(Isolate* isolate,
- int descriptor);
+ Representation representation);
};
} // namespace internal
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index e6fa0b1ceb..8924723951 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -74,8 +74,6 @@ const char* GetModifier(KeyedAccessStoreMode mode) {
} // namespace
-#define TRACE_GENERIC_IC(reason) set_slow_stub_reason(reason);
-
void IC::TraceIC(const char* type, Handle<Object> name) {
if (FLAG_ic_stats) {
if (AddressIsDeoptimizedCode()) return;
@@ -97,14 +95,16 @@ void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
if (IsKeyedLoadIC()) {
KeyedAccessLoadMode mode = nexus()->GetKeyedAccessLoadMode();
modifier = GetModifier(mode);
- } else if (IsKeyedStoreIC()) {
+ } else if (IsKeyedStoreIC() || IsStoreInArrayLiteralICKind(kind())) {
KeyedAccessStoreMode mode = nexus()->GetKeyedAccessStoreMode();
modifier = GetModifier(mode);
}
+ bool keyed_prefix = is_keyed() && !IsStoreInArrayLiteralICKind(kind());
+
if (!(FLAG_ic_stats &
v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
- LOG(isolate(), ICEvent(type, is_keyed(), map, *name,
+ LOG(isolate(), ICEvent(type, keyed_prefix, map, *name,
TransitionMarkFromState(old_state),
TransitionMarkFromState(new_state), modifier,
slow_stub_reason_));
@@ -113,7 +113,7 @@ void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
ICStats::instance()->Begin();
ICInfo& ic_info = ICStats::instance()->Current();
- ic_info.type = is_keyed() ? "Keyed" : "";
+ ic_info.type = keyed_prefix ? "Keyed" : "";
ic_info.type += type;
Object* maybe_function =
@@ -124,8 +124,7 @@ void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
if (function->IsInterpreted()) {
code_offset = InterpretedFrame::GetBytecodeOffset(fp());
} else {
- code_offset =
- static_cast<int>(pc() - function->code()->instruction_start());
+ code_offset = static_cast<int>(pc() - function->code()->InstructionStart());
}
JavaScriptFrame::CollectFunctionAndOffsetForICStats(
function, function->abstract_code(), code_offset);
@@ -149,8 +148,6 @@ void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
}
-#define TRACE_IC(type, name) TraceIC(type, name)
-
IC::IC(FrameDepth depth, Isolate* isolate, Handle<FeedbackVector> vector,
FeedbackSlot slot)
: isolate_(isolate),
@@ -436,7 +433,7 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
TRACE_HANDLER_STATS(isolate(), LoadIC_NonReceiver);
update_receiver_map(object);
PatchCache(name, slow_stub());
- TRACE_IC("LoadIC", name);
+ TraceIC("LoadIC", name);
}
if (*name == isolate()->heap()->iterator_symbol()) {
@@ -517,7 +514,7 @@ MaybeHandle<Object> LoadGlobalIC::Load(Handle<Name> name) {
TRACE_HANDLER_STATS(isolate(), LoadGlobalIC_SlowStub);
PatchCache(name, slow_stub());
}
- TRACE_IC("LoadGlobalIC", name);
+ TraceIC("LoadGlobalIC", name);
}
return result;
}
@@ -679,7 +676,7 @@ void LoadIC::UpdateCaches(LookupIterator* lookup) {
// the pre monomorphic stub to delay setting the monomorphic state.
TRACE_HANDLER_STATS(isolate(), LoadIC_Premonomorphic);
ConfigureVectorState(PREMONOMORPHIC, Handle<Object>());
- TRACE_IC("LoadIC", lookup->name());
+ TraceIC("LoadIC", lookup->name());
return;
}
@@ -702,7 +699,7 @@ void LoadIC::UpdateCaches(LookupIterator* lookup) {
DCHECK(lookup->GetReceiver()->IsJSGlobalObject());
// Now update the cell in the feedback vector.
nexus()->ConfigurePropertyCellMode(lookup->GetPropertyCell());
- TRACE_IC("LoadGlobalIC", lookup->name());
+ TraceIC("LoadGlobalIC", lookup->name());
return;
}
}
@@ -710,7 +707,7 @@ void LoadIC::UpdateCaches(LookupIterator* lookup) {
}
PatchCache(lookup->name(), code);
- TRACE_IC("LoadIC", lookup->name());
+ TraceIC("LoadIC", lookup->name());
}
StubCache* IC::stub_cache() {
@@ -822,15 +819,16 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
return ComputeHandler(lookup);
}
- // When debugging we need to go the slow path to flood the accessor.
- if (GetHostFunction()->shared()->HasBreakInfo()) {
+ Handle<Object> getter(AccessorPair::cast(*accessors)->getter(),
+ isolate());
+ if (!getter->IsJSFunction() && !getter->IsFunctionTemplateInfo()) {
TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
return slow_stub();
}
- Handle<Object> getter(AccessorPair::cast(*accessors)->getter(),
- isolate());
- if (!getter->IsJSFunction() && !getter->IsFunctionTemplateInfo()) {
+ if (getter->IsFunctionTemplateInfo() &&
+ FunctionTemplateInfo::cast(*getter)->BreakAtEntry()) {
+ // Do not install an IC if the api function has a breakpoint.
TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
return slow_stub();
}
@@ -1008,11 +1006,11 @@ void KeyedLoadIC::UpdateLoadElement(Handle<HeapObject> receiver,
for (Handle<Map> map : target_receiver_maps) {
if (map.is_null()) continue;
if (map->instance_type() == JS_VALUE_TYPE) {
- TRACE_GENERIC_IC("JSValue");
+ set_slow_stub_reason("JSValue");
return;
}
if (map->instance_type() == JS_PROXY_TYPE) {
- TRACE_GENERIC_IC("JSProxy");
+ set_slow_stub_reason("JSProxy");
return;
}
}
@@ -1045,7 +1043,7 @@ void KeyedLoadIC::UpdateLoadElement(Handle<HeapObject> receiver,
!CanChangeToAllowOutOfBounds(receiver_map)) {
// If the miss wasn't due to an unseen map, a polymorphic stub
// won't help, use the generic stub.
- TRACE_GENERIC_IC("same map added twice");
+ set_slow_stub_reason("same map added twice");
return;
}
}
@@ -1053,7 +1051,7 @@ void KeyedLoadIC::UpdateLoadElement(Handle<HeapObject> receiver,
// If the maximum number of receiver maps has been exceeded, use the generic
// version of the IC.
if (target_receiver_maps.size() > kMaxKeyedPolymorphism) {
- TRACE_GENERIC_IC("max polymorph exceeded");
+ set_slow_stub_reason("max polymorph exceeded");
return;
}
@@ -1225,14 +1223,14 @@ MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object,
KeyedAccessLoadMode load_mode = GetLoadMode(object, index);
UpdateLoadElement(Handle<HeapObject>::cast(object), load_mode);
if (is_vector_set()) {
- TRACE_IC("LoadIC", key);
+ TraceIC("LoadIC", key);
}
}
}
if (vector_needs_update()) {
ConfigureVectorState(MEGAMORPHIC, key);
- TRACE_IC("LoadIC", key);
+ TraceIC("LoadIC", key);
}
if (!load_handle.is_null()) return load_handle;
@@ -1254,64 +1252,65 @@ bool StoreIC::LookupForWrite(LookupIterator* it, Handle<Object> value,
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
DCHECK(!receiver->map()->is_deprecated());
- for (; it->IsFound(); it->Next()) {
- switch (it->state()) {
- case LookupIterator::NOT_FOUND:
- case LookupIterator::TRANSITION:
- UNREACHABLE();
- case LookupIterator::JSPROXY:
- return true;
- case LookupIterator::INTERCEPTOR: {
- Handle<JSObject> holder = it->GetHolder<JSObject>();
- InterceptorInfo* info = holder->GetNamedInterceptor();
- if (it->HolderIsReceiverOrHiddenPrototype()) {
- return !info->non_masking() && receiver.is_identical_to(holder) &&
- !info->setter()->IsUndefined(it->isolate());
- } else if (!info->getter()->IsUndefined(it->isolate()) ||
- !info->query()->IsUndefined(it->isolate())) {
- return false;
- }
- break;
- }
- case LookupIterator::ACCESS_CHECK:
- if (it->GetHolder<JSObject>()->IsAccessCheckNeeded()) return false;
- break;
- case LookupIterator::ACCESSOR:
- return !it->IsReadOnly();
- case LookupIterator::INTEGER_INDEXED_EXOTIC:
- return false;
- case LookupIterator::DATA: {
- if (it->IsReadOnly()) return false;
- Handle<JSObject> holder = it->GetHolder<JSObject>();
- if (receiver.is_identical_to(holder)) {
- it->PrepareForDataProperty(value);
- // The previous receiver map might just have been deprecated,
- // so reload it.
- update_receiver_map(receiver);
+ if (it->state() != LookupIterator::TRANSITION) {
+ for (; it->IsFound(); it->Next()) {
+ switch (it->state()) {
+ case LookupIterator::NOT_FOUND:
+ case LookupIterator::TRANSITION:
+ UNREACHABLE();
+ case LookupIterator::JSPROXY:
return true;
+ case LookupIterator::INTERCEPTOR: {
+ Handle<JSObject> holder = it->GetHolder<JSObject>();
+ InterceptorInfo* info = holder->GetNamedInterceptor();
+ if (it->HolderIsReceiverOrHiddenPrototype()) {
+ return !info->non_masking() && receiver.is_identical_to(holder) &&
+ !info->setter()->IsUndefined(it->isolate());
+ } else if (!info->getter()->IsUndefined(it->isolate()) ||
+ !info->query()->IsUndefined(it->isolate())) {
+ return false;
+ }
+ break;
}
+ case LookupIterator::ACCESS_CHECK:
+ if (it->GetHolder<JSObject>()->IsAccessCheckNeeded()) return false;
+ break;
+ case LookupIterator::ACCESSOR:
+ return !it->IsReadOnly();
+ case LookupIterator::INTEGER_INDEXED_EXOTIC:
+ return false;
+ case LookupIterator::DATA: {
+ if (it->IsReadOnly()) return false;
+ Handle<JSObject> holder = it->GetHolder<JSObject>();
+ if (receiver.is_identical_to(holder)) {
+ it->PrepareForDataProperty(value);
+ // The previous receiver map might just have been deprecated,
+ // so reload it.
+ update_receiver_map(receiver);
+ return true;
+ }
- // Receiver != holder.
- if (receiver->IsJSGlobalProxy()) {
- PrototypeIterator iter(it->isolate(), receiver);
- return it->GetHolder<Object>().is_identical_to(
- PrototypeIterator::GetCurrent(iter));
- }
+ // Receiver != holder.
+ if (receiver->IsJSGlobalProxy()) {
+ PrototypeIterator iter(it->isolate(), receiver);
+ return it->GetHolder<Object>().is_identical_to(
+ PrototypeIterator::GetCurrent(iter));
+ }
- if (it->HolderIsReceiverOrHiddenPrototype()) return false;
+ if (it->HolderIsReceiverOrHiddenPrototype()) return false;
- if (it->ExtendingNonExtensible(receiver)) return false;
- created_new_transition_ = it->PrepareTransitionToDataProperty(
- receiver, value, NONE, store_mode);
- return it->IsCacheableTransition();
+ if (it->ExtendingNonExtensible(receiver)) return false;
+ it->PrepareTransitionToDataProperty(receiver, value, NONE,
+ store_mode);
+ return it->IsCacheableTransition();
+ }
}
}
}
receiver = it->GetStoreTarget<JSObject>();
if (it->ExtendingNonExtensible(receiver)) return false;
- created_new_transition_ =
- it->PrepareTransitionToDataProperty(receiver, value, NONE, store_mode);
+ it->PrepareTransitionToDataProperty(receiver, value, NONE, store_mode);
return it->IsCacheableTransition();
}
@@ -1351,7 +1350,7 @@ MaybeHandle<Object> StoreGlobalIC::Store(Handle<Name> name,
TRACE_HANDLER_STATS(isolate(), StoreGlobalIC_SlowStub);
PatchCache(name, slow_stub());
}
- TRACE_IC("StoreGlobalIC", name);
+ TraceIC("StoreGlobalIC", name);
}
script_context->set(lookup_result.slot_index, *value);
@@ -1382,7 +1381,7 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
TRACE_HANDLER_STATS(isolate(), StoreIC_NonReceiver);
update_receiver_map(object);
PatchCache(name, slow_stub());
- TRACE_IC("StoreIC", name);
+ TraceIC("StoreIC", name);
}
return TypeError(MessageTemplate::kNonObjectPropertyStore, object, name);
}
@@ -1390,20 +1389,7 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
if (state() != UNINITIALIZED) {
JSObject::MakePrototypesFast(object, kStartAtPrototype, isolate());
}
- MaybeHandle<Object> cached_handler;
- Handle<Map> transition_map;
- if (object->IsJSReceiver()) {
- name = isolate()->factory()->InternalizeName(name);
- TransitionsAccessor transitions(receiver_map());
- Object* maybe_handler = transitions.SearchHandler(*name, &transition_map);
- if (maybe_handler != nullptr) {
- cached_handler = MaybeHandle<Object>(maybe_handler, isolate());
- }
- }
-
- LookupIterator it = LookupIterator::ForTransitionHandler(
- isolate(), object, name, value, cached_handler, transition_map);
-
+ LookupIterator it(object, name);
bool use_ic = FLAG_use_ic;
if (name->IsPrivate()) {
@@ -1418,8 +1404,7 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
use_ic = false;
}
}
-
- if (use_ic) UpdateCaches(&it, value, store_mode, cached_handler);
+ if (use_ic) UpdateCaches(&it, value, store_mode);
MAYBE_RETURN_NULL(
Object::SetProperty(&it, value, language_mode(), store_mode));
@@ -1427,61 +1412,52 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
}
void StoreIC::UpdateCaches(LookupIterator* lookup, Handle<Object> value,
- JSReceiver::StoreFromKeyed store_mode,
- MaybeHandle<Object> cached_handler) {
+ JSReceiver::StoreFromKeyed store_mode) {
if (state() == UNINITIALIZED && !IsStoreGlobalIC()) {
// This is the first time we execute this inline cache. Set the target to
// the pre monomorphic stub to delay setting the monomorphic state.
TRACE_HANDLER_STATS(isolate(), StoreIC_Premonomorphic);
ConfigureVectorState(PREMONOMORPHIC, Handle<Object>());
- TRACE_IC("StoreIC", lookup->name());
+ TraceIC("StoreIC", lookup->name());
return;
}
Handle<Object> handler;
- if (!cached_handler.is_null()) {
- handler = cached_handler.ToHandleChecked();
- } else if (LookupForWrite(lookup, value, store_mode)) {
+ if (LookupForWrite(lookup, value, store_mode)) {
if (IsStoreGlobalIC()) {
if (lookup->state() == LookupIterator::DATA &&
lookup->GetReceiver().is_identical_to(lookup->GetHolder<Object>())) {
DCHECK(lookup->GetReceiver()->IsJSGlobalObject());
// Now update the cell in the feedback vector.
nexus()->ConfigurePropertyCellMode(lookup->GetPropertyCell());
- TRACE_IC("StoreGlobalIC", lookup->name());
+ TraceIC("StoreGlobalIC", lookup->name());
return;
}
}
- if (created_new_transition_) {
- // The first time a transition is performed, there's a good chance that
- // it won't be taken again, so don't bother creating a handler.
- TRACE_GENERIC_IC("new transition");
- TRACE_IC("StoreIC", lookup->name());
- return;
- }
handler = ComputeHandler(lookup);
} else {
- TRACE_GENERIC_IC("LookupForWrite said 'false'");
+ set_slow_stub_reason("LookupForWrite said 'false'");
handler = slow_stub();
}
PatchCache(lookup->name(), handler);
- TRACE_IC("StoreIC", lookup->name());
+ TraceIC("StoreIC", lookup->name());
}
Handle<Object> StoreIC::ComputeHandler(LookupIterator* lookup) {
switch (lookup->state()) {
case LookupIterator::TRANSITION: {
- Handle<JSObject> holder = lookup->GetHolder<JSObject>();
-
Handle<JSObject> store_target = lookup->GetStoreTarget<JSObject>();
if (store_target->IsJSGlobalObject()) {
TRACE_HANDLER_STATS(isolate(), StoreIC_StoreGlobalTransitionDH);
if (receiver_map()->IsJSGlobalObject()) {
DCHECK(IsStoreGlobalIC());
+#ifdef DEBUG
+ Handle<JSObject> holder = lookup->GetHolder<JSObject>();
DCHECK_EQ(*lookup->GetReceiver(), *holder);
DCHECK_EQ(*store_target, *holder);
+#endif
return StoreHandler::StoreGlobal(isolate(),
lookup->transition_cell());
}
@@ -1493,31 +1469,13 @@ Handle<Object> StoreIC::ComputeHandler(LookupIterator* lookup) {
isolate(), receiver_map(), store_target, smi_handler, cell);
return handler;
}
- // Currently not handled by CompileStoreTransition.
- if (!holder->HasFastProperties()) {
- TRACE_GENERIC_IC("transition from slow");
- TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
- return slow_stub();
- }
+ // Dictionary-to-fast transitions are not expected and not supported.
+ DCHECK_IMPLIES(!lookup->transition_map()->is_dictionary_map(),
+ !receiver_map()->is_dictionary_map());
DCHECK(lookup->IsCacheableTransition());
- Handle<Map> transition = lookup->transition_map();
- Handle<Smi> smi_handler;
- if (transition->is_dictionary_map()) {
- TRACE_HANDLER_STATS(isolate(), StoreIC_StoreNormalDH);
- smi_handler = StoreHandler::StoreNormal(isolate());
- } else {
- TRACE_HANDLER_STATS(isolate(), StoreIC_StoreTransitionDH);
- smi_handler = StoreHandler::StoreTransition(isolate(), transition);
- }
-
- Handle<WeakCell> cell = Map::WeakCellForMap(transition);
- Handle<Object> handler = StoreHandler::StoreThroughPrototype(
- isolate(), receiver_map(), holder, smi_handler, cell);
- TransitionsAccessor(receiver_map())
- .UpdateHandler(*lookup->name(), *handler);
- return handler;
+ return StoreHandler::StoreTransition(isolate(), lookup->transition_map());
}
case LookupIterator::INTERCEPTOR: {
@@ -1537,7 +1495,7 @@ Handle<Object> StoreIC::ComputeHandler(LookupIterator* lookup) {
DCHECK(!receiver->IsAccessCheckNeeded() || lookup->name()->IsPrivate());
if (!holder->HasFastProperties()) {
- TRACE_GENERIC_IC("accessor on slow map");
+ set_slow_stub_reason("accessor on slow map");
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
return slow_stub();
}
@@ -1545,19 +1503,19 @@ Handle<Object> StoreIC::ComputeHandler(LookupIterator* lookup) {
if (accessors->IsAccessorInfo()) {
Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(accessors);
if (v8::ToCData<Address>(info->setter()) == nullptr) {
- TRACE_GENERIC_IC("setter == nullptr");
+ set_slow_stub_reason("setter == nullptr");
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
return slow_stub();
}
if (AccessorInfo::cast(*accessors)->is_special_data_property() &&
!lookup->HolderIsReceiverOrHiddenPrototype()) {
- TRACE_GENERIC_IC("special data property in prototype chain");
+ set_slow_stub_reason("special data property in prototype chain");
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
return slow_stub();
}
if (!AccessorInfo::IsCompatibleReceiverMap(isolate(), info,
receiver_map())) {
- TRACE_GENERIC_IC("incompatible receiver type");
+ set_slow_stub_reason("incompatible receiver type");
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
return slow_stub();
}
@@ -1575,10 +1533,18 @@ Handle<Object> StoreIC::ComputeHandler(LookupIterator* lookup) {
Handle<Object> setter(Handle<AccessorPair>::cast(accessors)->setter(),
isolate());
if (!setter->IsJSFunction() && !setter->IsFunctionTemplateInfo()) {
- TRACE_GENERIC_IC("setter not a function");
+ set_slow_stub_reason("setter not a function");
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
return slow_stub();
}
+
+ if (setter->IsFunctionTemplateInfo() &&
+ FunctionTemplateInfo::cast(*setter)->BreakAtEntry()) {
+ // Do not install an IC if the api function has a breakpoint.
+ TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
+ return slow_stub();
+ }
+
CallOptimization call_optimization(setter);
if (call_optimization.is_simple_api_call()) {
if (call_optimization.IsCompatibleReceiver(receiver, holder)) {
@@ -1601,11 +1567,11 @@ Handle<Object> StoreIC::ComputeHandler(LookupIterator* lookup) {
isolate(), receiver_map(), holder, smi_handler, data_cell,
context_cell);
}
- TRACE_GENERIC_IC("incompatible receiver");
+ set_slow_stub_reason("incompatible receiver");
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
return slow_stub();
} else if (setter->IsFunctionTemplateInfo()) {
- TRACE_GENERIC_IC("setter non-simple template");
+ set_slow_stub_reason("setter non-simple template");
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
return slow_stub();
}
@@ -1660,7 +1626,7 @@ Handle<Object> StoreIC::ComputeHandler(LookupIterator* lookup) {
// -------------- Constant properties --------------
DCHECK_EQ(kDescriptor, lookup->property_details().location());
- TRACE_GENERIC_IC("constant property");
+ set_slow_stub_reason("constant property");
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
return slow_stub();
}
@@ -1681,20 +1647,22 @@ Handle<Object> StoreIC::ComputeHandler(LookupIterator* lookup) {
}
void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
- KeyedAccessStoreMode store_mode) {
+ KeyedAccessStoreMode store_mode,
+ bool receiver_was_cow) {
MapHandles target_receiver_maps;
TargetMaps(&target_receiver_maps);
if (target_receiver_maps.empty()) {
Handle<Map> monomorphic_map =
ComputeTransitionedMap(receiver_map, store_mode);
- store_mode = GetNonTransitioningStoreMode(store_mode);
+ store_mode = GetNonTransitioningStoreMode(store_mode, receiver_was_cow);
Handle<Object> handler = StoreElementHandler(monomorphic_map, store_mode);
return ConfigureVectorState(Handle<Name>(), monomorphic_map, handler);
}
for (Handle<Map> map : target_receiver_maps) {
if (!map.is_null() && map->instance_type() == JS_VALUE_TYPE) {
- TRACE_GENERIC_IC("JSValue");
+ DCHECK(!IsStoreInArrayLiteralICKind(kind()));
+ set_slow_stub_reason("JSValue");
return;
}
}
@@ -1719,7 +1687,7 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
// If the "old" and "new" maps are in the same elements map family, or
// if they at least come from the same origin for a transitioning store,
// stay MONOMORPHIC and use the map for the most generic ElementsKind.
- store_mode = GetNonTransitioningStoreMode(store_mode);
+ store_mode = GetNonTransitioningStoreMode(store_mode, receiver_was_cow);
Handle<Object> handler =
StoreElementHandler(transitioned_receiver_map, store_mode);
ConfigureVectorState(Handle<Name>(), transitioned_receiver_map, handler);
@@ -1753,7 +1721,7 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
if (!map_added) {
// If the miss wasn't due to an unseen map, a polymorphic stub
// won't help, use the megamorphic stub which can handle everything.
- TRACE_GENERIC_IC("same map added twice");
+ set_slow_stub_reason("same map added twice");
return;
}
@@ -1763,12 +1731,12 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
// Make sure all polymorphic handlers have the same store mode, otherwise the
// megamorphic stub must be used.
- store_mode = GetNonTransitioningStoreMode(store_mode);
+ store_mode = GetNonTransitioningStoreMode(store_mode, receiver_was_cow);
if (old_store_mode != STANDARD_STORE) {
if (store_mode == STANDARD_STORE) {
store_mode = old_store_mode;
} else if (store_mode != old_store_mode) {
- TRACE_GENERIC_IC("store mode mismatch");
+ set_slow_stub_reason("store mode mismatch");
return;
}
}
@@ -1780,12 +1748,15 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
size_t external_arrays = 0;
for (Handle<Map> map : target_receiver_maps) {
if (map->has_fixed_typed_array_elements()) {
+ DCHECK(!IsStoreInArrayLiteralICKind(kind()));
external_arrays++;
}
}
if (external_arrays != 0 &&
external_arrays != target_receiver_maps.size()) {
- TRACE_GENERIC_IC("unsupported combination of external and normal arrays");
+ DCHECK(!IsStoreInArrayLiteralICKind(kind()));
+ set_slow_stub_reason(
+ "unsupported combination of external and normal arrays");
return;
}
}
@@ -1837,7 +1808,8 @@ Handle<Object> KeyedStoreIC::StoreElementHandler(
store_mode == STORE_AND_GROW_NO_TRANSITION_HANDLE_COW ||
store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
store_mode == STORE_NO_TRANSITION_HANDLE_COW);
- DCHECK(!receiver_map->DictionaryElementsInPrototypeChainOnly());
+ DCHECK_IMPLIES(receiver_map->DictionaryElementsInPrototypeChainOnly(),
+ IsStoreInArrayLiteralICKind(kind()));
if (receiver_map->IsJSProxyMap()) {
return StoreHandler::StoreProxy(isolate());
@@ -1857,14 +1829,23 @@ Handle<Object> KeyedStoreIC::StoreElementHandler(
StoreFastElementStub(isolate(), is_jsarray, elements_kind, store_mode)
.GetCode();
if (receiver_map->has_fixed_typed_array_elements()) return stub;
+ } else if (IsStoreInArrayLiteralICKind(kind())) {
+ TRACE_HANDLER_STATS(isolate(), StoreInArrayLiteralIC_SlowStub);
+ stub = StoreInArrayLiteralSlowStub(isolate(), store_mode).GetCode();
} else {
TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_StoreElementStub);
DCHECK_EQ(DICTIONARY_ELEMENTS, elements_kind);
stub = StoreSlowElementStub(isolate(), store_mode).GetCode();
}
+
+ if (IsStoreInArrayLiteralICKind(kind())) return stub;
+
Handle<Object> validity_cell =
Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
- if (validity_cell.is_null()) return stub;
+ if (validity_cell->IsSmi()) {
+ // There's no prototype validity cell to check, so we can just use the stub.
+ return stub;
+ }
Handle<StoreHandler> handler = isolate()->factory()->NewStoreHandler(0);
handler->set_validity_cell(*validity_cell);
handler->set_smi_handler(*stub);
@@ -1895,7 +1876,7 @@ void KeyedStoreIC::StoreElementPolymorphicHandlers(
// TODO(mvstanton): Consider embedding store_mode in the state of the slow
// keyed store ic for uniformity.
TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_SlowStub);
- handler = BUILTIN_CODE(isolate(), KeyedStoreIC_Slow);
+ handler = slow_stub();
} else {
{
@@ -1968,12 +1949,8 @@ static KeyedAccessStoreMode GetStoreMode(Handle<JSObject> receiver,
receiver->map()->has_fixed_typed_array_elements() && oob_access) {
return STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS;
}
- Heap* heap = receiver->GetHeap();
- if (receiver->elements()->map() == heap->fixed_cow_array_map()) {
- return STORE_NO_TRANSITION_HANDLE_COW;
- } else {
- return STANDARD_STORE;
- }
+ return receiver->elements()->IsCowArray() ? STORE_NO_TRANSITION_HANDLE_COW
+ : STANDARD_STORE;
}
}
@@ -2009,8 +1986,8 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
Object);
if (vector_needs_update()) {
if (ConfigureVectorState(MEGAMORPHIC, key)) {
- TRACE_GENERIC_IC("unhandled internalized string key");
- TRACE_IC("StoreIC", key);
+ set_slow_stub_reason("unhandled internalized string key");
+ TraceIC("StoreIC", key);
}
}
return store_handle;
@@ -2026,7 +2003,7 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
// the runtime to enable optimization of element hole access.
Handle<HeapObject> heap_object = Handle<HeapObject>::cast(object);
if (heap_object->map()->IsMapInArrayPrototypeChain()) {
- TRACE_GENERIC_IC("map in array prototype");
+ set_slow_stub_reason("map in array prototype");
use_ic = false;
}
}
@@ -2057,6 +2034,9 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
}
DCHECK(store_handle.is_null());
+ bool receiver_was_cow =
+ object->IsJSArray() &&
+ Handle<JSArray>::cast(object)->elements()->IsCowArray();
ASSIGN_RETURN_ON_EXCEPTION(isolate(), store_handle,
Runtime::SetObjectProperty(isolate(), object, key,
value, language_mode()),
@@ -2065,45 +2045,92 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
if (use_ic) {
if (!old_receiver_map.is_null()) {
if (is_arguments) {
- TRACE_GENERIC_IC("arguments receiver");
+ set_slow_stub_reason("arguments receiver");
} else if (key_is_valid_index) {
if (old_receiver_map->is_abandoned_prototype_map()) {
- TRACE_GENERIC_IC("receiver with prototype map");
+ set_slow_stub_reason("receiver with prototype map");
} else if (!old_receiver_map
->DictionaryElementsInPrototypeChainOnly()) {
// We should go generic if receiver isn't a dictionary, but our
// prototype chain does have dictionary elements. This ensures that
// other non-dictionary receivers in the polymorphic case benefit
// from fast path keyed stores.
- UpdateStoreElement(old_receiver_map, store_mode);
+ UpdateStoreElement(old_receiver_map, store_mode, receiver_was_cow);
} else {
- TRACE_GENERIC_IC("dictionary or proxy prototype");
+ set_slow_stub_reason("dictionary or proxy prototype");
}
} else {
- TRACE_GENERIC_IC("non-smi-like key");
+ set_slow_stub_reason("non-smi-like key");
}
} else {
- TRACE_GENERIC_IC("non-JSObject receiver");
+ set_slow_stub_reason("non-JSObject receiver");
}
}
if (vector_needs_update()) {
ConfigureVectorState(MEGAMORPHIC, key);
}
- TRACE_IC("StoreIC", key);
+ TraceIC("StoreIC", key);
return store_handle;
}
+namespace {
+void StoreOwnElement(Handle<JSArray> array, Handle<Object> index,
+ Handle<Object> value) {
+ DCHECK(index->IsNumber());
+ bool success = false;
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ array->GetIsolate(), array, index, &success, LookupIterator::OWN);
+ DCHECK(success);
+
+ CHECK(JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, NONE,
+ kThrowOnError)
+ .FromJust());
+}
+} // namespace
+
+void StoreInArrayLiteralIC::Store(Handle<JSArray> array, Handle<Object> index,
+ Handle<Object> value) {
+ DCHECK(!array->map()->IsMapInArrayPrototypeChain());
+ DCHECK(index->IsNumber());
+
+ if (!FLAG_use_ic || MigrateDeprecated(array)) {
+ StoreOwnElement(array, index, value);
+ TraceIC("StoreInArrayLiteralIC", index);
+ return;
+ }
+
+ // TODO(neis): Convert HeapNumber to Smi if possible?
+
+ KeyedAccessStoreMode store_mode = STANDARD_STORE;
+ if (index->IsSmi()) {
+ DCHECK_GE(Smi::ToInt(*index), 0);
+ uint32_t index32 = static_cast<uint32_t>(Smi::ToInt(*index));
+ store_mode = GetStoreMode(array, index32, value);
+ }
+
+ Handle<Map> old_array_map(array->map(), isolate());
+ bool array_was_cow = array->elements()->IsCowArray();
+ StoreOwnElement(array, index, value);
-#undef TRACE_IC
+ if (index->IsSmi()) {
+ DCHECK(!old_array_map->is_abandoned_prototype_map());
+ UpdateStoreElement(old_array_map, store_mode, array_was_cow);
+ } else {
+ set_slow_stub_reason("index out of Smi range");
+ }
+ if (vector_needs_update()) {
+ ConfigureVectorState(MEGAMORPHIC, index);
+ }
+ TraceIC("StoreInArrayLiteralIC", index);
+}
// ----------------------------------------------------------------------------
// Static IC stub generators.
//
-// Used from ic-<arch>.cc.
RUNTIME_FUNCTION(Runtime_LoadIC_Miss) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
@@ -2137,7 +2164,6 @@ RUNTIME_FUNCTION(Runtime_LoadIC_Miss) {
}
}
-// Used from ic-<arch>.cc.
RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Miss) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
@@ -2199,7 +2225,6 @@ RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Slow) {
return *result;
}
-// Used from ic-<arch>.cc
RUNTIME_FUNCTION(Runtime_KeyedLoadIC_Miss) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
@@ -2214,7 +2239,6 @@ RUNTIME_FUNCTION(Runtime_KeyedLoadIC_Miss) {
RETURN_RESULT_OR_FAILURE(isolate, ic.Load(receiver, key));
}
-// Used from ic-<arch>.cc.
RUNTIME_FUNCTION(Runtime_StoreIC_Miss) {
HandleScope scope(isolate);
DCHECK_EQ(5, args.length());
@@ -2311,7 +2335,6 @@ RUNTIME_FUNCTION(Runtime_StoreGlobalIC_Slow) {
Runtime::SetObjectProperty(isolate, global, name, value, language_mode));
}
-// Used from ic-<arch>.cc.
RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Miss) {
HandleScope scope(isolate);
DCHECK_EQ(5, args.length());
@@ -2322,11 +2345,24 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Miss) {
Handle<Object> receiver = args.at(3);
Handle<Object> key = args.at(4);
FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
- KeyedStoreIC ic(isolate, vector, vector_slot);
- ic.UpdateState(receiver, key);
- RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
-}
+ FeedbackSlotKind kind = vector->GetKind(vector_slot);
+ // The elements store stubs miss into this function, but they are shared by
+ // different ICs.
+ if (IsKeyedStoreICKind(kind)) {
+ KeyedStoreIC ic(isolate, vector, vector_slot);
+ ic.UpdateState(receiver, key);
+ RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
+ } else {
+ DCHECK(IsStoreInArrayLiteralICKind(kind));
+ DCHECK(receiver->IsJSArray());
+ DCHECK(key->IsNumber());
+ StoreInArrayLiteralIC ic(isolate, vector, vector_slot);
+ ic.UpdateState(receiver, key);
+ ic.Store(Handle<JSArray>::cast(receiver), key, value);
+ return *value;
+ }
+}
RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Slow) {
HandleScope scope(isolate);
@@ -2338,12 +2374,24 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Slow) {
Handle<Object> object = args.at(3);
Handle<Object> key = args.at(4);
FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
- LanguageMode language_mode = vector->GetLanguageMode(vector_slot);
+ FeedbackSlotKind kind = vector->GetKind(vector_slot);
+ DCHECK(IsStoreICKind(kind) || IsKeyedStoreICKind(kind));
+ LanguageMode language_mode = GetLanguageModeFromSlotKind(kind);
RETURN_RESULT_OR_FAILURE(
isolate,
Runtime::SetObjectProperty(isolate, object, key, value, language_mode));
}
+RUNTIME_FUNCTION(Runtime_StoreInArrayLiteralIC_Slow) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
+ // Runtime functions don't follow the IC's calling convention.
+ Handle<Object> value = args.at(0);
+ Handle<Object> array = args.at(1);
+ Handle<Object> index = args.at(2);
+ StoreOwnElement(Handle<JSArray>::cast(array), index, value);
+ return *value;
+}
RUNTIME_FUNCTION(Runtime_ElementsTransitionAndStoreIC_Miss) {
HandleScope scope(isolate);
@@ -2356,14 +2404,23 @@ RUNTIME_FUNCTION(Runtime_ElementsTransitionAndStoreIC_Miss) {
Handle<Smi> slot = args.at<Smi>(4);
Handle<FeedbackVector> vector = args.at<FeedbackVector>(5);
FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
- LanguageMode language_mode = vector->GetLanguageMode(vector_slot);
+ FeedbackSlotKind kind = vector->GetKind(vector_slot);
+
if (object->IsJSObject()) {
JSObject::TransitionElementsKind(Handle<JSObject>::cast(object),
map->elements_kind());
}
- RETURN_RESULT_OR_FAILURE(
- isolate,
- Runtime::SetObjectProperty(isolate, object, key, value, language_mode));
+
+ if (IsStoreInArrayLiteralICKind(kind)) {
+ StoreOwnElement(Handle<JSArray>::cast(object), key, value);
+ return *value;
+ } else {
+ DCHECK(IsKeyedStoreICKind(kind) || IsStoreICKind(kind));
+ LanguageMode language_mode = GetLanguageModeFromSlotKind(kind);
+ RETURN_RESULT_OR_FAILURE(
+ isolate,
+ Runtime::SetObjectProperty(isolate, object, key, value, language_mode));
+ }
}
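
Editorial note: several runtime miss handlers above now branch on the feedback slot kind because the shared element store stubs can miss on behalf of either KeyedStoreIC or the new StoreInArrayLiteralIC. A minimal sketch of that dispatch idea follows; the enum values and handler bodies are illustrative stand-ins, not the real runtime entries.

    #include <cassert>
    #include <iostream>

    enum class FeedbackSlotKind { kKeyedStore, kStoreInArrayLiteral };

    void KeyedStoreMiss() { std::cout << "KeyedStoreIC path\n"; }
    void StoreInArrayLiteralMiss() { std::cout << "StoreInArrayLiteralIC path\n"; }

    void HandleStoreMiss(FeedbackSlotKind kind) {
      // The shared elements-store stubs miss into one runtime function, so the
      // runtime recovers which IC the feedback slot belongs to before acting.
      if (kind == FeedbackSlotKind::kKeyedStore) {
        KeyedStoreMiss();
      } else {
        assert(kind == FeedbackSlotKind::kStoreInArrayLiteral);
        StoreInArrayLiteralMiss();
      }
    }

    int main() {
      HandleStoreMiss(FeedbackSlotKind::kKeyedStore);
      HandleStoreMiss(FeedbackSlotKind::kStoreInArrayLiteral);
    }
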
diff --git a/deps/v8/src/ic/ic.h b/deps/v8/src/ic/ic.h
index 8a47d8d19c..020b8dcd26 100644
--- a/deps/v8/src/ic/ic.h
+++ b/deps/v8/src/ic/ic.h
@@ -7,8 +7,10 @@
#include <vector>
-#include "src/factory.h"
#include "src/feedback-vector.h"
+#include "src/heap/factory.h"
+#include "src/ic/stub-cache.h"
+#include "src/isolate.h"
#include "src/macro-assembler.h"
#include "src/messages.h"
#include "src/objects/map.h"
@@ -58,7 +60,7 @@ class IC {
}
bool IsAnyStore() const {
return IsStoreIC() || IsStoreOwnIC() || IsStoreGlobalIC() ||
- IsKeyedStoreIC();
+ IsKeyedStoreIC() || IsStoreInArrayLiteralICKind(kind());
}
static inline bool IsHandler(Object* object);
@@ -131,7 +133,10 @@ class IC {
bool IsStoreIC() const { return IsStoreICKind(kind_); }
bool IsStoreOwnIC() const { return IsStoreOwnICKind(kind_); }
bool IsKeyedStoreIC() const { return IsKeyedStoreICKind(kind_); }
- bool is_keyed() const { return IsKeyedLoadIC() || IsKeyedStoreIC(); }
+ bool is_keyed() const {
+ return IsKeyedLoadIC() || IsKeyedStoreIC() ||
+ IsStoreInArrayLiteralICKind(kind_);
+ }
bool ShouldRecomputeHandler(Handle<String> name);
Handle<Map> receiver_map() { return receiver_map_; }
@@ -227,8 +232,8 @@ class LoadIC : public IC {
return ShouldThrowReferenceError(kind());
}
- MUST_USE_RESULT MaybeHandle<Object> Load(Handle<Object> object,
- Handle<Name> name);
+ V8_WARN_UNUSED_RESULT MaybeHandle<Object> Load(Handle<Object> object,
+ Handle<Name> name);
protected:
virtual Handle<Code> slow_stub() const {
@@ -252,7 +257,7 @@ class LoadGlobalIC : public LoadIC {
FeedbackSlot slot)
: LoadIC(isolate, vector, slot) {}
- MUST_USE_RESULT MaybeHandle<Object> Load(Handle<Name> name);
+ V8_WARN_UNUSED_RESULT MaybeHandle<Object> Load(Handle<Name> name);
protected:
Handle<Code> slow_stub() const override {
@@ -266,8 +271,8 @@ class KeyedLoadIC : public LoadIC {
FeedbackSlot slot)
: LoadIC(isolate, vector, slot) {}
- MUST_USE_RESULT MaybeHandle<Object> Load(Handle<Object> object,
- Handle<Object> key);
+ V8_WARN_UNUSED_RESULT MaybeHandle<Object> Load(Handle<Object> object,
+ Handle<Object> key);
protected:
// receiver is HeapObject because it could be a String or a JSObject
@@ -300,7 +305,7 @@ class StoreIC : public IC {
LanguageMode language_mode() const { return nexus()->GetLanguageMode(); }
- MUST_USE_RESULT MaybeHandle<Object> Store(
+ V8_WARN_UNUSED_RESULT MaybeHandle<Object> Store(
Handle<Object> object, Handle<Name> name, Handle<Object> value,
JSReceiver::StoreFromKeyed store_mode =
JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED);
@@ -318,15 +323,12 @@ class StoreIC : public IC {
// Update the inline cache and the global stub cache based on the
// lookup result.
void UpdateCaches(LookupIterator* lookup, Handle<Object> value,
- JSReceiver::StoreFromKeyed store_mode,
- MaybeHandle<Object> cached_handler);
+ JSReceiver::StoreFromKeyed store_mode);
private:
Handle<Object> ComputeHandler(LookupIterator* lookup);
friend class IC;
-
- bool created_new_transition_ = false;
};
class StoreGlobalIC : public StoreIC {
@@ -335,8 +337,8 @@ class StoreGlobalIC : public StoreIC {
FeedbackSlot slot)
: StoreIC(isolate, vector, slot) {}
- MUST_USE_RESULT MaybeHandle<Object> Store(Handle<Name> name,
- Handle<Object> value);
+ V8_WARN_UNUSED_RESULT MaybeHandle<Object> Store(Handle<Name> name,
+ Handle<Object> value);
protected:
Handle<Code> slow_stub() const override {
@@ -360,13 +362,18 @@ class KeyedStoreIC : public StoreIC {
FeedbackSlot slot)
: StoreIC(isolate, vector, slot) {}
- MUST_USE_RESULT MaybeHandle<Object> Store(Handle<Object> object,
- Handle<Object> name,
- Handle<Object> value);
+ V8_WARN_UNUSED_RESULT MaybeHandle<Object> Store(Handle<Object> object,
+ Handle<Object> name,
+ Handle<Object> value);
protected:
void UpdateStoreElement(Handle<Map> receiver_map,
- KeyedAccessStoreMode store_mode);
+ KeyedAccessStoreMode store_mode,
+ bool receiver_was_cow);
+
+ Handle<Code> slow_stub() const override {
+ return BUILTIN_CODE(isolate(), KeyedStoreIC_Slow);
+ }
private:
Handle<Map> ComputeTransitionedMap(Handle<Map> map,
@@ -382,6 +389,22 @@ class KeyedStoreIC : public StoreIC {
friend class IC;
};
+class StoreInArrayLiteralIC : public KeyedStoreIC {
+ public:
+ StoreInArrayLiteralIC(Isolate* isolate, Handle<FeedbackVector> vector,
+ FeedbackSlot slot)
+ : KeyedStoreIC(isolate, vector, slot) {
+ DCHECK(IsStoreInArrayLiteralICKind(kind()));
+ }
+
+ void Store(Handle<JSArray> array, Handle<Object> index, Handle<Object> value);
+
+ private:
+ Handle<Code> slow_stub() const override {
+ return BUILTIN_CODE(isolate(), StoreInArrayLiteralIC_Slow);
+ }
+};
+
} // namespace internal
} // namespace v8
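
Editorial note: the header change above lets each store IC supply its own slow-path builtin through an overridable slow_stub() accessor, which is how StoreInArrayLiteralIC reuses the KeyedStoreIC machinery while swapping in its own slow builtin. A stripped-down sketch of that override pattern, with class and builtin names reduced to illustrative stand-ins:

    #include <iostream>
    #include <string>

    // Stand-in for Handle<Code>: just a builtin name for illustration.
    using CodeName = std::string;

    class KeyedStoreICSketch {
     public:
      virtual ~KeyedStoreICSketch() = default;
      void Miss() {
        // Shared miss logic picks up whichever slow stub the subclass provides.
        std::cout << "patching cache with " << slow_stub() << "\n";
      }

     protected:
      virtual CodeName slow_stub() const { return "KeyedStoreIC_Slow"; }
    };

    class StoreInArrayLiteralICSketch : public KeyedStoreICSketch {
     protected:
      CodeName slow_stub() const override { return "StoreInArrayLiteralIC_Slow"; }
    };

    int main() {
      KeyedStoreICSketch keyed;
      StoreInArrayLiteralICSketch literal;
      keyed.Miss();
      literal.Miss();
    }
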
diff --git a/deps/v8/src/ic/keyed-store-generic.cc b/deps/v8/src/ic/keyed-store-generic.cc
index 4997267ddd..f2f161f3b6 100644
--- a/deps/v8/src/ic/keyed-store-generic.cc
+++ b/deps/v8/src/ic/keyed-store-generic.cc
@@ -41,8 +41,15 @@ class KeyedStoreGenericAssembler : public AccessorAssembler {
Node* value, Node* context, Label* slow);
void EmitGenericPropertyStore(Node* receiver, Node* receiver_map,
- const StoreICParameters* p, Label* slow,
- UseStubCache use_stub_cache = kUseStubCache);
+ const StoreICParameters* p,
+ ExitPoint* exit_point, Label* slow,
+ bool assume_strict_language_mode = false);
+
+ void EmitGenericPropertyStore(Node* receiver, Node* receiver_map,
+ const StoreICParameters* p, Label* slow) {
+ ExitPoint direct_exit(this);
+ EmitGenericPropertyStore(receiver, receiver_map, p, &direct_exit, slow);
+ }
void BranchIfPrototypesHaveNonFastElements(Node* receiver_map,
Label* non_fast_elements,
@@ -77,13 +84,6 @@ class KeyedStoreGenericAssembler : public AccessorAssembler {
Variable* var_accessor_pair,
Variable* var_accessor_holder,
Label* readonly, Label* bailout);
-
- void CheckFieldType(Node* descriptors, Node* name_index, Node* representation,
- Node* value, Label* bailout);
- void OverwriteExistingFastProperty(Node* object, Node* object_map,
- Node* properties, Node* descriptors,
- Node* descriptor_name_index, Node* details,
- Node* value, Label* slow);
};
void KeyedStoreGenericGenerator::Generate(compiler::CodeAssemblerState* state) {
@@ -243,10 +243,11 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
GotoIf(IsDictionaryMap(receiver_map), slow);
// The length property is non-configurable, so it's guaranteed to always
// be the first property.
- Node* descriptors = LoadMapDescriptors(receiver_map);
- Node* details =
- LoadFixedArrayElement(descriptors, DescriptorArray::ToDetailsIndex(0));
- GotoIf(IsSetSmi(details, PropertyDetails::kAttributesReadOnlyMask), slow);
+ TNode<DescriptorArray> descriptors = LoadMapDescriptors(receiver_map);
+ TNode<Int32T> details = LoadAndUntagToWord32FixedArrayElement(
+ descriptors, DescriptorArray::ToDetailsIndex(0));
+ GotoIf(IsSetWord32(details, PropertyDetails::kAttributesReadOnlyMask),
+ slow);
}
STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize);
const int kHeaderSize = FixedArray::kHeaderSize - kHeapObjectTag;
@@ -547,8 +548,8 @@ void KeyedStoreGenericAssembler::LookupPropertyOnPrototypeChain(
Label next_proto(this);
{
Label found(this), found_fast(this), found_dict(this), found_global(this);
- VARIABLE(var_meta_storage, MachineRepresentation::kTagged);
- VARIABLE(var_entry, MachineType::PointerRepresentation());
+ TVARIABLE(HeapObject, var_meta_storage);
+ TVARIABLE(IntPtrT, var_entry);
TryLookupProperty(holder, holder_map, instance_type, name, &found_fast,
&found_dict, &found_global, &var_meta_storage,
&var_entry, &next_proto, bailout);
@@ -616,148 +617,9 @@ void KeyedStoreGenericAssembler::LookupPropertyOnPrototypeChain(
BIND(&ok_to_write);
}
-void KeyedStoreGenericAssembler::CheckFieldType(Node* descriptors,
- Node* name_index,
- Node* representation,
- Node* value, Label* bailout) {
- Label r_smi(this), r_double(this), r_heapobject(this), all_fine(this);
- // Ignore FLAG_track_fields etc. and always emit code for all checks,
- // because this builtin is part of the snapshot and therefore should
- // be flag independent.
- GotoIf(Word32Equal(representation, Int32Constant(Representation::kSmi)),
- &r_smi);
- GotoIf(Word32Equal(representation, Int32Constant(Representation::kDouble)),
- &r_double);
- GotoIf(
- Word32Equal(representation, Int32Constant(Representation::kHeapObject)),
- &r_heapobject);
- GotoIf(Word32Equal(representation, Int32Constant(Representation::kNone)),
- bailout);
- CSA_ASSERT(this, Word32Equal(representation,
- Int32Constant(Representation::kTagged)));
- Goto(&all_fine);
-
- BIND(&r_smi);
- { Branch(TaggedIsSmi(value), &all_fine, bailout); }
-
- BIND(&r_double);
- {
- GotoIf(TaggedIsSmi(value), &all_fine);
- Node* value_map = LoadMap(value);
- // While supporting mutable HeapNumbers would be straightforward, such
- // objects should not end up here anyway.
- CSA_ASSERT(this,
- WordNotEqual(value_map,
- LoadRoot(Heap::kMutableHeapNumberMapRootIndex)));
- Branch(IsHeapNumberMap(value_map), &all_fine, bailout);
- }
-
- BIND(&r_heapobject);
- {
- GotoIf(TaggedIsSmi(value), bailout);
- Node* field_type =
- LoadValueByKeyIndex<DescriptorArray>(descriptors, name_index);
- intptr_t kNoneType = reinterpret_cast<intptr_t>(FieldType::None());
- intptr_t kAnyType = reinterpret_cast<intptr_t>(FieldType::Any());
- // FieldType::None can't hold any value.
- GotoIf(WordEqual(field_type, IntPtrConstant(kNoneType)), bailout);
- // FieldType::Any can hold any value.
- GotoIf(WordEqual(field_type, IntPtrConstant(kAnyType)), &all_fine);
- CSA_ASSERT(this, IsWeakCell(field_type));
- // Cleared WeakCells count as FieldType::None, which can't hold any value.
- field_type = LoadWeakCellValue(field_type, bailout);
- // FieldType::Class(...) performs a map check.
- CSA_ASSERT(this, IsMap(field_type));
- Branch(WordEqual(LoadMap(value), field_type), &all_fine, bailout);
- }
-
- BIND(&all_fine);
-}
-
-void KeyedStoreGenericAssembler::OverwriteExistingFastProperty(
- Node* object, Node* object_map, Node* properties, Node* descriptors,
- Node* descriptor_name_index, Node* details, Node* value, Label* slow) {
- // Properties in descriptors can't be overwritten without map transition.
- GotoIf(Word32NotEqual(DecodeWord32<PropertyDetails::LocationField>(details),
- Int32Constant(kField)),
- slow);
-
- if (FLAG_track_constant_fields) {
- // TODO(ishell): Taking the slow path is not necessary if new and old
- // values are identical.
- GotoIf(Word32Equal(DecodeWord32<PropertyDetails::ConstnessField>(details),
- Int32Constant(kConst)),
- slow);
- }
-
- Label done(this);
- Node* representation =
- DecodeWord32<PropertyDetails::RepresentationField>(details);
-
- CheckFieldType(descriptors, descriptor_name_index, representation, value,
- slow);
- Node* field_index =
- DecodeWordFromWord32<PropertyDetails::FieldIndexField>(details);
- field_index =
- IntPtrAdd(field_index, LoadMapInobjectPropertiesStartInWords(object_map));
- Node* instance_size_in_words = LoadMapInstanceSizeInWords(object_map);
-
- Label inobject(this), backing_store(this);
- Branch(UintPtrLessThan(field_index, instance_size_in_words), &inobject,
- &backing_store);
-
- BIND(&inobject);
- {
- Node* field_offset = TimesPointerSize(field_index);
- Label tagged_rep(this), double_rep(this);
- Branch(Word32Equal(representation, Int32Constant(Representation::kDouble)),
- &double_rep, &tagged_rep);
- BIND(&double_rep);
- {
- Node* double_value = ChangeNumberToFloat64(value);
- if (FLAG_unbox_double_fields) {
- StoreObjectFieldNoWriteBarrier(object, field_offset, double_value,
- MachineRepresentation::kFloat64);
- } else {
- Node* mutable_heap_number = LoadObjectField(object, field_offset);
- StoreHeapNumberValue(mutable_heap_number, double_value);
- }
- Goto(&done);
- }
-
- BIND(&tagged_rep);
- {
- StoreObjectField(object, field_offset, value);
- Goto(&done);
- }
- }
-
- BIND(&backing_store);
- {
- Node* backing_store_index = IntPtrSub(field_index, instance_size_in_words);
- Label tagged_rep(this), double_rep(this);
- Branch(Word32Equal(representation, Int32Constant(Representation::kDouble)),
- &double_rep, &tagged_rep);
- BIND(&double_rep);
- {
- Node* double_value = ChangeNumberToFloat64(value);
- Node* mutable_heap_number =
- LoadFixedArrayElement(properties, backing_store_index);
- StoreHeapNumberValue(mutable_heap_number, double_value);
- Goto(&done);
- }
- BIND(&tagged_rep);
- {
- StoreFixedArrayElement(properties, backing_store_index, value);
- Goto(&done);
- }
- }
- BIND(&done);
-}
-
void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
- Node* receiver, Node* receiver_map, const StoreICParameters* p, Label* slow,
- UseStubCache use_stub_cache) {
+ Node* receiver, Node* receiver_map, const StoreICParameters* p,
+ ExitPoint* exit_point, Label* slow, bool assume_strict_language_mode) {
VARIABLE(var_accessor_pair, MachineRepresentation::kTagged);
VARIABLE(var_accessor_holder, MachineRepresentation::kTagged);
Label stub_cache(this), fast_properties(this), dictionary_properties(this),
@@ -771,8 +633,7 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
Comment("fast property store");
Node* descriptors = LoadMapDescriptors(receiver_map);
Label descriptor_found(this), lookup_transition(this);
- VARIABLE(var_name_index, MachineType::PointerRepresentation());
- Label* notfound = use_stub_cache == kUseStubCache ? &stub_cache : slow;
+ TVARIABLE(IntPtrT, var_name_index);
DescriptorLookup(p->name, descriptors, bitfield3, &descriptor_found,
&var_name_index, &lookup_transition);
@@ -795,49 +656,80 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
BIND(&data_property);
{
CheckForAssociatedProtector(p->name, slow);
- Node* properties = LoadFastProperties(receiver);
- OverwriteExistingFastProperty(receiver, receiver_map, properties,
- descriptors, name_index, details,
- p->value, slow);
- Return(p->value);
+ OverwriteExistingFastDataProperty(receiver, receiver_map, descriptors,
+ name_index, details, p->value, slow,
+ false);
+ exit_point->Return(p->value);
}
}
-
BIND(&lookup_transition);
{
Comment("lookup transition");
- VARIABLE(var_handler, MachineRepresentation::kTagged);
- Label check_key(this), found_handler(this, &var_handler);
- Node* maybe_handler =
+ TVARIABLE(Map, var_transition_map);
+ Label simple_transition(this), transition_array(this),
+ found_handler_candidate(this);
+ TNode<Object> maybe_handler =
LoadObjectField(receiver_map, Map::kTransitionsOrPrototypeInfoOffset);
- GotoIf(TaggedIsSmi(maybe_handler), notfound);
- GotoIf(HasInstanceType(maybe_handler, STORE_HANDLER_TYPE), &check_key);
- // TODO(jkummerow): Consider implementing TransitionArray search.
- Goto(notfound);
+ // SMI -> slow
+ // cleared weak reference -> slow
+ // weak reference -> simple_transition
+ // strong reference -> transition_array
+ VARIABLE(var_transition_map_or_array, MachineRepresentation::kTagged);
+ DispatchMaybeObject(maybe_handler, slow, slow, &simple_transition,
+ &transition_array, &var_transition_map_or_array);
+
+ BIND(&simple_transition);
+ {
+ var_transition_map = CAST(var_transition_map_or_array.value());
+ Goto(&found_handler_candidate);
+ }
- BIND(&check_key);
+ BIND(&transition_array);
{
- Node* transition_cell =
- LoadObjectField(maybe_handler, StoreHandler::kData1Offset);
- Node* transition = LoadWeakCellValue(transition_cell, slow);
- Node* transition_bitfield3 = LoadMapBitField3(transition);
- GotoIf(IsSetWord32<Map::IsDeprecatedBit>(transition_bitfield3), slow);
- Node* nof =
- DecodeWord32<Map::NumberOfOwnDescriptorsBits>(transition_bitfield3);
- Node* last_added = Int32Sub(nof, Int32Constant(1));
- Node* transition_descriptors = LoadMapDescriptors(transition);
- Node* key = DescriptorArrayGetKey(transition_descriptors, last_added);
- GotoIf(WordNotEqual(key, p->name), slow);
- var_handler.Bind(maybe_handler);
- Goto(&found_handler);
+ TNode<Map> maybe_handler_map =
+ LoadMap(CAST(var_transition_map_or_array.value()));
+ GotoIfNot(IsTransitionArrayMap(maybe_handler_map), slow);
+
+ TVARIABLE(IntPtrT, var_name_index);
+ Label if_found_candidate(this);
+ TNode<TransitionArray> transitions =
+ CAST(var_transition_map_or_array.value());
+ TransitionLookup(p->name, transitions, &if_found_candidate,
+ &var_name_index, slow);
+
+ BIND(&if_found_candidate);
+ {
+ // Given that
+ // 1) transitions with the same name are ordered in the transition
+ // array by PropertyKind and then by PropertyAttributes values,
+ // 2) kData < kAccessor,
+ // 3) NONE == 0,
+ // 4) properties with private symbol names are guaranteed to be
+ // non-enumerable (so DONT_ENUM bit in attributes is always set),
+ // the resulting map of transitioning store if it exists in the
+ // transition array is expected to be the first among the transitions
+ // with the same name.
+ // See TransitionArray::CompareDetails() for details.
+ STATIC_ASSERT(kData == 0);
+ STATIC_ASSERT(NONE == 0);
+ const int kKeyToTargetOffset = (TransitionArray::kEntryTargetIndex -
+ TransitionArray::kEntryKeyIndex) *
+ kPointerSize;
+ TNode<WeakCell> transition_map_weak_cell = CAST(LoadFixedArrayElement(
+ transitions, var_name_index.value(), kKeyToTargetOffset));
+ var_transition_map =
+ CAST(LoadWeakCellValue(transition_map_weak_cell, slow));
+ Goto(&found_handler_candidate);
+ }
}
- BIND(&found_handler);
+ BIND(&found_handler_candidate);
{
- Comment("KeyedStoreGeneric found transition handler");
- HandleStoreICHandlerCase(p, var_handler.value(), notfound,
- ICMode::kNonGlobalIC);
+ // Validate the transition handler candidate and apply the transition.
+ HandleStoreICTransitionMapHandlerCase(p, var_transition_map.value(),
+ slow, true);
+ exit_point->Return(p->value);
}
}
}
@@ -871,7 +763,7 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
CheckForAssociatedProtector(p->name, slow);
StoreValueByKeyIndex<NameDictionary>(properties, var_name_index.value(),
p->value);
- Return(p->value);
+ exit_point->Return(p->value);
}
}
@@ -891,11 +783,11 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
InvalidateValidityCellIfPrototype(receiver_map, bitfield2);
Add<NameDictionary>(properties, p->name, p->value,
&add_dictionary_property_slow);
- Return(p->value);
+ exit_point->Return(p->value);
BIND(&add_dictionary_property_slow);
- TailCallRuntime(Runtime::kAddDictionaryProperty, p->context, p->receiver,
- p->name, p->value);
+ exit_point->ReturnCallRuntime(Runtime::kAddDictionaryProperty, p->context,
+ p->receiver, p->name, p->value);
}
}
@@ -913,13 +805,17 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
Callable callable = CodeFactory::Call(isolate());
CallJS(callable, p->context, setter, receiver, p->value);
- Return(p->value);
+ exit_point->Return(p->value);
BIND(&not_callable);
{
Label strict(this);
- BranchIfStrictMode(p->vector, p->slot, &strict);
- Return(p->value);
+ if (assume_strict_language_mode) {
+ Goto(&strict);
+ } else {
+ BranchIfStrictMode(p->vector, p->slot, &strict);
+ exit_point->Return(p->value);
+ }
BIND(&strict);
{
@@ -932,9 +828,12 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
BIND(&readonly);
{
Label strict(this);
- BranchIfStrictMode(p->vector, p->slot, &strict);
- Return(p->value);
-
+ if (assume_strict_language_mode) {
+ Goto(&strict);
+ } else {
+ BranchIfStrictMode(p->vector, p->slot, &strict);
+ exit_point->Return(p->value);
+ }
BIND(&strict);
{
Node* type = Typeof(p->receiver);
@@ -942,27 +841,6 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
p->name, type, p->receiver);
}
}
-
- if (use_stub_cache == kUseStubCache) {
- BIND(&stub_cache);
- Comment("stub cache probe");
- VARIABLE(var_handler, MachineRepresentation::kTagged);
- Label found_handler(this, &var_handler), stub_cache_miss(this);
- TryProbeStubCache(isolate()->store_stub_cache(), receiver, p->name,
- &found_handler, &var_handler, &stub_cache_miss);
- BIND(&found_handler);
- {
- Comment("KeyedStoreGeneric found handler");
- HandleStoreICHandlerCase(p, var_handler.value(), &stub_cache_miss,
- ICMode::kNonGlobalIC);
- }
- BIND(&stub_cache_miss);
- {
- Comment("KeyedStoreGeneric_miss");
- TailCallRuntime(Runtime::kKeyedStoreIC_Miss, p->context, p->value,
- p->slot, p->vector, p->receiver, p->name);
- }
- }
}
void KeyedStoreGenericAssembler::KeyedStoreGeneric() {
@@ -1050,9 +928,7 @@ void KeyedStoreGenericAssembler::StoreIC_Uninitialized() {
Node* instance_type = LoadMapInstanceType(receiver_map);
// Receivers requiring non-standard element accesses (interceptors, access
// checks, strings and string wrappers, proxies) are handled in the runtime.
- GotoIf(Int32LessThanOrEqual(instance_type,
- Int32Constant(LAST_SPECIAL_RECEIVER_TYPE)),
- &miss);
+ GotoIf(IsSpecialReceiverInstanceType(instance_type), &miss);
// Optimistically write the state transition to the vector.
StoreFeedbackVectorSlot(vector, slot,
@@ -1060,8 +936,7 @@ void KeyedStoreGenericAssembler::StoreIC_Uninitialized() {
SKIP_WRITE_BARRIER, 0, SMI_PARAMETERS);
StoreICParameters p(context, receiver, name, value, slot, vector);
- EmitGenericPropertyStore(receiver, receiver_map, &p, &miss,
- kDontUseStubCache);
+ EmitGenericPropertyStore(receiver, receiver_map, &p, &miss);
BIND(&miss);
{
diff --git a/deps/v8/src/inspector/injected-script-source.js b/deps/v8/src/inspector/injected-script-source.js
index 0849d44202..5e5302fbbe 100644
--- a/deps/v8/src/inspector/injected-script-source.js
+++ b/deps/v8/src/inspector/injected-script-source.js
@@ -66,6 +66,8 @@ function toString(obj)
}
/**
+ * TODO(luoe): remove type-check suppression once bigint is supported by closure.
+ * @suppress {checkTypes}
* @param {*} obj
* @return {string}
*/
@@ -73,6 +75,8 @@ function toStringDescription(obj)
{
if (typeof obj === "number" && obj === 0 && 1 / obj < 0)
return "-0"; // Negative zero.
+ if (typeof obj === "bigint")
+ return toString(obj) + "n";
return toString(obj);
}
@@ -167,6 +171,7 @@ InjectedScript.primitiveTypes = {
"boolean": true,
"number": true,
"string": true,
+ "bigint": true,
__proto__: null
}
@@ -747,6 +752,13 @@ InjectedScript.RemoteObject = function(object, objectGroupName, doNotBind, force
}
}
+ // The "n" suffix of bigint primitives are not JSON serializable.
+ if (this.type === "bigint") {
+ delete this.value;
+ this.description = toStringDescription(object);
+ this.unserializableValue = this.description;
+ }
+
return;
}
@@ -772,7 +784,7 @@ InjectedScript.RemoteObject = function(object, objectGroupName, doNotBind, force
if (generatePreview && this.type === "object") {
if (this.subtype === "proxy")
this.preview = this._generatePreview(InjectedScriptHost.proxyTargetValue(object), undefined, columnNames, isTable, skipEntriesPreview);
- else if (this.subtype !== "node")
+ else
this.preview = this._generatePreview(object, undefined, columnNames, isTable, skipEntriesPreview);
}
@@ -1002,9 +1014,10 @@ InjectedScript.RemoteObject.prototype = {
var maxLength = 100;
if (InjectedScript.primitiveTypes[type]) {
- if (type === "string" && value.length > maxLength)
- value = this._abbreviateString(value, maxLength, true);
- push(preview.properties, { name: name, type: type, value: toStringDescription(value), __proto__: null });
+ var valueString = type === "string" ? value : toStringDescription(value);
+ if (valueString.length > maxLength)
+ valueString = this._abbreviateString(valueString, maxLength, true);
+ push(preview.properties, { name: name, type: type, value: valueString, __proto__: null });
continue;
}
diff --git a/deps/v8/src/inspector/injected-script.cc b/deps/v8/src/inspector/injected-script.cc
index d13e5f8695..662a5678b0 100644
--- a/deps/v8/src/inspector/injected-script.cc
+++ b/deps/v8/src/inspector/injected-script.cc
@@ -50,6 +50,9 @@ namespace v8_inspector {
namespace {
static const char privateKeyName[] = "v8-inspector#injectedScript";
static const char kGlobalHandleLabel[] = "DevTools console";
+static bool isResolvableNumberLike(String16 query) {
+ return query == "Infinity" || query == "-Infinity" || query == "NaN";
+}
} // namespace
using protocol::Array;
@@ -530,10 +533,17 @@ Response InjectedScript::resolveCallArgument(
return findObject(*remoteObjectId, result);
}
if (callArgument->hasValue() || callArgument->hasUnserializableValue()) {
- String16 value =
- callArgument->hasValue()
- ? "(" + callArgument->getValue(nullptr)->serialize() + ")"
- : "Number(\"" + callArgument->getUnserializableValue("") + "\")";
+ String16 value;
+ if (callArgument->hasValue()) {
+ value = "(" + callArgument->getValue(nullptr)->serialize() + ")";
+ } else {
+ String16 unserializableValue = callArgument->getUnserializableValue("");
+ // Protect against potential identifier resolution for NaN and Infinity.
+ if (isResolvableNumberLike(unserializableValue))
+ value = "Number(\"" + unserializableValue + "\")";
+ else
+ value = unserializableValue;
+ }
if (!m_context->inspector()
->compileAndRunInternalScript(
m_context->context(), toV8String(m_context->isolate(), value))
@@ -608,6 +618,9 @@ Response InjectedScript::wrapEvaluateResult(
m_lastEvaluationResult.AnnotateStrongRetainer(kGlobalHandleLabel);
}
} else {
+ if (tryCatch.HasTerminated() || !tryCatch.CanContinue()) {
+ return Response::Error("Execution was terminated");
+ }
v8::Local<v8::Value> exception = tryCatch.Exception();
Response response =
wrapObject(exception, objectGroup, false,
@@ -641,6 +654,7 @@ InjectedScript::Scope::Scope(V8InspectorSessionImpl* session)
m_ignoreExceptionsAndMuteConsole(false),
m_previousPauseOnExceptionsState(v8::debug::NoBreakOnException),
m_userGesture(false),
+ m_allowEval(false),
m_contextGroupId(session->contextGroupId()),
m_sessionId(session->sessionId()) {}
@@ -653,6 +667,7 @@ Response InjectedScript::Scope::initialize() {
if (!response.isSuccess()) return response;
m_context = m_injectedScript->context()->context();
m_context->Enter();
+ if (m_allowEval) m_context->AllowCodeGenerationFromStrings(true);
return Response::OK();
}
@@ -688,9 +703,17 @@ void InjectedScript::Scope::pretendUserGesture() {
m_inspector->client()->beginUserGesture();
}
+void InjectedScript::Scope::allowCodeGenerationFromStrings() {
+ DCHECK(!m_allowEval);
+ if (m_context->IsCodeGenerationFromStringsAllowed()) return;
+ m_allowEval = true;
+ m_context->AllowCodeGenerationFromStrings(true);
+}
+
void InjectedScript::Scope::cleanup() {
m_commandLineAPIScope.reset();
if (!m_context.IsEmpty()) {
+ if (m_allowEval) m_context->AllowCodeGenerationFromStrings(false);
m_context->Exit();
m_context.Clear();
}
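The m_allowEval bookkeeping added above is a scoped toggle of the context's code-generation-from-strings flag: turn it on only if it was off, and restore it in cleanup(). A standalone RAII sketch of the same pattern against the public v8::Context API (not part of the patch; names are illustrative):

    #include "v8.h"  // V8 public API header; the include path depends on the embedder.

    // Allow eval for the lifetime of the scope, then restore the previous state.
    class AllowEvalScope {
     public:
      explicit AllowEvalScope(v8::Local<v8::Context> context)
          : context_(context),
            was_allowed_(context->IsCodeGenerationFromStringsAllowed()) {
        if (!was_allowed_) context_->AllowCodeGenerationFromStrings(true);
      }
      ~AllowEvalScope() {
        if (!was_allowed_) context_->AllowCodeGenerationFromStrings(false);
      }

     private:
      v8::Local<v8::Context> context_;
      bool was_allowed_;
    };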
diff --git a/deps/v8/src/inspector/injected-script.h b/deps/v8/src/inspector/injected-script.h
index 90a1ed3171..32969a6e7c 100644
--- a/deps/v8/src/inspector/injected-script.h
+++ b/deps/v8/src/inspector/injected-script.h
@@ -120,6 +120,7 @@ class InjectedScript final {
void installCommandLineAPI();
void ignoreExceptionsAndMuteConsole();
void pretendUserGesture();
+ void allowCodeGenerationFromStrings();
v8::Local<v8::Context> context() const { return m_context; }
InjectedScript* injectedScript() const { return m_injectedScript; }
const v8::TryCatch& tryCatch() const { return m_tryCatch; }
@@ -144,6 +145,7 @@ class InjectedScript final {
bool m_ignoreExceptionsAndMuteConsole;
v8::debug::ExceptionBreakState m_previousPauseOnExceptionsState;
bool m_userGesture;
+ bool m_allowEval;
int m_contextGroupId;
int m_sessionId;
};
diff --git a/deps/v8/src/inspector/inspector_protocol_config.json b/deps/v8/src/inspector/inspector_protocol_config.json
index fdb2b64b90..fa073128b3 100644
--- a/deps/v8/src/inspector/inspector_protocol_config.json
+++ b/deps/v8/src/inspector/inspector_protocol_config.json
@@ -11,7 +11,7 @@
},
{
"domain": "Runtime",
- "async": ["evaluate", "awaitPromise", "callFunctionOn", "runScript"],
+ "async": ["evaluate", "awaitPromise", "callFunctionOn", "runScript", "terminateExecution"],
"exported": ["StackTrace", "StackTraceId", "RemoteObject", "ExecutionContextId"]
},
{
diff --git a/deps/v8/src/inspector/js_protocol.json b/deps/v8/src/inspector/js_protocol.json
index a0f7fcd7ed..eb8785dfde 100644
--- a/deps/v8/src/inspector/js_protocol.json
+++ b/deps/v8/src/inspector/js_protocol.json
@@ -1781,14 +1781,8 @@
},
{
"id": "UnserializableValue",
- "description": "Primitive value which cannot be JSON-stringified.",
- "type": "string",
- "enum": [
- "Infinity",
- "NaN",
- "-Infinity",
- "-0"
- ]
+ "description": "Primitive value which cannot be JSON-stringified. Includes values `-0`, `NaN`, `Infinity`,\n`-Infinity`, and bigint literals.",
+ "type": "string"
},
{
"id": "RemoteObject",
@@ -1806,7 +1800,8 @@
"string",
"number",
"boolean",
- "symbol"
+ "symbol",
+ "bigint"
]
},
{
@@ -1922,7 +1917,8 @@
"string",
"number",
"boolean",
- "symbol"
+ "symbol",
+ "bigint"
]
},
{
@@ -1997,7 +1993,8 @@
"number",
"boolean",
"symbol",
- "accessor"
+ "accessor",
+ "bigint"
]
},
{
@@ -2573,6 +2570,13 @@
"description": "Whether execution should `await` for resulting value and return once awaited promise is\nresolved.",
"optional": true,
"type": "boolean"
+ },
+ {
+ "name": "throwOnSideEffect",
+ "description": "Whether to throw an exception if side effect cannot be ruled out during evaluation.",
+ "experimental": true,
+ "optional": true,
+ "type": "boolean"
}
],
"returns": [
@@ -2590,6 +2594,35 @@
]
},
{
+ "name": "getIsolateId",
+ "description": "Returns the isolate id.",
+ "experimental": true,
+ "returns": [
+ {
+ "name": "id",
+ "description": "The isolate id.",
+ "type": "string"
+ }
+ ]
+ },
+ {
+ "name": "getHeapUsage",
+ "description": "Returns the JavaScript heap usage.\nIt is the total usage of the corresponding isolate not scoped to a particular Runtime.",
+ "experimental": true,
+ "returns": [
+ {
+ "name": "usedSize",
+ "description": "Used heap size in bytes.",
+ "type": "number"
+ },
+ {
+ "name": "totalSize",
+ "description": "Allocated heap size in bytes.",
+ "type": "number"
+ }
+ ]
+ },
+ {
"name": "getProperties",
"description": "Returns properties of a given object. Object group of the result is inherited from the target\nobject.",
"parameters": [
@@ -2673,6 +2706,12 @@
"name": "prototypeObjectId",
"description": "Identifier of the prototype to return objects for.",
"$ref": "RemoteObjectId"
+ },
+ {
+ "name": "objectGroup",
+ "description": "Symbolic group name that can be used to release the results.",
+ "optional": true,
+ "type": "string"
}
],
"returns": [
@@ -2784,6 +2823,11 @@
"type": "boolean"
}
]
+ },
+ {
+ "name": "terminateExecution",
+ "description": "Terminate current or next JavaScript execution.\nWill cancel the termination when the outer-most script execution ends.",
+ "experimental": true
}
],
"events": [
diff --git a/deps/v8/src/inspector/js_protocol.pdl b/deps/v8/src/inspector/js_protocol.pdl
index 5a23199e4a..d6e239cfb5 100644
--- a/deps/v8/src/inspector/js_protocol.pdl
+++ b/deps/v8/src/inspector/js_protocol.pdl
@@ -823,13 +823,9 @@ domain Runtime
# Unique object identifier.
type RemoteObjectId extends string
- # Primitive value which cannot be JSON-stringified.
+ # Primitive value which cannot be JSON-stringified. Includes values `-0`, `NaN`, `Infinity`,
+ # `-Infinity`, and bigint literals.
type UnserializableValue extends string
- enum
- Infinity
- NaN
- -Infinity
- -0
# Mirror object referencing original JavaScript object.
type RemoteObject extends object
@@ -843,6 +839,7 @@ domain Runtime
number
boolean
symbol
+ bigint
# Object subtype hint. Specified for `object` type values only.
optional enum subtype
array
@@ -895,6 +892,7 @@ domain Runtime
number
boolean
symbol
+ bigint
# Object subtype hint. Specified for `object` type values only.
optional enum subtype
array
@@ -932,6 +930,7 @@ domain Runtime
boolean
symbol
accessor
+ bigint
# User-friendly property value string.
optional string value
# Nested value preview.
@@ -1188,12 +1187,29 @@ domain Runtime
# Whether execution should `await` for resulting value and return once awaited promise is
# resolved.
optional boolean awaitPromise
+ # Whether to throw an exception if side effect cannot be ruled out during evaluation.
+ experimental optional boolean throwOnSideEffect
returns
# Evaluation result.
RemoteObject result
# Exception details.
optional ExceptionDetails exceptionDetails
+ # Returns the isolate id.
+ experimental command getIsolateId
+ returns
+ # The isolate id.
+ string id
+
+ # Returns the JavaScript heap usage.
+ # It is the total usage of the corresponding isolate not scoped to a particular Runtime.
+ experimental command getHeapUsage
+ returns
+ # Used heap size in bytes.
+ number usedSize
+ # Allocated heap size in bytes.
+ number totalSize
+
# Returns properties of a given object. Object group of the result is inherited from the target
# object.
command getProperties
@@ -1228,6 +1244,8 @@ domain Runtime
parameters
# Identifier of the prototype to return objects for.
RemoteObjectId prototypeObjectId
+ # Symbolic group name that can be used to release the results.
+ optional string objectGroup
returns
# Array with objects.
RemoteObject objects
@@ -1279,6 +1297,10 @@ domain Runtime
parameters
boolean enabled
+ # Terminate current or next JavaScript execution.
+ # Will cancel the termination when the outer-most script execution ends.
+ experimental command terminateExecution
+
# Issued when console API was called.
event consoleAPICalled
parameters
diff --git a/deps/v8/src/inspector/string-16.cc b/deps/v8/src/inspector/string-16.cc
index 43343c887b..f5d8556da8 100644
--- a/deps/v8/src/inspector/string-16.cc
+++ b/deps/v8/src/inspector/string-16.cc
@@ -493,22 +493,39 @@ void String16Builder::append(const char* characters, size_t length) {
}
void String16Builder::appendNumber(int number) {
- const int kBufferSize = 11;
+ constexpr int kBufferSize = 11;
char buffer[kBufferSize];
int chars = v8::base::OS::SNPrintF(buffer, kBufferSize, "%d", number);
- DCHECK_GT(kBufferSize, chars);
+ DCHECK_LE(0, chars);
m_buffer.insert(m_buffer.end(), buffer, buffer + chars);
}
void String16Builder::appendNumber(size_t number) {
- const int kBufferSize = 20;
+ constexpr int kBufferSize = 20;
char buffer[kBufferSize];
#if !defined(_WIN32) && !defined(_WIN64)
int chars = v8::base::OS::SNPrintF(buffer, kBufferSize, "%zu", number);
#else
int chars = v8::base::OS::SNPrintF(buffer, kBufferSize, "%Iu", number);
#endif
- DCHECK_GT(kBufferSize, chars);
+ DCHECK_LE(0, chars);
+ m_buffer.insert(m_buffer.end(), buffer, buffer + chars);
+}
+
+void String16Builder::appendUnsignedAsHex(uint64_t number) {
+ constexpr int kBufferSize = 17;
+ char buffer[kBufferSize];
+ int chars =
+ v8::base::OS::SNPrintF(buffer, kBufferSize, "%016" PRIx64, number);
+ DCHECK_LE(0, chars);
+ m_buffer.insert(m_buffer.end(), buffer, buffer + chars);
+}
+
+void String16Builder::appendUnsignedAsHex(uint32_t number) {
+ constexpr int kBufferSize = 9;
+ char buffer[kBufferSize];
+ int chars = v8::base::OS::SNPrintF(buffer, kBufferSize, "%08" PRIx32, number);
+ DCHECK_LE(0, chars);
m_buffer.insert(m_buffer.end(), buffer, buffer + chars);
}
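The buffer sizes follow from the format strings: "%016" PRIx64 always produces 16 hex digits (plus the terminating NUL, hence 17) and "%08" PRIx32 produces 8 (hence 9). A plain-C++ check of the same sizing, independent of v8::base::OS::SNPrintF:

    #include <cinttypes>
    #include <cstdio>

    int main() {
      char buf64[17];  // 16 hex digits for a uint64_t + '\0'
      char buf32[9];   //  8 hex digits for a uint32_t + '\0'
      std::snprintf(buf64, sizeof(buf64), "%016" PRIx64, UINT64_C(0xDEADBEEFCAFEF00D));
      std::snprintf(buf32, sizeof(buf32), "%08" PRIx32, UINT32_C(0xDEADBEEF));
      std::printf("%s %s\n", buf64, buf32);  // deadbeefcafef00d deadbeef
      return 0;
    }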
diff --git a/deps/v8/src/inspector/string-16.h b/deps/v8/src/inspector/string-16.h
index 7d6867dfc3..461b2961e1 100644
--- a/deps/v8/src/inspector/string-16.h
+++ b/deps/v8/src/inspector/string-16.h
@@ -112,6 +112,8 @@ class String16Builder {
void append(const char*, size_t);
void appendNumber(int);
void appendNumber(size_t);
+ void appendUnsignedAsHex(uint64_t);
+ void appendUnsignedAsHex(uint32_t);
String16 toString();
void reserveCapacity(size_t);
diff --git a/deps/v8/src/inspector/v8-console-message.cc b/deps/v8/src/inspector/v8-console-message.cc
index ea50a8dfee..83aee25825 100644
--- a/deps/v8/src/inspector/v8-console-message.cc
+++ b/deps/v8/src/inspector/v8-console-message.cc
@@ -92,6 +92,9 @@ class V8ValueStringBuilder {
if (value->IsString()) return append(v8::Local<v8::String>::Cast(value));
if (value->IsStringObject())
return append(v8::Local<v8::StringObject>::Cast(value)->ValueOf());
+ if (value->IsBigInt()) return append(v8::Local<v8::BigInt>::Cast(value));
+ if (value->IsBigIntObject())
+ return append(v8::Local<v8::BigIntObject>::Cast(value)->ValueOf());
if (value->IsSymbol()) return append(v8::Local<v8::Symbol>::Cast(value));
if (value->IsSymbolObject())
return append(v8::Local<v8::SymbolObject>::Cast(value)->ValueOf());
@@ -156,6 +159,13 @@ class V8ValueStringBuilder {
return result;
}
+ bool append(v8::Local<v8::BigInt> bigint) {
+ bool result = append(bigint->ToString());
+ if (m_tryCatch.HasCaught()) return false;
+ m_builder.append('n');
+ return result;
+ }
+
bool append(v8::Local<v8::String> string) {
if (m_tryCatch.HasCaught()) return false;
if (!string.IsEmpty()) m_builder.append(toProtocolString(string));
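The new bigint branch gives these values the same console rendering DevTools uses elsewhere: the decimal digits followed by an 'n' suffix. A hedged fragment showing the equivalent conversion through the public API; `value` and `context` are assumed to exist, and the context-taking ToString overload stands in for the no-argument ToString() used above:

    // Illustrative fragment; `value` (v8::Local<v8::Value>) and `context`
    // (v8::Local<v8::Context>) are assumed.
    if (value->IsBigInt()) {
      v8::Local<v8::String> digits;
      if (value->ToString(context).ToLocal(&digits)) {
        // The builder appends an 'n' after the digits so the value reads as a
        // bigint literal, e.g. 123n prints as "123n".
      }
    }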
diff --git a/deps/v8/src/inspector/v8-console.cc b/deps/v8/src/inspector/v8-console.cc
index fa04209dec..6f3278ee9f 100644
--- a/deps/v8/src/inspector/v8-console.cc
+++ b/deps/v8/src/inspector/v8-console.cc
@@ -484,23 +484,15 @@ void V8Console::valuesCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
static void setFunctionBreakpoint(ConsoleHelper& helper, int sessionId,
v8::Local<v8::Function> function,
V8DebuggerAgentImpl::BreakpointSource source,
- const String16& condition, bool enable) {
- String16 scriptId = String16::fromInteger(function->ScriptId());
- int lineNumber = function->GetScriptLineNumber();
- int columnNumber = function->GetScriptColumnNumber();
- if (lineNumber == v8::Function::kLineOffsetNotFound ||
- columnNumber == v8::Function::kLineOffsetNotFound)
- return;
-
- if (V8InspectorSessionImpl* session = helper.session(sessionId)) {
- if (!session->debuggerAgent()->enabled()) return;
- if (enable) {
- session->debuggerAgent()->setBreakpointAt(
- scriptId, lineNumber, columnNumber, source, condition);
- } else {
- session->debuggerAgent()->removeBreakpointAt(scriptId, lineNumber,
- columnNumber, source);
- }
+ v8::Local<v8::String> condition,
+ bool enable) {
+ V8InspectorSessionImpl* session = helper.session(sessionId);
+ if (session == nullptr) return;
+ if (!session->debuggerAgent()->enabled()) return;
+ if (enable) {
+ session->debuggerAgent()->setBreakpointFor(function, condition, source);
+ } else {
+ session->debuggerAgent()->removeBreakpointFor(function, source);
}
}
@@ -509,10 +501,14 @@ void V8Console::debugFunctionCallback(
v8::debug::ConsoleCallArguments args(info);
ConsoleHelper helper(args, v8::debug::ConsoleContext(), m_inspector);
v8::Local<v8::Function> function;
+ v8::Local<v8::String> condition;
if (!helper.firstArgAsFunction().ToLocal(&function)) return;
+ if (args.Length() > 1 && args[1]->IsString()) {
+ condition = args[1].As<v8::String>();
+ }
setFunctionBreakpoint(helper, sessionId, function,
V8DebuggerAgentImpl::DebugCommandBreakpointSource,
- String16(), true);
+ condition, true);
}
void V8Console::undebugFunctionCallback(
@@ -523,7 +519,7 @@ void V8Console::undebugFunctionCallback(
if (!helper.firstArgAsFunction().ToLocal(&function)) return;
setFunctionBreakpoint(helper, sessionId, function,
V8DebuggerAgentImpl::DebugCommandBreakpointSource,
- String16(), false);
+ v8::Local<v8::String>(), false);
}
void V8Console::monitorFunctionCallback(
@@ -547,7 +543,8 @@ void V8Console::monitorFunctionCallback(
"Array.prototype.join.call(arguments, \", \") : \"\")) && false");
setFunctionBreakpoint(helper, sessionId, function,
V8DebuggerAgentImpl::MonitorCommandBreakpointSource,
- builder.toString(), true);
+ toV8String(info.GetIsolate(), builder.toString()),
+ true);
}
void V8Console::unmonitorFunctionCallback(
@@ -558,7 +555,7 @@ void V8Console::unmonitorFunctionCallback(
if (!helper.firstArgAsFunction().ToLocal(&function)) return;
setFunctionBreakpoint(helper, sessionId, function,
V8DebuggerAgentImpl::MonitorCommandBreakpointSource,
- String16(), false);
+ v8::Local<v8::String>(), false);
}
void V8Console::lastEvaluationResultCallback(
@@ -711,7 +708,7 @@ v8::Local<v8::Object> V8Console::createCommandLineAPI(
createBoundFunctionProperty(
context, commandLineAPI, data, "debug",
&V8Console::call<&V8Console::debugFunctionCallback>,
- "function debug(function) { [Command Line API] }");
+ "function debug(function, condition) { [Command Line API] }");
createBoundFunctionProperty(
context, commandLineAPI, data, "undebug",
&V8Console::call<&V8Console::undebugFunctionCallback>,
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.cc b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
index 78325ef978..ece808b58b 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
@@ -97,25 +97,38 @@ String16 generateBreakpointId(BreakpointType type,
return builder.toString();
}
+String16 generateBreakpointId(BreakpointType type,
+ v8::Local<v8::Function> function) {
+ String16Builder builder;
+ builder.appendNumber(static_cast<int>(type));
+ builder.append(':');
+ builder.appendNumber(v8::debug::GetDebuggingId(function));
+ return builder.toString();
+}
+
bool parseBreakpointId(const String16& breakpointId, BreakpointType* type,
String16* scriptSelector = nullptr,
int* lineNumber = nullptr, int* columnNumber = nullptr) {
size_t typeLineSeparator = breakpointId.find(':');
if (typeLineSeparator == String16::kNotFound) return false;
+
+ int rawType = breakpointId.substring(0, typeLineSeparator).toInteger();
+ if (rawType < static_cast<int>(BreakpointType::kByUrl) ||
+ rawType > static_cast<int>(BreakpointType::kMonitorCommand)) {
+ return false;
+ }
+ if (type) *type = static_cast<BreakpointType>(rawType);
+ if (rawType == static_cast<int>(BreakpointType::kDebugCommand) ||
+ rawType == static_cast<int>(BreakpointType::kMonitorCommand)) {
+ // The script and source position are not encoded in this case.
+ return true;
+ }
+
size_t lineColumnSeparator = breakpointId.find(':', typeLineSeparator + 1);
if (lineColumnSeparator == String16::kNotFound) return false;
size_t columnSelectorSeparator =
breakpointId.find(':', lineColumnSeparator + 1);
if (columnSelectorSeparator == String16::kNotFound) return false;
-
- if (type) {
- int rawType = breakpointId.substring(0, typeLineSeparator).toInteger();
- if (rawType < static_cast<int>(BreakpointType::kByUrl) ||
- rawType > static_cast<int>(BreakpointType::kMonitorCommand)) {
- return false;
- }
- *type = static_cast<BreakpointType>(rawType);
- }
if (scriptSelector) {
*scriptSelector = breakpointId.substring(columnSelectorSeparator + 1);
}
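After this change a breakpoint id comes in two shapes: ids for the console debug/monitor commands carry only the type and the function's debugging id, so parsing stops at the first ':', while url/script ids still encode a position and selector that the later separators pick apart. A small check of the positional layout implied by the separator names (the concrete values are made up):

    #include <cassert>
    #include <string>

    int main() {
      // Illustrative ids only; the leading number stands for a BreakpointType value.
      // A debug/monitor-command id stops after the first ':' ("<type>:<debugging id>"),
      // e.g. "2:77", while positional ids keep line, column and the script selector:
      std::string positional_id = "0:12:4:app.js";  // "<type>:<line>:<column>:<selector>"
      std::size_t first = positional_id.find(':');
      std::size_t second = positional_id.find(':', first + 1);
      std::size_t third = positional_id.find(':', second + 1);
      assert(positional_id.substr(third + 1) == "app.js");  // script selector
      return 0;
    }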
@@ -822,6 +835,19 @@ V8DebuggerAgentImpl::setBreakpointImpl(const String16& breakpointId,
.build();
}
+void V8DebuggerAgentImpl::setBreakpointImpl(const String16& breakpointId,
+ v8::Local<v8::Function> function,
+ v8::Local<v8::String> condition) {
+ v8::debug::BreakpointId debuggerBreakpointId;
+ if (!v8::debug::SetFunctionBreakpoint(function, condition,
+ &debuggerBreakpointId)) {
+ return;
+ }
+ m_debuggerBreakpointIdToBreakpointId[debuggerBreakpointId] = breakpointId;
+ m_breakpointIdToDebuggerBreakpointIds[breakpointId].push_back(
+ debuggerBreakpointId);
+}
+
Response V8DebuggerAgentImpl::searchInContent(
const String16& scriptId, const String16& query,
Maybe<bool> optionalCaseSensitive, Maybe<bool> optionalIsRegex,
@@ -1357,10 +1383,12 @@ bool V8DebuggerAgentImpl::isPaused() const {
void V8DebuggerAgentImpl::didParseSource(
std::unique_ptr<V8DebuggerScript> script, bool success) {
v8::HandleScope handles(m_isolate);
- String16 scriptSource = script->source();
- if (!success) script->setSourceURL(findSourceURL(scriptSource, false));
- if (!success)
+ if (!success) {
+ DCHECK(!script->isSourceLoadedLazily());
+ String16 scriptSource = script->source();
+ script->setSourceURL(findSourceURL(scriptSource, false));
script->setSourceMappingURL(findSourceMapURL(scriptSource, false));
+ }
int contextId = script->executionContextId();
int contextGroupId = m_inspector->contextGroupId(contextId);
@@ -1402,13 +1430,23 @@ void V8DebuggerAgentImpl::didParseSource(
stack && !stack->isEmpty() ? stack->buildInspectorObjectImpl(m_debugger)
: nullptr;
if (success) {
- m_frontend.scriptParsed(
- scriptId, scriptURL, scriptRef->startLine(), scriptRef->startColumn(),
- scriptRef->endLine(), scriptRef->endColumn(), contextId,
- scriptRef->hash(), std::move(executionContextAuxDataParam),
- isLiveEditParam, std::move(sourceMapURLParam), hasSourceURLParam,
- isModuleParam, static_cast<int>(scriptRef->source().length()),
- std::move(stackTrace));
+ // TODO(herhut, dgozman): Report correct length for WASM if needed for
+ // coverage. Or do not send the length at all and change coverage instead.
+ if (scriptRef->isSourceLoadedLazily()) {
+ m_frontend.scriptParsed(
+ scriptId, scriptURL, 0, 0, 0, 0, contextId, scriptRef->hash(),
+ std::move(executionContextAuxDataParam), isLiveEditParam,
+ std::move(sourceMapURLParam), hasSourceURLParam, isModuleParam, 0,
+ std::move(stackTrace));
+ } else {
+ m_frontend.scriptParsed(
+ scriptId, scriptURL, scriptRef->startLine(), scriptRef->startColumn(),
+ scriptRef->endLine(), scriptRef->endColumn(), contextId,
+ scriptRef->hash(), std::move(executionContextAuxDataParam),
+ isLiveEditParam, std::move(sourceMapURLParam), hasSourceURLParam,
+ isModuleParam, static_cast<int>(scriptRef->source().length()),
+ std::move(stackTrace));
+ }
} else {
m_frontend.scriptFailedToParse(
scriptId, scriptURL, scriptRef->startLine(), scriptRef->startColumn(),
@@ -1592,29 +1630,26 @@ void V8DebuggerAgentImpl::breakProgram(
}
}
-void V8DebuggerAgentImpl::setBreakpointAt(const String16& scriptId,
- int lineNumber, int columnNumber,
- BreakpointSource source,
- const String16& condition) {
+void V8DebuggerAgentImpl::setBreakpointFor(v8::Local<v8::Function> function,
+ v8::Local<v8::String> condition,
+ BreakpointSource source) {
String16 breakpointId = generateBreakpointId(
source == DebugCommandBreakpointSource ? BreakpointType::kDebugCommand
: BreakpointType::kMonitorCommand,
- scriptId, lineNumber, columnNumber);
+ function);
if (m_breakpointIdToDebuggerBreakpointIds.find(breakpointId) !=
m_breakpointIdToDebuggerBreakpointIds.end()) {
return;
}
- setBreakpointImpl(breakpointId, scriptId, condition, lineNumber,
- columnNumber);
+ setBreakpointImpl(breakpointId, function, condition);
}
-void V8DebuggerAgentImpl::removeBreakpointAt(const String16& scriptId,
- int lineNumber, int columnNumber,
- BreakpointSource source) {
+void V8DebuggerAgentImpl::removeBreakpointFor(v8::Local<v8::Function> function,
+ BreakpointSource source) {
String16 breakpointId = generateBreakpointId(
source == DebugCommandBreakpointSource ? BreakpointType::kDebugCommand
: BreakpointType::kMonitorCommand,
- scriptId, lineNumber, columnNumber);
+ function);
removeBreakpointImpl(breakpointId);
}
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.h b/deps/v8/src/inspector/v8-debugger-agent-impl.h
index 6feaeff914..c67e20142d 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.h
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.h
@@ -120,11 +120,11 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
bool enabled() const { return m_enabled; }
- void setBreakpointAt(const String16& scriptId, int lineNumber,
- int columnNumber, BreakpointSource,
- const String16& condition = String16());
- void removeBreakpointAt(const String16& scriptId, int lineNumber,
- int columnNumber, BreakpointSource);
+ void setBreakpointFor(v8::Local<v8::Function> function,
+ v8::Local<v8::String> condition,
+ BreakpointSource source);
+ void removeBreakpointFor(v8::Local<v8::Function> function,
+ BreakpointSource source);
void schedulePauseOnNextStatement(
const String16& breakReason,
std::unique_ptr<protocol::DictionaryValue> data);
@@ -164,6 +164,9 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
std::unique_ptr<protocol::Debugger::Location> setBreakpointImpl(
const String16& breakpointId, const String16& scriptId,
const String16& condition, int lineNumber, int columnNumber);
+ void setBreakpointImpl(const String16& breakpointId,
+ v8::Local<v8::Function> function,
+ v8::Local<v8::String> condition);
void removeBreakpointImpl(const String16& breakpointId);
void clearBreakDetails();
diff --git a/deps/v8/src/inspector/v8-debugger-script.cc b/deps/v8/src/inspector/v8-debugger-script.cc
index c596ee5053..dc5054a665 100644
--- a/deps/v8/src/inspector/v8-debugger-script.cc
+++ b/deps/v8/src/inspector/v8-debugger-script.cc
@@ -13,17 +13,8 @@ namespace v8_inspector {
namespace {
-const char hexDigits[17] = "0123456789ABCDEF";
const char kGlobalDebuggerScriptHandleLabel[] = "DevTools debugger";
-void appendUnsignedAsHex(uint64_t number, String16Builder* destination) {
- for (size_t i = 0; i < 8; ++i) {
- UChar c = hexDigits[number & 0xF];
- destination->append(c);
- number >>= 4;
- }
-}
-
// Hash algorithm for substrings is described in "Über die Komplexität der
// Multiplikation in
// eingeschränkten Branchingprogrammmodellen" by Woelfe.
@@ -82,7 +73,8 @@ String16 calculateHash(const String16& str) {
hashes[i] = (hashes[i] + zi[i] * (prime[i] - 1)) % prime[i];
String16Builder hash;
- for (size_t i = 0; i < hashesSize; ++i) appendUnsignedAsHex(hashes[i], &hash);
+ for (size_t i = 0; i < hashesSize; ++i)
+ hash.appendUnsignedAsHex((uint32_t)hashes[i]);
return hash.toString();
}
@@ -157,6 +149,13 @@ class ActualScript : public V8DebuggerScript {
bool isLiveEdit() const override { return m_isLiveEdit; }
bool isModule() const override { return m_isModule; }
+ const String16& source() const override { return m_source; }
+ int startLine() const override { return m_startLine; }
+ int startColumn() const override { return m_startColumn; }
+ int endLine() const override { return m_endLine; }
+ int endColumn() const override { return m_endColumn; }
+ bool isSourceLoadedLazily() const override { return false; }
+
const String16& sourceMappingURL() const override {
return m_sourceMappingURL;
}
@@ -241,6 +240,12 @@ class ActualScript : public V8DebuggerScript {
id);
}
+ const String16& hash() const override {
+ if (m_hash.isEmpty()) m_hash = calculateHash(source());
+ DCHECK(!m_hash.isEmpty());
+ return m_hash;
+ }
+
private:
String16 GetNameOrSourceUrl(v8::Local<v8::debug::Script> script) {
v8::Local<v8::String> name;
@@ -256,6 +261,12 @@ class ActualScript : public V8DebuggerScript {
String16 m_sourceMappingURL;
bool m_isLiveEdit = false;
bool m_isModule = false;
+ String16 m_source;
+ mutable String16 m_hash;
+ int m_startLine = 0;
+ int m_startColumn = 0;
+ int m_endLine = 0;
+ int m_endColumn = 0;
v8::Global<v8::debug::Script> m_script;
};
@@ -265,22 +276,12 @@ class WasmVirtualScript : public V8DebuggerScript {
public:
WasmVirtualScript(v8::Isolate* isolate, WasmTranslation* wasmTranslation,
v8::Local<v8::debug::WasmScript> script, String16 id,
- String16 url, String16 source)
+ String16 url, int functionIndex)
: V8DebuggerScript(isolate, std::move(id), std::move(url)),
m_script(isolate, script),
- m_wasmTranslation(wasmTranslation) {
+ m_wasmTranslation(wasmTranslation),
+ m_functionIndex(functionIndex) {
m_script.AnnotateStrongRetainer(kGlobalDebuggerScriptHandleLabel);
- int num_lines = 0;
- int last_newline = -1;
- size_t next_newline = source.find('\n', last_newline + 1);
- while (next_newline != String16::kNotFound) {
- last_newline = static_cast<int>(next_newline);
- next_newline = source.find('\n', last_newline + 1);
- ++num_lines;
- }
- m_endLine = num_lines;
- m_endColumn = static_cast<int>(source.length()) - last_newline - 1;
- m_source = std::move(source);
m_executionContextId = script->ContextId().ToChecked();
}
@@ -289,6 +290,22 @@ class WasmVirtualScript : public V8DebuggerScript {
bool isModule() const override { return false; }
void setSourceMappingURL(const String16&) override {}
void setSource(const String16&, bool, bool*) override { UNREACHABLE(); }
+ bool isSourceLoadedLazily() const override { return true; }
+ const String16& source() const override {
+ return m_wasmTranslation->GetSource(m_id, m_functionIndex);
+ }
+ int startLine() const override {
+ return m_wasmTranslation->GetStartLine(m_id, m_functionIndex);
+ }
+ int startColumn() const override {
+ return m_wasmTranslation->GetStartColumn(m_id, m_functionIndex);
+ }
+ int endLine() const override {
+ return m_wasmTranslation->GetEndLine(m_id, m_functionIndex);
+ }
+ int endColumn() const override {
+ return m_wasmTranslation->GetEndColumn(m_id, m_functionIndex);
+ }
bool getPossibleBreakpoints(
const v8::debug::Location& start, const v8::debug::Location& end,
@@ -347,6 +364,13 @@ class WasmVirtualScript : public V8DebuggerScript {
return true;
}
+ const String16& hash() const override {
+ if (m_hash.isEmpty()) {
+ m_hash = m_wasmTranslation->GetHash(m_id, m_functionIndex);
+ }
+ return m_hash;
+ }
+
private:
static const String16& emptyString() {
static const String16 singleEmptyString;
@@ -359,6 +383,8 @@ class WasmVirtualScript : public V8DebuggerScript {
v8::Global<v8::debug::WasmScript> m_script;
WasmTranslation* m_wasmTranslation;
+ int m_functionIndex;
+ mutable String16 m_hash;
};
} // namespace
@@ -373,10 +399,10 @@ std::unique_ptr<V8DebuggerScript> V8DebuggerScript::Create(
std::unique_ptr<V8DebuggerScript> V8DebuggerScript::CreateWasm(
v8::Isolate* isolate, WasmTranslation* wasmTranslation,
v8::Local<v8::debug::WasmScript> underlyingScript, String16 id,
- String16 url, String16 source) {
+ String16 url, int functionIndex) {
return std::unique_ptr<WasmVirtualScript>(
new WasmVirtualScript(isolate, wasmTranslation, underlyingScript,
- std::move(id), std::move(url), std::move(source)));
+ std::move(id), std::move(url), functionIndex));
}
V8DebuggerScript::V8DebuggerScript(v8::Isolate* isolate, String16 id,
@@ -389,12 +415,6 @@ const String16& V8DebuggerScript::sourceURL() const {
return m_sourceURL.isEmpty() ? m_url : m_sourceURL;
}
-const String16& V8DebuggerScript::hash() const {
- if (m_hash.isEmpty()) m_hash = calculateHash(source());
- DCHECK(!m_hash.isEmpty());
- return m_hash;
-}
-
void V8DebuggerScript::setSourceURL(const String16& sourceURL) {
m_sourceURL = sourceURL;
}
diff --git a/deps/v8/src/inspector/v8-debugger-script.h b/deps/v8/src/inspector/v8-debugger-script.h
index 6badd87c97..3e3885ed52 100644
--- a/deps/v8/src/inspector/v8-debugger-script.h
+++ b/deps/v8/src/inspector/v8-debugger-script.h
@@ -50,7 +50,7 @@ class V8DebuggerScript {
static std::unique_ptr<V8DebuggerScript> CreateWasm(
v8::Isolate* isolate, WasmTranslation* wasmTranslation,
v8::Local<v8::debug::WasmScript> underlyingScript, String16 id,
- String16 url, String16 source);
+ String16 url, int functionIndex);
virtual ~V8DebuggerScript();
@@ -59,15 +59,16 @@ class V8DebuggerScript {
bool hasSourceURL() const { return !m_sourceURL.isEmpty(); }
const String16& sourceURL() const;
virtual const String16& sourceMappingURL() const = 0;
- const String16& source() const { return m_source; }
- const String16& hash() const;
- int startLine() const { return m_startLine; }
- int startColumn() const { return m_startColumn; }
- int endLine() const { return m_endLine; }
- int endColumn() const { return m_endColumn; }
+ virtual const String16& source() const = 0;
+ virtual const String16& hash() const = 0;
+ virtual int startLine() const = 0;
+ virtual int startColumn() const = 0;
+ virtual int endLine() const = 0;
+ virtual int endColumn() const = 0;
int executionContextId() const { return m_executionContextId; }
virtual bool isLiveEdit() const = 0;
virtual bool isModule() const = 0;
+ virtual bool isSourceLoadedLazily() const = 0;
void setSourceURL(const String16&);
virtual void setSourceMappingURL(const String16&) = 0;
@@ -95,12 +96,6 @@ class V8DebuggerScript {
String16 m_id;
String16 m_url;
String16 m_sourceURL;
- String16 m_source;
- mutable String16 m_hash;
- int m_startLine = 0;
- int m_startColumn = 0;
- int m_endLine = 0;
- int m_endColumn = 0;
int m_executionContextId = 0;
v8::Isolate* m_isolate;
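Making source(), hash() and the position accessors virtual lets WasmVirtualScript defer everything to WasmTranslation, while ActualScript keeps the old behaviour of computing the hash once and caching it in a mutable member. A minimal sketch of that memoized-accessor pattern (illustrative names, plain std::string instead of String16):

    #include <string>

    class LazyHash {
     public:
      const std::string& hash() const {
        if (hash_.empty()) hash_ = Compute();  // first call pays the cost
        return hash_;
      }

     private:
      std::string Compute() const { return "deadbeef"; }  // stand-in for calculateHash()
      mutable std::string hash_;                           // cache survives const calls
    };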
diff --git a/deps/v8/src/inspector/v8-debugger.cc b/deps/v8/src/inspector/v8-debugger.cc
index 0f3fd0377e..1cdbce27e6 100644
--- a/deps/v8/src/inspector/v8-debugger.cc
+++ b/deps/v8/src/inspector/v8-debugger.cc
@@ -29,10 +29,8 @@ v8::MaybeLocal<v8::Array> collectionsEntries(v8::Local<v8::Context> context,
v8::Isolate* isolate = context->GetIsolate();
v8::Local<v8::Array> entries;
bool isKeyValue = false;
- if (!value->IsObject() ||
- !value.As<v8::Object>()->PreviewEntries(&isKeyValue).ToLocal(&entries)) {
+ if (!v8::debug::EntriesPreview(isolate, value, &isKeyValue).ToLocal(&entries))
return v8::MaybeLocal<v8::Array>();
- }
v8::Local<v8::Array> wrappedEntries = v8::Array::New(isolate);
CHECK(!isKeyValue || wrappedEntries->Length() % 2 == 0);
@@ -156,7 +154,6 @@ class MatchPrototypePredicate : public v8::debug::QueryObjectPredicate {
v8::Local<v8::Context> m_context;
v8::Local<v8::Value> m_prototype;
};
-
} // namespace
V8Debugger::V8Debugger(v8::Isolate* isolate, V8InspectorImpl* inspector)
@@ -170,14 +167,20 @@ V8Debugger::V8Debugger(v8::Isolate* isolate, V8InspectorImpl* inspector)
m_pauseOnExceptionsState(v8::debug::NoBreakOnException),
m_wasmTranslation(isolate) {}
-V8Debugger::~V8Debugger() {}
+V8Debugger::~V8Debugger() {
+ if (m_terminateExecutionCallback) {
+ m_isolate->RemoveCallCompletedCallback(
+ &V8Debugger::terminateExecutionCompletedCallback);
+ m_isolate->RemoveMicrotasksCompletedCallback(
+ &V8Debugger::terminateExecutionCompletedCallback);
+ }
+}
void V8Debugger::enable() {
if (m_enableCount++) return;
v8::HandleScope scope(m_isolate);
v8::debug::SetDebugDelegate(m_isolate, this);
- v8::debug::SetOutOfMemoryCallback(m_isolate, &V8Debugger::v8OOMCallback,
- this);
+ m_isolate->AddNearHeapLimitCallback(&V8Debugger::nearHeapLimitCallback, this);
v8::debug::ChangeBreakOnException(m_isolate, v8::debug::NoBreakOnException);
m_pauseOnExceptionsState = v8::debug::NoBreakOnException;
}
@@ -191,8 +194,9 @@ void V8Debugger::disable() {
m_pauseOnAsyncCall = false;
m_wasmTranslation.Clear();
v8::debug::SetDebugDelegate(m_isolate, nullptr);
- v8::debug::SetOutOfMemoryCallback(m_isolate, nullptr, nullptr);
- m_isolate->RestoreOriginalHeapLimit();
+ m_isolate->RemoveNearHeapLimitCallback(&V8Debugger::nearHeapLimitCallback,
+ m_originalHeapLimit);
+ m_originalHeapLimit = 0;
}
bool V8Debugger::isPausedInContextGroup(int contextGroupId) const {
@@ -336,6 +340,34 @@ void V8Debugger::pauseOnAsyncCall(int targetContextGroupId, uintptr_t task,
m_taskWithScheduledBreakDebuggerId = debuggerId;
}
+void V8Debugger::terminateExecution(
+ std::unique_ptr<TerminateExecutionCallback> callback) {
+ if (m_terminateExecutionCallback) {
+ callback->sendFailure(
+ Response::Error("There is current termination request in progress"));
+ return;
+ }
+ m_terminateExecutionCallback = std::move(callback);
+ m_isolate->AddCallCompletedCallback(
+ &V8Debugger::terminateExecutionCompletedCallback);
+ m_isolate->AddMicrotasksCompletedCallback(
+ &V8Debugger::terminateExecutionCompletedCallback);
+ m_isolate->TerminateExecution();
+}
+
+void V8Debugger::terminateExecutionCompletedCallback(v8::Isolate* isolate) {
+ isolate->RemoveCallCompletedCallback(
+ &V8Debugger::terminateExecutionCompletedCallback);
+ isolate->RemoveMicrotasksCompletedCallback(
+ &V8Debugger::terminateExecutionCompletedCallback);
+ V8InspectorImpl* inspector =
+ static_cast<V8InspectorImpl*>(v8::debug::GetInspector(isolate));
+ V8Debugger* debugger = inspector->debugger();
+ debugger->m_isolate->CancelTerminateExecution();
+ debugger->m_terminateExecutionCallback->sendSuccess();
+ debugger->m_terminateExecutionCallback.reset();
+}
+
Response V8Debugger::continueToLocation(
int targetContextGroupId, V8DebuggerScript* script,
std::unique_ptr<protocol::Debugger::Location> location,
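terminateExecution() combines three public isolate facilities: TerminateExecution() requests termination of the current or next script run, the call-completed and microtasks-completed callbacks report when the embedder regains control, and CancelTerminateExecution() clears the pending termination so later evaluations still work. A standalone sketch of that handshake, assuming only a v8::Isolate* and none of the inspector wiring above:

    // #include "v8.h"  // include path depends on the embedder; illustrative only.

    void OnExecutionDone(v8::Isolate* isolate) {
      isolate->RemoveCallCompletedCallback(&OnExecutionDone);
      isolate->RemoveMicrotasksCompletedCallback(&OnExecutionDone);
      isolate->CancelTerminateExecution();  // allow future script execution
    }

    void RequestTermination(v8::Isolate* isolate) {
      isolate->AddCallCompletedCallback(&OnExecutionDone);
      isolate->AddMicrotasksCompletedCallback(&OnExecutionDone);
      isolate->TerminateExecution();  // current or next script run is terminated
    }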
@@ -457,14 +489,26 @@ void V8Debugger::handleProgramBreak(
m_scheduledAssertBreak = false;
}
-void V8Debugger::v8OOMCallback(void* data) {
+namespace {
+
+size_t HeapLimitForDebugging(size_t initial_heap_limit) {
+ const size_t kDebugHeapSizeFactor = 4;
+ size_t max_limit = std::numeric_limits<size_t>::max() / 4;
+ return std::min(max_limit, initial_heap_limit * kDebugHeapSizeFactor);
+}
+
+} // anonymous namespace
+
+size_t V8Debugger::nearHeapLimitCallback(void* data, size_t current_heap_limit,
+ size_t initial_heap_limit) {
V8Debugger* thisPtr = static_cast<V8Debugger*>(data);
- thisPtr->m_isolate->IncreaseHeapLimitForDebugging();
+ thisPtr->m_originalHeapLimit = current_heap_limit;
thisPtr->m_scheduledOOMBreak = true;
v8::Local<v8::Context> context = thisPtr->m_isolate->GetEnteredContext();
DCHECK(!context.IsEmpty());
thisPtr->setPauseOnNextStatement(
true, thisPtr->m_inspector->contextGroupId(context));
+ return HeapLimitForDebugging(initial_heap_limit);
}
void V8Debugger::ScriptCompiled(v8::Local<v8::debug::Script> script,
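With these constants, pausing near OOM temporarily quadruples whatever heap limit the embedder configured; the std::numeric_limits cap only matters for pathologically large initial limits. A small worked check of the arithmetic with an illustrative 1.5 GB limit (assumes a 64-bit size_t):

    #include <cassert>
    #include <cstddef>

    int main() {
      // Same arithmetic as HeapLimitForDebugging above, with illustrative numbers.
      const std::size_t kDebugHeapSizeFactor = 4;
      std::size_t initial_limit = std::size_t{1536} * 1024 * 1024;  // 1.5 GB
      std::size_t debug_limit = initial_limit * kDebugHeapSizeFactor;
      assert(debug_limit == std::size_t{6} * 1024 * 1024 * 1024);   // 6 GB
      return 0;
    }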
diff --git a/deps/v8/src/inspector/v8-debugger.h b/deps/v8/src/inspector/v8-debugger.h
index a710726581..351e5b66ad 100644
--- a/deps/v8/src/inspector/v8-debugger.h
+++ b/deps/v8/src/inspector/v8-debugger.h
@@ -32,6 +32,8 @@ struct V8StackTraceId;
using protocol::Response;
using ScheduleStepIntoAsyncCallback =
protocol::Debugger::Backend::ScheduleStepIntoAsyncCallback;
+using TerminateExecutionCallback =
+ protocol::Runtime::Backend::TerminateExecutionCallback;
class V8Debugger : public v8::debug::DebugDelegate {
public:
@@ -60,6 +62,8 @@ class V8Debugger : public v8::debug::DebugDelegate {
void pauseOnAsyncCall(int targetContextGroupId, uintptr_t task,
const String16& debuggerId);
+ void terminateExecution(std::unique_ptr<TerminateExecutionCallback> callback);
+
Response continueToLocation(int targetContextGroupId,
V8DebuggerScript* script,
std::unique_ptr<protocol::Debugger::Location>,
@@ -131,7 +135,9 @@ class V8Debugger : public v8::debug::DebugDelegate {
void clearContinueToLocation();
bool shouldContinueToCurrentLocation();
- static void v8OOMCallback(void* data);
+ static size_t nearHeapLimitCallback(void* data, size_t current_heap_limit,
+ size_t initial_heap_limit);
+ static void terminateExecutionCompletedCallback(v8::Isolate* isolate);
void handleProgramBreak(
v8::Local<v8::Context> pausedContext, v8::Local<v8::Value> exception,
@@ -184,6 +190,7 @@ class V8Debugger : public v8::debug::DebugDelegate {
int m_enableCount;
int m_breakpointsActiveCount = 0;
int m_ignoreScriptParsedEventsCounter;
+ size_t m_originalHeapLimit = 0;
bool m_scheduledOOMBreak = false;
bool m_scheduledAssertBreak = false;
int m_targetContextGroupId = 0;
@@ -232,6 +239,8 @@ class V8Debugger : public v8::debug::DebugDelegate {
protocol::HashMap<String16, std::pair<int64_t, int64_t>>
m_serializedDebuggerIdToDebuggerId;
+ std::unique_ptr<TerminateExecutionCallback> m_terminateExecutionCallback;
+
WasmTranslation m_wasmTranslation;
DISALLOW_COPY_AND_ASSIGN(V8Debugger);
diff --git a/deps/v8/src/inspector/v8-injected-script-host.cc b/deps/v8/src/inspector/v8-injected-script-host.cc
index 1455cf6dbc..23f56f93e2 100644
--- a/deps/v8/src/inspector/v8-injected-script-host.cc
+++ b/deps/v8/src/inspector/v8-injected-script-host.cc
@@ -257,7 +257,8 @@ void V8InjectedScriptHost::getInternalPropertiesCallback(
std::unordered_set<String16> allowedProperties;
if (info[0]->IsBooleanObject() || info[0]->IsNumberObject() ||
- info[0]->IsStringObject() || info[0]->IsSymbolObject()) {
+ info[0]->IsStringObject() || info[0]->IsSymbolObject() ||
+ info[0]->IsBigIntObject()) {
allowedProperties.insert(String16("[[PrimitiveValue]]"));
} else if (info[0]->IsPromise()) {
allowedProperties.insert(String16("[[PromiseStatus]]"));
diff --git a/deps/v8/src/inspector/v8-inspector-impl.cc b/deps/v8/src/inspector/v8-inspector-impl.cc
index a29f07b0bf..14b7925514 100644
--- a/deps/v8/src/inspector/v8-inspector-impl.cc
+++ b/deps/v8/src/inspector/v8-inspector-impl.cc
@@ -58,11 +58,14 @@ V8InspectorImpl::V8InspectorImpl(v8::Isolate* isolate,
m_debugger(new V8Debugger(isolate, this)),
m_capturingStackTracesCount(0),
m_lastExceptionId(0),
- m_lastContextId(0) {
+ m_lastContextId(0),
+ m_isolateId(v8::debug::GetNextRandomInt64(m_isolate)) {
+ v8::debug::SetInspector(m_isolate, this);
v8::debug::SetConsoleDelegate(m_isolate, console());
}
V8InspectorImpl::~V8InspectorImpl() {
+ v8::debug::SetInspector(m_isolate, nullptr);
v8::debug::SetConsoleDelegate(m_isolate, nullptr);
}
@@ -234,21 +237,9 @@ void V8InspectorImpl::resetContextGroup(int contextGroupId) {
m_debugger->wasmTranslation()->Clear();
}
-void V8InspectorImpl::idleStarted() {
- for (auto& it : m_sessions) {
- for (auto& it2 : it.second) {
- if (it2.second->profilerAgent()->idleStarted()) return;
- }
- }
-}
+void V8InspectorImpl::idleStarted() { m_isolate->SetIdle(true); }
-void V8InspectorImpl::idleFinished() {
- for (auto& it : m_sessions) {
- for (auto& it2 : it.second) {
- if (it2.second->profilerAgent()->idleFinished()) return;
- }
- }
-}
+void V8InspectorImpl::idleFinished() { m_isolate->SetIdle(false); }
unsigned V8InspectorImpl::exceptionThrown(
v8::Local<v8::Context> context, const StringView& message,
diff --git a/deps/v8/src/inspector/v8-inspector-impl.h b/deps/v8/src/inspector/v8-inspector-impl.h
index 0627eae317..d34eddfa68 100644
--- a/deps/v8/src/inspector/v8-inspector-impl.h
+++ b/deps/v8/src/inspector/v8-inspector-impl.h
@@ -61,6 +61,7 @@ class V8InspectorImpl : public V8Inspector {
V8Debugger* debugger() { return m_debugger.get(); }
int contextGroupId(v8::Local<v8::Context>) const;
int contextGroupId(int contextId) const;
+ uint64_t isolateId() const { return m_isolateId; }
v8::MaybeLocal<v8::Value> compileAndRunInternalScript(v8::Local<v8::Context>,
v8::Local<v8::String>);
@@ -128,6 +129,7 @@ class V8InspectorImpl : public V8Inspector {
unsigned m_lastExceptionId;
int m_lastContextId;
int m_lastSessionId = 0;
+ uint64_t m_isolateId;
using MuteExceptionsMap = protocol::HashMap<int, int>;
MuteExceptionsMap m_muteExceptionsMap;
diff --git a/deps/v8/src/inspector/v8-inspector-session-impl.cc b/deps/v8/src/inspector/v8-inspector-session-impl.cc
index d580c41e30..1d8d12ac0d 100644
--- a/deps/v8/src/inspector/v8-inspector-session-impl.cc
+++ b/deps/v8/src/inspector/v8-inspector-session-impl.cc
@@ -197,8 +197,11 @@ Response V8InspectorSessionImpl::findInjectedScript(
if (!context) return Response::Error("Cannot find context with specified id");
injectedScript = context->getInjectedScript(m_sessionId);
if (!injectedScript) {
- if (!context->createInjectedScript(m_sessionId))
+ if (!context->createInjectedScript(m_sessionId)) {
+ if (m_inspector->isolate()->IsExecutionTerminating())
+ return Response::Error("Execution was terminated");
return Response::Error("Cannot access specified execution context");
+ }
injectedScript = context->getInjectedScript(m_sessionId);
if (m_customObjectFormatterEnabled)
injectedScript->setCustomObjectFormatterEnabled(true);
diff --git a/deps/v8/src/inspector/v8-profiler-agent-impl.cc b/deps/v8/src/inspector/v8-profiler-agent-impl.cc
index 2675216143..59a99d79d5 100644
--- a/deps/v8/src/inspector/v8-profiler-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-profiler-agent-impl.cc
@@ -476,7 +476,6 @@ void V8ProfilerAgentImpl::startProfiling(const String16& title) {
if (!m_startedProfilesCount) {
DCHECK(!m_profiler);
m_profiler = v8::CpuProfiler::New(m_isolate);
- m_profiler->SetIdle(m_idle);
int interval =
m_state->integerProperty(ProfilerAgentState::samplingInterval, 0);
if (interval) m_profiler->SetSamplingInterval(interval);
@@ -503,16 +502,4 @@ std::unique_ptr<protocol::Profiler::Profile> V8ProfilerAgentImpl::stopProfiling(
return result;
}
-bool V8ProfilerAgentImpl::idleStarted() {
- m_idle = true;
- if (m_profiler) m_profiler->SetIdle(m_idle);
- return m_profiler;
-}
-
-bool V8ProfilerAgentImpl::idleFinished() {
- m_idle = false;
- if (m_profiler) m_profiler->SetIdle(m_idle);
- return m_profiler;
-}
-
} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-profiler-agent-impl.h b/deps/v8/src/inspector/v8-profiler-agent-impl.h
index a68ea1144c..5370d39eb4 100644
--- a/deps/v8/src/inspector/v8-profiler-agent-impl.h
+++ b/deps/v8/src/inspector/v8-profiler-agent-impl.h
@@ -57,9 +57,6 @@ class V8ProfilerAgentImpl : public protocol::Profiler::Backend {
void consoleProfile(const String16& title);
void consoleProfileEnd(const String16& title);
- bool idleStarted();
- bool idleFinished();
-
private:
String16 nextProfileId();
@@ -77,7 +74,6 @@ class V8ProfilerAgentImpl : public protocol::Profiler::Backend {
class ProfileDescriptor;
std::vector<ProfileDescriptor> m_startedProfiles;
String16 m_frontendInitiatedProfileId;
- bool m_idle = false;
int m_startedProfilesCount = 0;
DISALLOW_COPY_AND_ASSIGN(V8ProfilerAgentImpl);
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.cc b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
index 6975f35e71..bb3198e0bf 100644
--- a/deps/v8/src/inspector/v8-runtime-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
@@ -30,6 +30,8 @@
#include "src/inspector/v8-runtime-agent-impl.h"
+#include <inttypes.h>
+
#include "src/debug/debug-interface.h"
#include "src/inspector/injected-script.h"
#include "src/inspector/inspected-context.h"
@@ -134,6 +136,9 @@ void innerCallFunctionOn(
if (silent) scope.ignoreExceptionsAndMuteConsole();
if (userGesture) scope.pretendUserGesture();
+ // Temporarily allow eval for the inspector.
+ scope.allowCodeGenerationFromStrings();
+
v8::MaybeLocal<v8::Value> maybeFunctionValue;
v8::Local<v8::Script> functionScript;
if (inspector
@@ -227,7 +232,8 @@ void V8RuntimeAgentImpl::evaluate(
Maybe<bool> includeCommandLineAPI, Maybe<bool> silent,
Maybe<int> executionContextId, Maybe<bool> returnByValue,
Maybe<bool> generatePreview, Maybe<bool> userGesture,
- Maybe<bool> awaitPromise, std::unique_ptr<EvaluateCallback> callback) {
+ Maybe<bool> awaitPromise, Maybe<bool> throwOnSideEffect,
+ std::unique_ptr<EvaluateCallback> callback) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("devtools.timeline"),
"EvaluateScript");
int contextId = 0;
@@ -250,20 +256,18 @@ void V8RuntimeAgentImpl::evaluate(
if (includeCommandLineAPI.fromMaybe(false)) scope.installCommandLineAPI();
- bool evalIsDisabled = !scope.context()->IsCodeGenerationFromStringsAllowed();
// Temporarily enable allow evals for inspector.
- if (evalIsDisabled) scope.context()->AllowCodeGenerationFromStrings(true);
+ scope.allowCodeGenerationFromStrings();
v8::MaybeLocal<v8::Value> maybeResultValue;
{
v8::MicrotasksScope microtasksScope(m_inspector->isolate(),
v8::MicrotasksScope::kRunMicrotasks);
maybeResultValue = v8::debug::EvaluateGlobal(
- m_inspector->isolate(), toV8String(m_inspector->isolate(), expression));
+ m_inspector->isolate(), toV8String(m_inspector->isolate(), expression),
+ throwOnSideEffect.fromMaybe(false));
} // Run microtasks before returning result.
- if (evalIsDisabled) scope.context()->AllowCodeGenerationFromStrings(false);
-
// Re-initialize after running client's code, as it could have destroyed
// context or session.
response = scope.initialize();
@@ -571,7 +575,7 @@ void V8RuntimeAgentImpl::runScript(
}
Response V8RuntimeAgentImpl::queryObjects(
- const String16& prototypeObjectId,
+ const String16& prototypeObjectId, Maybe<String16> objectGroup,
std::unique_ptr<protocol::Runtime::RemoteObject>* objects) {
InjectedScript::ObjectScope scope(m_session, prototypeObjectId);
Response response = scope.initialize();
@@ -582,7 +586,8 @@ Response V8RuntimeAgentImpl::queryObjects(
v8::Local<v8::Array> resultArray = m_inspector->debugger()->queryObjects(
scope.context(), v8::Local<v8::Object>::Cast(scope.object()));
return scope.injectedScript()->wrapObject(
- resultArray, scope.objectGroupName(), false, false, objects);
+ resultArray, objectGroup.fromMaybe(scope.objectGroupName()), false, false,
+ objects);
}
Response V8RuntimeAgentImpl::globalLexicalScopeNames(
@@ -606,6 +611,27 @@ Response V8RuntimeAgentImpl::globalLexicalScopeNames(
return Response::OK();
}
+Response V8RuntimeAgentImpl::getIsolateId(String16* outIsolateId) {
+ char buf[40];
+ std::snprintf(buf, sizeof(buf), "%" PRIx64, m_inspector->isolateId());
+ *outIsolateId = buf;
+ return Response::OK();
+}
+
+Response V8RuntimeAgentImpl::getHeapUsage(double* out_usedSize,
+ double* out_totalSize) {
+ v8::HeapStatistics stats;
+ m_inspector->isolate()->GetHeapStatistics(&stats);
+ *out_usedSize = stats.used_heap_size();
+ *out_totalSize = stats.total_heap_size();
+ return Response::OK();
+}
+
+void V8RuntimeAgentImpl::terminateExecution(
+ std::unique_ptr<TerminateExecutionCallback> callback) {
+ m_inspector->debugger()->terminateExecution(std::move(callback));
+}
+
void V8RuntimeAgentImpl::restore() {
if (!m_state->booleanProperty(V8RuntimeAgentImplState::runtimeEnabled, false))
return;
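getIsolateId() formats the randomly chosen 64-bit id as lowercase hex, and getHeapUsage() simply forwards v8::Isolate::GetHeapStatistics(). A standalone sketch of the same calls against the public API; `isolate` is assumed to be initialized and the id value is made up:

    #include <cinttypes>
    #include <cstdio>
    // #include "v8.h"  // include path depends on the embedder.

    void PrintIsolateInfo(v8::Isolate* isolate, uint64_t isolate_id) {
      char id[40];
      std::snprintf(id, sizeof(id), "%" PRIx64, isolate_id);

      v8::HeapStatistics stats;
      isolate->GetHeapStatistics(&stats);
      std::printf("isolate %s: used=%zu total=%zu\n", id,
                  stats.used_heap_size(), stats.total_heap_size());
    }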
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.h b/deps/v8/src/inspector/v8-runtime-agent-impl.h
index 790654da08..3975e1b970 100644
--- a/deps/v8/src/inspector/v8-runtime-agent-impl.h
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.h
@@ -63,7 +63,7 @@ class V8RuntimeAgentImpl : public protocol::Runtime::Backend {
Maybe<bool> includeCommandLineAPI, Maybe<bool> silent,
Maybe<int> executionContextId, Maybe<bool> returnByValue,
Maybe<bool> generatePreview, Maybe<bool> userGesture,
- Maybe<bool> awaitPromise,
+ Maybe<bool> awaitPromise, Maybe<bool> throwOnSideEffect,
std::unique_ptr<EvaluateCallback>) override;
void awaitPromise(const String16& promiseObjectId, Maybe<bool> returnByValue,
Maybe<bool> generatePreview,
@@ -99,11 +99,15 @@ class V8RuntimeAgentImpl : public protocol::Runtime::Backend {
Maybe<bool> generatePreview, Maybe<bool> awaitPromise,
std::unique_ptr<RunScriptCallback>) override;
Response queryObjects(
- const String16& prototypeObjectId,
+ const String16& prototypeObjectId, Maybe<String16> objectGroup,
std::unique_ptr<protocol::Runtime::RemoteObject>* objects) override;
Response globalLexicalScopeNames(
Maybe<int> executionContextId,
std::unique_ptr<protocol::Array<String16>>* outNames) override;
+ Response getIsolateId(String16* outIsolateId) override;
+ Response getHeapUsage(double* out_usedSize, double* out_totalSize) override;
+ void terminateExecution(
+ std::unique_ptr<TerminateExecutionCallback> callback) override;
void reset();
void reportExecutionContextCreated(InspectedContext*);
diff --git a/deps/v8/src/inspector/wasm-translation.cc b/deps/v8/src/inspector/wasm-translation.cc
index 431573d842..4754af5442 100644
--- a/deps/v8/src/inspector/wasm-translation.cc
+++ b/deps/v8/src/inspector/wasm-translation.cc
@@ -5,6 +5,7 @@
#include "src/inspector/wasm-translation.h"
#include <algorithm>
+#include <utility>
#include "src/debug/debug-interface.h"
#include "src/inspector/string-util.h"
@@ -15,6 +16,42 @@
namespace v8_inspector {
+using OffsetTable = v8::debug::WasmDisassembly::OffsetTable;
+
+struct WasmSourceInformation {
+ String16 source;
+ int end_line = 0;
+ int end_column = 0;
+
+ OffsetTable offset_table;
+ OffsetTable reverse_offset_table;
+
+ WasmSourceInformation(String16 source, OffsetTable offset_table)
+ : source(std::move(source)), offset_table(std::move(offset_table)) {
+ int num_lines = 0;
+ int last_newline = -1;
+ size_t next_newline = this->source.find('\n', last_newline + 1);
+ while (next_newline != String16::kNotFound) {
+ last_newline = static_cast<int>(next_newline);
+ next_newline = this->source.find('\n', last_newline + 1);
+ ++num_lines;
+ }
+ end_line = num_lines;
+ end_column = static_cast<int>(this->source.length()) - last_newline - 1;
+
+ reverse_offset_table = this->offset_table;
+ // Order by line, column, then byte offset.
+ auto cmp = [](OffsetTable::value_type el1, OffsetTable::value_type el2) {
+ if (el1.line != el2.line) return el1.line < el2.line;
+ if (el1.column != el2.column) return el1.column < el2.column;
+ return el1.byte_offset < el2.byte_offset;
+ };
+ std::sort(reverse_offset_table.begin(), reverse_offset_table.end(), cmp);
+ }
+
+ WasmSourceInformation() = default;
+};
+
class WasmTranslation::TranslatorImpl {
public:
struct TransLocation {
@@ -33,6 +70,10 @@ class WasmTranslation::TranslatorImpl {
virtual void Init(v8::Isolate*, WasmTranslation*, V8DebuggerAgentImpl*) = 0;
virtual void Translate(TransLocation*) = 0;
virtual void TranslateBack(TransLocation*) = 0;
+ virtual const WasmSourceInformation& GetSourceInformation(v8::Isolate*,
+ int index) = 0;
+ virtual const String16 GetHash(v8::Isolate*, int index) = 0;
+
virtual ~TranslatorImpl() {}
class RawTranslator;
@@ -42,14 +83,28 @@ class WasmTranslation::TranslatorImpl {
class WasmTranslation::TranslatorImpl::RawTranslator
: public WasmTranslation::TranslatorImpl {
public:
- void Init(v8::Isolate*, WasmTranslation*, V8DebuggerAgentImpl*) {}
- void Translate(TransLocation*) {}
- void TranslateBack(TransLocation*) {}
+ void Init(v8::Isolate*, WasmTranslation*, V8DebuggerAgentImpl*) override {}
+ void Translate(TransLocation*) override {}
+ void TranslateBack(TransLocation*) override {}
+ const WasmSourceInformation& GetSourceInformation(v8::Isolate*,
+ int index) override {
+ // NOTE(mmarchini): prior to 3.9, clang won't accept const object
+ // instantiations with non-user-provided default constructors, unless an
+ // empty initializer is explicitly given. Node.js still supports older
+ // clang versions, so we must take care when using const objects
+ // with default constructors. For more information, please refer to CWG
+ // 253 (http://open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#253)
+ static const WasmSourceInformation singleEmptySourceInformation = {};
+ return singleEmptySourceInformation;
+ }
+ const String16 GetHash(v8::Isolate*, int index) override {
+ // TODO(herhut): Find useful hash default value.
+ return String16();
+ }
};
class WasmTranslation::TranslatorImpl::DisassemblingTranslator
: public WasmTranslation::TranslatorImpl {
- using OffsetTable = v8::debug::WasmDisassembly::OffsetTable;
public:
DisassemblingTranslator(v8::Isolate* isolate,
@@ -99,48 +154,36 @@ class WasmTranslation::TranslatorImpl::DisassemblingTranslator
}
}
+ static bool LessThan(const v8::debug::WasmDisassemblyOffsetTableEntry& entry,
+ const TransLocation& loc) {
+ return entry.line < loc.line ||
+ (entry.line == loc.line && entry.column < loc.column);
+ }
+
void TranslateBack(TransLocation* loc) override {
- int func_index = GetFunctionIndexFromFakeScriptId(loc->script_id);
- const OffsetTable* reverse_table = GetReverseTable(func_index);
- if (!reverse_table) return;
- DCHECK(!reverse_table->empty());
v8::Isolate* isolate = loc->translation->isolate_;
+ int func_index = GetFunctionIndexFromFakeScriptId(loc->script_id);
+ const OffsetTable& reverse_table = GetReverseTable(isolate, func_index);
+ if (reverse_table.empty()) return;
// Binary search for the given line and column.
- unsigned left = 0; // inclusive
- unsigned right = static_cast<unsigned>(reverse_table->size()); // exclusive
- while (right - left > 1) {
- unsigned mid = (left + right) / 2;
- auto& entry = (*reverse_table)[mid];
- if (entry.line < loc->line ||
- (entry.line == loc->line && entry.column <= loc->column)) {
- left = mid;
- } else {
- right = mid;
- }
- }
+ auto element = std::lower_bound(reverse_table.begin(), reverse_table.end(),
+ *loc, LessThan);
int found_byte_offset = 0;
- // [left] is <= <line,column>, or left==0 and [0] > <line,column>.
- // We are searching for the smallest entry >= <line,column> which is still
- // on the same line. This must be either [left] or [left + 1].
- // If we don't find such an entry, we might have hit the special case of
- // pointing after the last line, which is translated to the end of the
- // function (one byte after the last function byte).
- if ((*reverse_table)[left].line == loc->line &&
- (*reverse_table)[left].column >= loc->column) {
- found_byte_offset = (*reverse_table)[left].byte_offset;
- } else if (left + 1 < reverse_table->size() &&
- (*reverse_table)[left + 1].line == loc->line &&
- (*reverse_table)[left + 1].column >= loc->column) {
- found_byte_offset = (*reverse_table)[left + 1].byte_offset;
- } else if (left == reverse_table->size() - 1 &&
- (*reverse_table)[left].line == loc->line - 1 &&
- loc->column == 0) {
+ // We want an entry on the same line if possible.
+ if (element == reverse_table.end()) {
+ // We did not find an element, so this points after the function.
std::pair<int, int> func_range =
script_.Get(isolate)->GetFunctionRange(func_index);
DCHECK_LE(func_range.first, func_range.second);
found_byte_offset = func_range.second - func_range.first;
+ } else if (element->line == loc->line || element == reverse_table.begin()) {
+ found_byte_offset = element->byte_offset;
+ } else {
+ auto prev = element - 1;
+ DCHECK(prev->line == loc->line);
+ found_byte_offset = prev->byte_offset;
}
loc->script_id = String16::fromInteger(script_.Get(isolate)->Id());
@@ -148,6 +191,31 @@ class WasmTranslation::TranslatorImpl::DisassemblingTranslator
loc->column = found_byte_offset;
}
+ const WasmSourceInformation& GetSourceInformation(v8::Isolate* isolate,
+ int index) override {
+ auto it = source_informations_.find(index);
+ if (it != source_informations_.end()) return it->second;
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::debug::WasmScript> script = script_.Get(isolate);
+ v8::debug::WasmDisassembly disassembly = script->DisassembleFunction(index);
+
+ auto inserted = source_informations_.insert(std::make_pair(
+ index, WasmSourceInformation({disassembly.disassembly.data(),
+ disassembly.disassembly.length()},
+ std::move(disassembly.offset_table))));
+ DCHECK(inserted.second);
+ return inserted.first->second;
+ }
+
+ const String16 GetHash(v8::Isolate* isolate, int index) override {
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::debug::WasmScript> script = script_.Get(isolate);
+ uint32_t hash = script->GetFunctionHash(index);
+ String16Builder builder;
+ builder.appendUnsignedAsHex(hash);
+ return builder.toString();
+ }
+
private:
String16 GetFakeScriptUrl(v8::Isolate* isolate, int func_index) {
v8::Local<v8::debug::WasmScript> script = script_.Get(isolate);
@@ -182,20 +250,10 @@ class WasmTranslation::TranslatorImpl::DisassemblingTranslator
String16 fake_script_id = GetFakeScriptId(underlyingScriptId, func_idx);
String16 fake_script_url = GetFakeScriptUrl(isolate, func_idx);
- v8::Local<v8::debug::WasmScript> script = script_.Get(isolate);
- // TODO(clemensh): Generate disassembly lazily when queried by the frontend.
- v8::debug::WasmDisassembly disassembly =
- script->DisassembleFunction(func_idx);
-
- DCHECK_EQ(0, offset_tables_.count(func_idx));
- offset_tables_.insert(
- std::make_pair(func_idx, std::move(disassembly.offset_table)));
- String16 source(disassembly.disassembly.data(),
- disassembly.disassembly.length());
std::unique_ptr<V8DebuggerScript> fake_script =
- V8DebuggerScript::CreateWasm(isolate, translation, script,
+ V8DebuggerScript::CreateWasm(isolate, translation, script_.Get(isolate),
fake_script_id, std::move(fake_script_url),
- source);
+ func_idx);
translation->AddFakeScript(fake_script->scriptId(), this);
agent->didParseSource(std::move(fake_script), true);
@@ -212,42 +270,19 @@ class WasmTranslation::TranslatorImpl::DisassemblingTranslator
const OffsetTable& GetOffsetTable(const TransLocation* loc) {
int func_index = loc->line;
- auto it = offset_tables_.find(func_index);
- // TODO(clemensh): Once we load disassembly lazily, the offset table
- // might not be there yet. Load it lazily then.
- DCHECK(it != offset_tables_.end());
- return it->second;
+ return GetSourceInformation(loc->translation->isolate_, func_index)
+ .offset_table;
}
- const OffsetTable* GetReverseTable(int func_index) {
- auto it = reverse_tables_.find(func_index);
- if (it != reverse_tables_.end()) return &it->second;
-
- // Find offset table, copy and sort it to get reverse table.
- it = offset_tables_.find(func_index);
- if (it == offset_tables_.end()) return nullptr;
-
- OffsetTable reverse_table = it->second;
- // Order by line, column, then byte offset.
- auto cmp = [](OffsetTable::value_type el1, OffsetTable::value_type el2) {
- if (el1.line != el2.line) return el1.line < el2.line;
- if (el1.column != el2.column) return el1.column < el2.column;
- return el1.byte_offset < el2.byte_offset;
- };
- std::sort(reverse_table.begin(), reverse_table.end(), cmp);
-
- auto inserted = reverse_tables_.insert(
- std::make_pair(func_index, std::move(reverse_table)));
- DCHECK(inserted.second);
- return &inserted.first->second;
+ const OffsetTable& GetReverseTable(v8::Isolate* isolate, int func_index) {
+ return GetSourceInformation(isolate, func_index).reverse_offset_table;
}
v8::Global<v8::debug::WasmScript> script_;
// We assume that only a subset of the functions will be disassembled, so we
// store them in a map instead of an array.
- std::unordered_map<int, const OffsetTable> offset_tables_;
- std::unordered_map<int, const OffsetTable> reverse_tables_;
+ std::unordered_map<int, WasmSourceInformation> source_informations_;
};
WasmTranslation::WasmTranslation(v8::Isolate* isolate)
@@ -280,6 +315,31 @@ void WasmTranslation::Clear() {
fake_scripts_.clear();
}
+const String16& WasmTranslation::GetSource(const String16& script_id,
+ int func_index) {
+ auto it = fake_scripts_.find(script_id);
+ DCHECK_NE(it, fake_scripts_.end());
+ return it->second->GetSourceInformation(isolate_, func_index).source;
+}
+
+int WasmTranslation::GetEndLine(const String16& script_id, int func_index) {
+ auto it = fake_scripts_.find(script_id);
+ DCHECK_NE(it, fake_scripts_.end());
+ return it->second->GetSourceInformation(isolate_, func_index).end_line;
+}
+
+int WasmTranslation::GetEndColumn(const String16& script_id, int func_index) {
+ auto it = fake_scripts_.find(script_id);
+ DCHECK_NE(it, fake_scripts_.end());
+ return it->second->GetSourceInformation(isolate_, func_index).end_column;
+}
+
+String16 WasmTranslation::GetHash(const String16& script_id, int func_index) {
+ auto it = fake_scripts_.find(script_id);
+ DCHECK_NE(it, fake_scripts_.end());
+ return it->second->GetHash(isolate_, func_index);
+}
+
// Translation "forward" (to artificial scripts).
bool WasmTranslation::TranslateWasmScriptLocationToProtocolLocation(
String16* script_id, int* line_number, int* column_number) {
@@ -326,5 +386,4 @@ void WasmTranslation::AddFakeScript(const String16& scriptId,
DCHECK_EQ(0, fake_scripts_.count(scriptId));
fake_scripts_.insert(std::make_pair(scriptId, translator));
}
-
} // namespace v8_inspector
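The translator above now derives its reverse offset table from the lazily built WasmSourceInformation and replaces the hand-rolled binary search with std::lower_bound. A minimal standalone sketch of that lookup scheme follows; Entry, Location, MakeReverseTable and FindByteOffset are illustrative stand-ins, not V8 API.

#include <algorithm>
#include <vector>

struct Entry { int line; int column; int byte_offset; };  // one offset-table row
struct Location { int line; int column; };                // disassembly position

// Sort a copy of the offset table by line, then column, then byte offset,
// mirroring the comparator used when building reverse_offset_table.
std::vector<Entry> MakeReverseTable(std::vector<Entry> table) {
  std::sort(table.begin(), table.end(), [](const Entry& a, const Entry& b) {
    if (a.line != b.line) return a.line < b.line;
    if (a.column != b.column) return a.column < b.column;
    return a.byte_offset < b.byte_offset;
  });
  return table;
}

// Translate a disassembly position back to a byte offset: prefer the first
// entry at or after |loc| on the same line; fall back to the previous entry;
// return |function_size| when |loc| points past the last mapped position.
int FindByteOffset(const std::vector<Entry>& reverse_table, Location loc,
                   int function_size) {
  auto less_than = [](const Entry& e, const Location& l) {
    return e.line < l.line || (e.line == l.line && e.column < l.column);
  };
  auto it = std::lower_bound(reverse_table.begin(), reverse_table.end(), loc,
                             less_than);
  if (it == reverse_table.end()) return function_size;
  if (it->line == loc.line || it == reverse_table.begin()) return it->byte_offset;
  return (it - 1)->byte_offset;
}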
diff --git a/deps/v8/src/inspector/wasm-translation.h b/deps/v8/src/inspector/wasm-translation.h
index 9bd33c0bc8..0df1f58e21 100644
--- a/deps/v8/src/inspector/wasm-translation.h
+++ b/deps/v8/src/inspector/wasm-translation.h
@@ -56,6 +56,13 @@ class WasmTranslation {
int* line_number,
int* column_number);
+ const String16& GetSource(const String16& script_id, int func_index);
+ int GetStartLine(const String16& script_id, int func_index) { return 0; }
+ int GetStartColumn(const String16& script_id, int func_index) { return 0; }
+ int GetEndLine(const String16& script_id, int func_index);
+ int GetEndColumn(const String16& script_id, int func_index);
+ String16 GetHash(const String16& script_id, int func_index);
+
private:
class TranslatorImpl;
friend class TranslatorImpl;
diff --git a/deps/v8/src/instruction-stream.cc b/deps/v8/src/instruction-stream.cc
index 7d00ea5434..6a95534af8 100644
--- a/deps/v8/src/instruction-stream.cc
+++ b/deps/v8/src/instruction-stream.cc
@@ -5,62 +5,81 @@
#include "src/instruction-stream.h"
#include "src/builtins/builtins.h"
-#include "src/heap/heap.h"
#include "src/objects-inl.h"
-#include "src/objects/code-inl.h"
+#include "src/snapshot/snapshot.h"
namespace v8 {
namespace internal {
-InstructionStream::InstructionStream(Code* code)
- : builtin_index_(code->builtin_index()) {
- DCHECK(Builtins::IsOffHeapBuiltin(code));
- const size_t page_size = AllocatePageSize();
- byte_length_ =
- RoundUp(static_cast<size_t>(code->instruction_size()), page_size);
-
- bytes_ = static_cast<uint8_t*>(AllocatePages(
- GetRandomMmapAddr(), byte_length_, page_size, PageAllocator::kReadWrite));
- CHECK_NOT_NULL(bytes_);
-
- std::memcpy(bytes_, code->instruction_start(), code->instruction_size());
- CHECK(SetPermissions(bytes_, byte_length_, PageAllocator::kReadExecute));
-}
-
-InstructionStream::~InstructionStream() {
- CHECK(FreePages(bytes_, byte_length_));
+// static
+bool InstructionStream::PcIsOffHeap(Isolate* isolate, Address pc) {
+#ifdef V8_EMBEDDED_BUILTINS
+ const uint8_t* start = isolate->embedded_blob();
+ return start <= pc && pc < start + isolate->embedded_blob_size();
+#else
+ return false;
+#endif
}
// static
Code* InstructionStream::TryLookupCode(Isolate* isolate, Address address) {
- DCHECK(FLAG_stress_off_heap_code);
- // TODO(jgruber,v8:6666): Replace with binary search through range checks
- // once off-heap code is mapped into a contiguous memory space.
- for (const InstructionStream* stream : isolate->off_heap_code_) {
- if (stream->Contains(address)) {
- return isolate->builtins()->builtin(stream->builtin_index());
+#ifdef V8_EMBEDDED_BUILTINS
+ if (!PcIsOffHeap(isolate, address)) return nullptr;
+
+ EmbeddedData d = EmbeddedData::FromBlob();
+
+ int l = 0, r = Builtins::builtin_count;
+ while (l < r) {
+ const int mid = (l + r) / 2;
+ const uint8_t* start = d.InstructionStartOfBuiltin(mid);
+ const uint8_t* end = start + d.InstructionSizeOfBuiltin(mid);
+
+ if (address < start) {
+ r = mid;
+ } else if (address >= end) {
+ l = mid + 1;
+ } else {
+ return isolate->builtins()->builtin(mid);
}
}
+
+ UNREACHABLE();
+#else
return nullptr;
+#endif
}
+#ifdef V8_EMBEDDED_BUILTINS
// static
-InstructionStream* InstructionStream::TryLookupInstructionStream(
- Isolate* isolate, Code* code) {
- DCHECK(FLAG_stress_off_heap_code);
- // TODO(jgruber,v8:6666): Replace with binary search through range checks
- // once off-heap code is mapped into a contiguous memory space.
- const int builtin_index = code->builtin_index();
- DCHECK(Builtins::IsBuiltinId(builtin_index));
- for (InstructionStream* stream : isolate->off_heap_code_) {
- if (stream->builtin_index() == builtin_index) return stream;
- }
- return nullptr;
+void InstructionStream::CreateOffHeapInstructionStream(Isolate* isolate,
+ uint8_t** data,
+ uint32_t* size) {
+ EmbeddedData d = EmbeddedData::FromIsolate(isolate);
+
+ const uint32_t page_size = static_cast<uint32_t>(AllocatePageSize());
+ const uint32_t allocated_size = RoundUp(d.size(), page_size);
+
+ uint8_t* allocated_bytes = static_cast<uint8_t*>(
+ AllocatePages(GetRandomMmapAddr(), allocated_size, page_size,
+ PageAllocator::kReadWrite));
+ CHECK_NOT_NULL(allocated_bytes);
+
+ std::memcpy(allocated_bytes, d.data(), d.size());
+ CHECK(SetPermissions(allocated_bytes, allocated_size,
+ PageAllocator::kReadExecute));
+
+ *data = allocated_bytes;
+ *size = allocated_size;
+
+ d.Dispose();
}
-bool InstructionStream::Contains(Address address) const {
- return bytes_ <= address && address < bytes_ + byte_length_;
+// static
+void InstructionStream::FreeOffHeapInstructionStream(uint8_t* data,
+ uint32_t size) {
+ CHECK(FreePages(data, size));
}
+#endif // V8_EMBEDDED_BUILTINS
} // namespace internal
} // namespace v8
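TryLookupCode now resolves a pc against the contiguous embedded blob with a binary search over per-builtin [start, end) instruction ranges instead of scanning a list of mmap'ed streams. A self-contained sketch of that range search, using hypothetical start/size vectors in place of the real EmbeddedData accessors (it returns -1 where the real code would hit UNREACHABLE, since PcIsOffHeap has already been checked there):

#include <cstdint>
#include <vector>

// starts[i] is the first instruction byte of builtin i inside one contiguous
// blob and sizes[i] its instruction size; builtins are laid out in ascending,
// non-overlapping order.
int LookupBuiltinIndex(const std::vector<const uint8_t*>& starts,
                       const std::vector<uint32_t>& sizes, const uint8_t* pc) {
  int l = 0, r = static_cast<int>(starts.size());
  while (l < r) {
    const int mid = (l + r) / 2;
    const uint8_t* start = starts[mid];
    const uint8_t* end = start + sizes[mid];
    if (pc < start) {
      r = mid;
    } else if (pc >= end) {
      l = mid + 1;
    } else {
      return mid;  // pc falls inside builtin mid's instructions
    }
  }
  return -1;  // pc does not belong to any builtin range
}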
diff --git a/deps/v8/src/instruction-stream.h b/deps/v8/src/instruction-stream.h
index 750e94a955..d659de4266 100644
--- a/deps/v8/src/instruction-stream.h
+++ b/deps/v8/src/instruction-stream.h
@@ -14,32 +14,25 @@ namespace internal {
class Code;
class Isolate;
-// Wraps an mmap'ed off-heap instruction stream. This class will likely become
-// unneeded once --stress-off-heap-code is removed.
-class InstructionStream final {
+// Wraps an off-heap instruction stream.
+// TODO(jgruber,v8:6666): Remove this class.
+class InstructionStream final : public AllStatic {
public:
- explicit InstructionStream(Code* code);
- ~InstructionStream();
+ // Returns true iff the given pc points into an off-heap instruction stream.
+ static bool PcIsOffHeap(Isolate* isolate, Address pc);
// Returns the corresponding Code object if it exists, and nullptr otherwise.
static Code* TryLookupCode(Isolate* isolate, Address address);
- // Returns the corresponding stream if it exists, and nullptr otherwise.
- static InstructionStream* TryLookupInstructionStream(Isolate* isolate,
- Code* code);
-
- bool Contains(Address address) const;
-
- int builtin_index() const { return builtin_index_; }
- size_t byte_length() const { return byte_length_; }
- uint8_t* bytes() const { return bytes_; }
-
- private:
- size_t byte_length_;
- uint8_t* bytes_;
- int builtin_index_;
-
- DISALLOW_COPY_AND_ASSIGN(InstructionStream)
+#ifdef V8_EMBEDDED_BUILTINS
+ // During snapshot creation, we first create an executable off-heap area
+ // containing all off-heap code. The area is guaranteed to be contiguous.
+ // Note that this only applies when building the snapshot, e.g. for
+ // mksnapshot. Otherwise, off-heap code is embedded directly into the binary.
+ static void CreateOffHeapInstructionStream(Isolate* isolate, uint8_t** data,
+ uint32_t* size);
+ static void FreeOffHeapInstructionStream(uint8_t* data, uint32_t size);
+#endif
};
} // namespace internal
diff --git a/deps/v8/src/interface-descriptors.h b/deps/v8/src/interface-descriptors.h
index dd704144de..61da0c3f4e 100644
--- a/deps/v8/src/interface-descriptors.h
+++ b/deps/v8/src/interface-descriptors.h
@@ -9,6 +9,7 @@
#include "src/assembler.h"
#include "src/globals.h"
+#include "src/isolate.h"
#include "src/macro-assembler.h"
namespace v8 {
@@ -656,7 +657,9 @@ class ConstructStubDescriptor : public CallInterfaceDescriptor {
CallInterfaceDescriptor)
};
-
+// This descriptor is also used by DebugBreakTrampoline because it handles both
+// regular function calls and construct calls, and we need to pass new.target
+// for the latter.
class ConstructTrampolineDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kFunction, kNewTarget, kActualArgumentsCount)
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc
index 2d156e4095..0558f45acf 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.cc
+++ b/deps/v8/src/interpreter/bytecode-array-builder.cc
@@ -864,6 +864,12 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreKeyedProperty(
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::StoreInArrayLiteral(
+ Register array, Register index, int feedback_slot) {
+ OutputStaInArrayLiteral(array, index, feedback_slot);
+ return *this;
+}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreHomeObjectProperty(
Register object, int feedback_slot, LanguageMode language_mode) {
size_t name_index = HomeObjectSymbolConstantPoolEntry();
@@ -998,6 +1004,11 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::ToName(Register out) {
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::ToString() {
+ OutputToString();
+ return *this;
+}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::ToNumber(int feedback_slot) {
OutputToNumber(feedback_slot);
return *this;
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index 05086bf714..a22462d1fa 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -21,6 +21,7 @@
namespace v8 {
namespace internal {
+class FeedbackVectorSpec;
class Isolate;
namespace interpreter {
@@ -160,6 +161,10 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
BytecodeArrayBuilder& StoreKeyedProperty(Register object, Register key,
int feedback_slot,
LanguageMode language_mode);
+ // Store an own element in an array literal. The value to be stored should be
+ // in the accumulator.
+ BytecodeArrayBuilder& StoreInArrayLiteral(Register array, Register index,
+ int feedback_slot);
// Store the home object property. The value to be stored should be in the
// accumulator.
BytecodeArrayBuilder& StoreHomeObjectProperty(Register object,
@@ -371,6 +376,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
// Converts accumulator and stores result in register |out|.
BytecodeArrayBuilder& ToObject(Register out);
BytecodeArrayBuilder& ToName(Register out);
+ BytecodeArrayBuilder& ToString();
// Converts accumulator and stores result back in accumulator.
BytecodeArrayBuilder& ToNumber(int feedback_slot);
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index 997c5a8da8..29ad2c2a82 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -10,7 +10,6 @@
#include "src/ast/scopes.h"
#include "src/builtins/builtins-constructor.h"
#include "src/code-stubs.h"
-#include "src/compilation-info.h"
#include "src/compiler.h"
#include "src/interpreter/bytecode-flags.h"
#include "src/interpreter/bytecode-jump-table.h"
@@ -22,6 +21,7 @@
#include "src/objects/literal-objects-inl.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/token.h"
+#include "src/unoptimized-compilation-info.h"
namespace v8 {
namespace internal {
@@ -593,6 +593,11 @@ class BytecodeGenerator::ExpressionResultScope {
type_hint_ = TypeHint::kBoolean;
}
+ void SetResultIsString() {
+ DCHECK_EQ(type_hint_, TypeHint::kAny);
+ type_hint_ = TypeHint::kString;
+ }
+
TypeHint type_hint() const { return type_hint_; }
private:
@@ -705,7 +710,7 @@ class BytecodeGenerator::GlobalDeclarationsBuilder final : public ZoneObject {
declarations_.push_back(Declaration(name, slot, nullptr));
}
- Handle<FixedArray> AllocateDeclarations(CompilationInfo* info,
+ Handle<FixedArray> AllocateDeclarations(UnoptimizedCompilationInfo* info,
Handle<Script> script,
Isolate* isolate) {
DCHECK(has_constant_pool_entry_);
@@ -799,31 +804,33 @@ class BytecodeGenerator::CurrentScope final {
class BytecodeGenerator::FeedbackSlotCache : public ZoneObject {
public:
- typedef std::pair<TypeofMode, void*> Key;
-
explicit FeedbackSlotCache(Zone* zone) : map_(zone) {}
- void Put(TypeofMode typeof_mode, Variable* variable, FeedbackSlot slot) {
- Key key = std::make_pair(typeof_mode, variable);
- auto entry = std::make_pair(key, slot);
- map_.insert(entry);
+ void Put(FeedbackSlotKind slot_kind, Variable* variable, FeedbackSlot slot) {
+ PutImpl(slot_kind, variable, slot);
+ }
+ void Put(FeedbackSlotKind slot_kind, AstNode* node, FeedbackSlot slot) {
+ PutImpl(slot_kind, node, slot);
+ }
+
+ FeedbackSlot Get(FeedbackSlotKind slot_kind, Variable* variable) const {
+ return GetImpl(slot_kind, variable);
+ }
+ FeedbackSlot Get(FeedbackSlotKind slot_kind, AstNode* node) const {
+ return GetImpl(slot_kind, node);
}
- void Put(AstNode* node, FeedbackSlot slot) {
- Key key = std::make_pair(NOT_INSIDE_TYPEOF, node);
+
+ private:
+ typedef std::pair<FeedbackSlotKind, void*> Key;
+
+ void PutImpl(FeedbackSlotKind slot_kind, void* node, FeedbackSlot slot) {
+ Key key = std::make_pair(slot_kind, node);
auto entry = std::make_pair(key, slot);
map_.insert(entry);
}
- FeedbackSlot Get(TypeofMode typeof_mode, Variable* variable) const {
- Key key = std::make_pair(typeof_mode, variable);
- auto iter = map_.find(key);
- if (iter != map_.end()) {
- return iter->second;
- }
- return FeedbackSlot();
- }
- FeedbackSlot Get(AstNode* node) const {
- Key key = std::make_pair(NOT_INSIDE_TYPEOF, node);
+ FeedbackSlot GetImpl(FeedbackSlotKind slot_kind, void* node) const {
+ Key key = std::make_pair(slot_kind, node);
auto iter = map_.find(key);
if (iter != map_.end()) {
return iter->second;
@@ -831,7 +838,6 @@ class BytecodeGenerator::FeedbackSlotCache : public ZoneObject {
return FeedbackSlot();
}
- private:
ZoneMap<Key, FeedbackSlot> map_;
};
@@ -867,7 +873,8 @@ static bool IsInEagerLiterals(
#endif // DEBUG
BytecodeGenerator::BytecodeGenerator(
- CompilationInfo* info, const AstStringConstants* ast_string_constants,
+ UnoptimizedCompilationInfo* info,
+ const AstStringConstants* ast_string_constants,
ZoneVector<FunctionLiteral*>* eager_inner_literals)
: zone_(info->zone()),
builder_(zone(), info->num_parameters_including_this(),
@@ -913,7 +920,7 @@ Handle<BytecodeArray> BytecodeGenerator::FinalizeBytecode(
info()->set_coverage_info(
isolate->factory()->NewCoverageInfo(block_coverage_builder_->slots()));
if (FLAG_trace_block_coverage) {
- info()->coverage_info()->Print(info()->shared_info()->name());
+ info()->coverage_info()->Print(info()->literal()->GetDebugName());
}
}
@@ -1291,7 +1298,8 @@ void BytecodeGenerator::VisitDeclarations(Declaration::List* declarations) {
globals_builder()->set_constant_pool_entry(
builder()->AllocateDeferredConstantPoolEntry());
- int encoded_flags = info()->GetDeclareGlobalsFlags();
+ int encoded_flags = DeclareGlobalsEvalFlag::encode(info()->is_eval()) |
+ DeclareGlobalsNativeFlag::encode(info()->is_native());
// Emit code to declare globals.
RegisterList args = register_allocator()->NewRegisterList(3);
@@ -1301,7 +1309,7 @@ void BytecodeGenerator::VisitDeclarations(Declaration::List* declarations) {
.LoadLiteral(Smi::FromInt(encoded_flags))
.StoreAccumulatorInRegister(args[1])
.MoveRegister(Register::function_closure(), args[2])
- .CallRuntime(Runtime::kDeclareGlobalsForInterpreter, args);
+ .CallRuntime(Runtime::kDeclareGlobals, args);
// Push and reset globals builder.
global_declarations_.push_back(globals_builder());
@@ -2057,6 +2065,7 @@ void BytecodeGenerator::VisitLiteral(Literal* expr) {
break;
case Literal::kString:
builder()->LoadLiteral(expr->AsRawString());
+ execution_result()->SetResultIsString();
break;
case Literal::kSymbol:
builder()->LoadLiteral(expr->AsSymbol());
@@ -2095,7 +2104,7 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// If constant properties is an empty fixed array, use a cached empty fixed
// array to ensure it's only added to the constant pool once.
if (expr->properties_count() == 0) {
- entry = builder()->EmptyFixedArrayConstantPoolEntry();
+ entry = builder()->EmptyBoilerplateDescriptionConstantPoolEntry();
} else {
entry = builder()->AllocateDeferredConstantPoolEntry();
object_literals_.push_back(std::make_pair(expr, entry));
@@ -2128,6 +2137,7 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (key->IsStringLiteral()) {
DCHECK(key->IsPropertyName());
if (property->emit_store()) {
+ builder()->SetExpressionPosition(property->value());
VisitForAccumulatorValue(property->value());
FeedbackSlot slot = feedback_spec()->AddStoreOwnICSlot();
if (FunctionLiteral::NeedsHomeObject(property->value())) {
@@ -2142,13 +2152,16 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
literal, key->AsRawPropertyName(), feedback_index(slot));
}
} else {
+ builder()->SetExpressionPosition(property->value());
VisitForEffect(property->value());
}
} else {
RegisterList args = register_allocator()->NewRegisterList(4);
builder()->MoveRegister(literal, args[0]);
+ builder()->SetExpressionPosition(property->key());
VisitForRegisterValue(property->key(), args[1]);
+ builder()->SetExpressionPosition(property->value());
VisitForRegisterValue(property->value(), args[2]);
if (property->emit_store()) {
builder()
@@ -2168,6 +2181,7 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(!property->NeedsSetFunctionName());
RegisterList args = register_allocator()->NewRegisterList(2);
builder()->MoveRegister(literal, args[0]);
+ builder()->SetExpressionPosition(property->value());
VisitForRegisterValue(property->value(), args[1]);
builder()->CallRuntime(Runtime::kInternalSetPrototype, args);
break;
@@ -2221,6 +2235,7 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(!property->NeedsSetFunctionName());
RegisterList args = register_allocator()->NewRegisterList(2);
builder()->MoveRegister(literal, args[0]);
+ builder()->SetExpressionPosition(property->value());
VisitForRegisterValue(property->value(), args[1]);
builder()->CallRuntime(Runtime::kInternalSetPrototype, args);
continue;
@@ -2232,6 +2247,7 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::MATERIALIZED_LITERAL: {
Register key = register_allocator()->NewRegister();
BuildLoadPropertyKey(property, key);
+ builder()->SetExpressionPosition(property->value());
Register value = VisitForRegisterValue(property->value());
VisitSetHomeObject(value, literal, property);
@@ -2254,6 +2270,7 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
RegisterList args = register_allocator()->NewRegisterList(4);
builder()->MoveRegister(literal, args[0]);
BuildLoadPropertyKey(property, args[1]);
+ builder()->SetExpressionPosition(property->value());
VisitForRegisterValue(property->value(), args[2]);
VisitSetHomeObject(args[2], literal, property);
builder()
@@ -2269,6 +2286,7 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::SPREAD: {
RegisterList args = register_allocator()->NewRegisterList(2);
builder()->MoveRegister(literal, args[0]);
+ builder()->SetExpressionPosition(property->value());
VisitForRegisterValue(property->value(), args[1]);
builder()->CallRuntime(Runtime::kCopyDataProperties, args);
break;
@@ -2300,17 +2318,18 @@ void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
builder()->CreateArrayLiteral(entry, literal_index, flags);
array_literals_.push_back(std::make_pair(expr, entry));
- Register index = register_allocator()->NewRegister();
Register literal = register_allocator()->NewRegister();
builder()->StoreAccumulatorInRegister(literal);
// We'll reuse the same literal slot for all of the non-constant
// subexpressions that use a keyed store IC.
+ Register index = register_allocator()->NewRegister();
+ int array_index = 0;
+
// Evaluate all the non-constant subexpressions and store them into the
// newly cloned array.
FeedbackSlot slot;
- int array_index = 0;
ZoneList<Expression*>::iterator iter = expr->BeginValue();
for (; iter != expr->FirstSpreadOrEndValue(); ++iter, array_index++) {
Expression* subexpr = *iter;
@@ -2326,32 +2345,38 @@ void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
builder()->StoreKeyedProperty(literal, index, feedback_index(slot),
language_mode());
}
-
- // Handle spread elements and elements following.
- for (; iter != expr->EndValue(); ++iter) {
- Expression* subexpr = *iter;
- if (subexpr->IsSpread()) {
- BuildArrayLiteralSpread(subexpr->AsSpread(), literal);
- } else if (!subexpr->IsTheHoleLiteral()) {
- // Perform %AppendElement(array, <subexpr>)
- RegisterAllocationScope register_scope(this);
- RegisterList args = register_allocator()->NewRegisterList(2);
- builder()->MoveRegister(literal, args[0]);
- VisitForRegisterValue(subexpr, args[1]);
- builder()->CallRuntime(Runtime::kAppendElement, args);
- } else {
- // Peform ++<array>.length;
- // TODO(caitp): Why can't we just %AppendElement(array, <The Hole>?)
- auto length = ast_string_constants()->length_string();
- builder()->LoadNamedProperty(
- literal, length, feedback_index(feedback_spec()->AddLoadICSlot()));
- builder()->UnaryOperation(
- Token::INC, feedback_index(feedback_spec()->AddBinaryOpICSlot()));
- builder()->StoreNamedProperty(
- literal, length,
- feedback_index(
- feedback_spec()->AddStoreICSlot(LanguageMode::kStrict)),
- LanguageMode::kStrict);
+ if (iter != expr->EndValue()) {
+ builder()->LoadLiteral(array_index).StoreAccumulatorInRegister(index);
+
+ // Handle the first spread element and everything that follows.
+ FeedbackSlot element_slot = feedback_spec()->AddStoreInArrayLiteralICSlot();
+ FeedbackSlot index_slot = feedback_spec()->AddBinaryOpICSlot();
+ // TODO(neis): Only create length_slot when there are holes.
+ FeedbackSlot length_slot =
+ feedback_spec()->AddStoreICSlot(LanguageMode::kStrict);
+ for (; iter != expr->EndValue(); ++iter) {
+ Expression* subexpr = *iter;
+ if (subexpr->IsSpread()) {
+ BuildArrayLiteralSpread(subexpr->AsSpread(), literal, index, index_slot,
+ element_slot);
+ } else if (!subexpr->IsTheHoleLiteral()) {
+ // literal[index++] = subexpr
+ VisitForAccumulatorValue(subexpr);
+ builder()
+ ->StoreInArrayLiteral(literal, index, feedback_index(element_slot))
+ .LoadAccumulatorWithRegister(index)
+ .UnaryOperation(Token::INC, feedback_index(index_slot))
+ .StoreAccumulatorInRegister(index);
+ } else {
+ // literal.length = ++index
+ auto length = ast_string_constants()->length_string();
+ builder()
+ ->LoadAccumulatorWithRegister(index)
+ .UnaryOperation(Token::INC, feedback_index(index_slot))
+ .StoreAccumulatorInRegister(index)
+ .StoreNamedProperty(literal, length, feedback_index(length_slot),
+ LanguageMode::kStrict);
+ }
}
}
@@ -2359,38 +2384,58 @@ void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
builder()->LoadAccumulatorWithRegister(literal);
}
-void BytecodeGenerator::BuildArrayLiteralSpread(Spread* spread,
- Register array) {
+void BytecodeGenerator::BuildArrayLiteralSpread(Spread* spread, Register array,
+ Register index,
+ FeedbackSlot index_slot,
+ FeedbackSlot element_slot) {
RegisterAllocationScope register_scope(this);
- RegisterList args = register_allocator()->NewRegisterList(2);
- builder()->MoveRegister(array, args[0]);
- Register next_result = args[1];
+ Register value = register_allocator()->NewRegister();
builder()->SetExpressionAsStatementPosition(spread->expression());
IteratorRecord iterator =
BuildGetIteratorRecord(spread->expression(), IteratorType::kNormal);
+
LoopBuilder loop_builder(builder(), nullptr, nullptr);
loop_builder.LoopHeader();
// Call the iterator's .next() method. Break from the loop if the `done`
// property is truthy, otherwise load the value from the iterator result and
// append the argument.
- BuildIteratorNext(iterator, next_result);
+ BuildIteratorNext(iterator, value);
builder()->LoadNamedProperty(
- next_result, ast_string_constants()->done_string(),
+ value, ast_string_constants()->done_string(),
feedback_index(feedback_spec()->AddLoadICSlot()));
loop_builder.BreakIfTrue(ToBooleanMode::kConvertToBoolean);
loop_builder.LoopBody();
builder()
- ->LoadNamedProperty(next_result, ast_string_constants()->value_string(),
+ // value = value.value
+ ->LoadNamedProperty(value, ast_string_constants()->value_string(),
feedback_index(feedback_spec()->AddLoadICSlot()))
- .StoreAccumulatorInRegister(args[1])
- .CallRuntime(Runtime::kAppendElement, args);
+ .StoreAccumulatorInRegister(value)
+ // array[index] = value
+ .StoreInArrayLiteral(array, index, feedback_index(element_slot))
+ // index++
+ .LoadAccumulatorWithRegister(index)
+ .UnaryOperation(Token::INC, feedback_index(index_slot))
+ .StoreAccumulatorInRegister(index);
loop_builder.BindContinueTarget();
loop_builder.JumpToHeader(loop_depth_);
}
+void BytecodeGenerator::VisitStoreInArrayLiteral(StoreInArrayLiteral* expr) {
+ builder()->SetExpressionAsStatementPosition(expr);
+ RegisterAllocationScope register_scope(this);
+ Register array = register_allocator()->NewRegister();
+ Register index = register_allocator()->NewRegister();
+ VisitForRegisterValue(expr->array(), array);
+ VisitForRegisterValue(expr->index(), index);
+ VisitForAccumulatorValue(expr->value());
+ builder()->StoreInArrayLiteral(
+ array, index,
+ feedback_index(feedback_spec()->AddStoreInArrayLiteralICSlot()));
+}
+
void BytecodeGenerator::VisitVariableProxy(VariableProxy* proxy) {
builder()->SetExpressionPosition(proxy);
BuildVariableLoad(proxy->var(), proxy->hole_check_mode());
@@ -2616,9 +2661,7 @@ void BytecodeGenerator::BuildVariableAssignment(
break;
}
case VariableLocation::UNALLOCATED: {
- // TODO(ishell): consider using FeedbackSlotCache for variables here.
- FeedbackSlot slot =
- feedback_spec()->AddStoreGlobalICSlot(language_mode());
+ FeedbackSlot slot = GetCachedStoreGlobalICSlot(language_mode(), variable);
builder()->StoreGlobal(variable->raw_name(), feedback_index(slot));
break;
}
@@ -3968,13 +4011,23 @@ void BytecodeGenerator::VisitArithmeticExpression(BinaryOperation* expr) {
Expression* subexpr;
Smi* literal;
if (expr->IsSmiLiteralOperation(&subexpr, &literal)) {
- VisitForAccumulatorValue(subexpr);
+ TypeHint type_hint = VisitForAccumulatorValue(subexpr);
builder()->SetExpressionPosition(expr);
builder()->BinaryOperationSmiLiteral(expr->op(), literal,
feedback_index(slot));
+ if (expr->op() == Token::ADD && type_hint == TypeHint::kString) {
+ execution_result()->SetResultIsString();
+ }
} else {
- Register lhs = VisitForRegisterValue(expr->left());
- VisitForAccumulatorValue(expr->right());
+ TypeHint lhs_type = VisitForAccumulatorValue(expr->left());
+ Register lhs = register_allocator()->NewRegister();
+ builder()->StoreAccumulatorInRegister(lhs);
+ TypeHint rhs_type = VisitForAccumulatorValue(expr->right());
+ if (expr->op() == Token::ADD &&
+ (lhs_type == TypeHint::kString || rhs_type == TypeHint::kString)) {
+ execution_result()->SetResultIsString();
+ }
+
builder()->SetExpressionPosition(expr);
builder()->BinaryOperation(expr->op(), lhs, feedback_index(slot));
}
@@ -3982,7 +4035,7 @@ void BytecodeGenerator::VisitArithmeticExpression(BinaryOperation* expr) {
void BytecodeGenerator::VisitNaryArithmeticExpression(NaryOperation* expr) {
// TODO(leszeks): Add support for lhs smi in commutative ops.
- VisitForAccumulatorValue(expr->first());
+ TypeHint type_hint = VisitForAccumulatorValue(expr->first());
for (size_t i = 0; i < expr->subsequent_length(); ++i) {
RegisterAllocationScope register_scope(this);
@@ -3994,15 +4047,23 @@ void BytecodeGenerator::VisitNaryArithmeticExpression(NaryOperation* expr) {
} else {
Register lhs = register_allocator()->NewRegister();
builder()->StoreAccumulatorInRegister(lhs);
- VisitForAccumulatorValue(expr->subsequent(i));
+ TypeHint rhs_hint = VisitForAccumulatorValue(expr->subsequent(i));
+ if (rhs_hint == TypeHint::kString) type_hint = TypeHint::kString;
builder()->SetExpressionPosition(expr->subsequent_op_position(i));
builder()->BinaryOperation(
expr->op(), lhs,
feedback_index(feedback_spec()->AddBinaryOpICSlot()));
}
}
+
+ if (type_hint == TypeHint::kString && expr->op() == Token::ADD) {
+ // If any operand of an ADD is a String, a String is produced.
+ execution_result()->SetResultIsString();
+ }
}
+// Note: the actual spreading is performed by the surrounding expression's
+// visitor.
void BytecodeGenerator::VisitSpread(Spread* expr) { Visit(expr->expression()); }
void BytecodeGenerator::VisitEmptyParentheses(EmptyParentheses* expr) {
@@ -4181,6 +4242,53 @@ void BytecodeGenerator::VisitGetTemplateObject(GetTemplateObject* expr) {
builder()->GetTemplateObject(entry, feedback_index(literal_slot));
}
+void BytecodeGenerator::VisitTemplateLiteral(TemplateLiteral* expr) {
+ const TemplateLiteral::StringList& parts = *expr->string_parts();
+ const TemplateLiteral::ExpressionList& substitutions = *expr->substitutions();
+ // Template strings with no substitutions are turned into StringLiterals.
+ DCHECK_GT(substitutions.length(), 0);
+ DCHECK_EQ(parts.length(), substitutions.length() + 1);
+
+ // Generate string concatenation
+ // TODO(caitp): Don't generate feedback slot if it's not used --- introduce
+ // a simple, concise, reusable mechanism to lazily create reusable slots.
+ FeedbackSlot slot = feedback_spec()->AddBinaryOpICSlot();
+ Register last_part = register_allocator()->NewRegister();
+ bool last_part_valid = false;
+
+ builder()->SetExpressionPosition(expr);
+ for (int i = 0; i < substitutions.length(); ++i) {
+ if (i != 0) {
+ builder()->StoreAccumulatorInRegister(last_part);
+ last_part_valid = true;
+ }
+
+ if (!parts[i]->IsEmpty()) {
+ builder()->LoadLiteral(parts[i]);
+ if (last_part_valid) {
+ builder()->BinaryOperation(Token::ADD, last_part, feedback_index(slot));
+ }
+ builder()->StoreAccumulatorInRegister(last_part);
+ last_part_valid = true;
+ }
+
+ TypeHint type_hint = VisitForAccumulatorValue(substitutions[i]);
+ if (type_hint != TypeHint::kString) {
+ builder()->ToString();
+ }
+ if (last_part_valid) {
+ builder()->BinaryOperation(Token::ADD, last_part, feedback_index(slot));
+ }
+ last_part_valid = false;
+ }
+
+ if (!parts.last()->IsEmpty()) {
+ builder()->StoreAccumulatorInRegister(last_part);
+ builder()->LoadLiteral(parts.last());
+ builder()->BinaryOperation(Token::ADD, last_part, feedback_index(slot));
+ }
+}
+
void BytecodeGenerator::VisitThisFunction(ThisFunction* expr) {
builder()->LoadAccumulatorWithRegister(Register::function_closure());
}
@@ -4856,23 +4964,42 @@ int BytecodeGenerator::feedback_index(FeedbackSlot slot) const {
FeedbackSlot BytecodeGenerator::GetCachedLoadGlobalICSlot(
TypeofMode typeof_mode, Variable* variable) {
- FeedbackSlot slot = feedback_slot_cache()->Get(typeof_mode, variable);
+ FeedbackSlotKind slot_kind =
+ typeof_mode == INSIDE_TYPEOF
+ ? FeedbackSlotKind::kLoadGlobalInsideTypeof
+ : FeedbackSlotKind::kLoadGlobalNotInsideTypeof;
+ FeedbackSlot slot = feedback_slot_cache()->Get(slot_kind, variable);
if (!slot.IsInvalid()) {
return slot;
}
slot = feedback_spec()->AddLoadGlobalICSlot(typeof_mode);
- feedback_slot_cache()->Put(typeof_mode, variable, slot);
+ feedback_slot_cache()->Put(slot_kind, variable, slot);
+ return slot;
+}
+
+FeedbackSlot BytecodeGenerator::GetCachedStoreGlobalICSlot(
+ LanguageMode language_mode, Variable* variable) {
+ FeedbackSlotKind slot_kind = is_strict(language_mode)
+ ? FeedbackSlotKind::kStoreGlobalStrict
+ : FeedbackSlotKind::kStoreGlobalSloppy;
+ FeedbackSlot slot = feedback_slot_cache()->Get(slot_kind, variable);
+ if (!slot.IsInvalid()) {
+ return slot;
+ }
+ slot = feedback_spec()->AddStoreGlobalICSlot(language_mode);
+ feedback_slot_cache()->Put(slot_kind, variable, slot);
return slot;
}
FeedbackSlot BytecodeGenerator::GetCachedCreateClosureSlot(
FunctionLiteral* literal) {
- FeedbackSlot slot = feedback_slot_cache()->Get(literal);
+ FeedbackSlotKind slot_kind = FeedbackSlotKind::kCreateClosure;
+ FeedbackSlot slot = feedback_slot_cache()->Get(slot_kind, literal);
if (!slot.IsInvalid()) {
return slot;
}
slot = feedback_spec()->AddCreateClosureSlot();
- feedback_slot_cache()->Put(literal, slot);
+ feedback_slot_cache()->Put(slot_kind, literal, slot);
return slot;
}
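The new VisitTemplateLiteral lowers a template literal into a left-to-right chain of ADD operations, skipping empty string parts and applying ToString to a substitution only when the type hint does not already guarantee a string. The net effect can be sketched with plain std::string in place of bytecode; ConcatTemplate is illustrative only, and the substitutions are assumed to be already converted to strings.

#include <string>
#include <vector>

// parts.size() == substitutions.size() + 1, mirroring the AST invariant
// DCHECK'd in VisitTemplateLiteral.
std::string ConcatTemplate(const std::vector<std::string>& parts,
                           const std::vector<std::string>& substitutions) {
  std::string result;
  for (size_t i = 0; i < substitutions.size(); ++i) {
    if (!parts[i].empty()) result += parts[i];  // empty cooked strings are skipped
    result += substitutions[i];                 // substitution, already a string
  }
  if (!parts.back().empty()) result += parts.back();
  return result;
}

// For example, ConcatTemplate({"a", "b", ""}, {"X", "Y"}) yields "aXbY",
// matching the interleaved order the generated bytecode produces.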
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index c96e5e9e83..e6e63f3c8e 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -16,7 +16,7 @@ namespace internal {
class AstNodeSourceRanges;
class AstStringConstants;
-class CompilationInfo;
+class UnoptimizedCompilationInfo;
enum class SourceRangeKind;
namespace interpreter {
@@ -29,7 +29,8 @@ class BytecodeJumpTable;
class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
public:
explicit BytecodeGenerator(
- CompilationInfo* info, const AstStringConstants* ast_string_constants,
+ UnoptimizedCompilationInfo* info,
+ const AstStringConstants* ast_string_constants,
ZoneVector<FunctionLiteral*>* eager_inner_literals);
void GenerateBytecode(uintptr_t stack_limit);
@@ -66,7 +67,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
using ToBooleanMode = BytecodeArrayBuilder::ToBooleanMode;
enum class TestFallthrough { kThen, kElse, kNone };
- enum class TypeHint { kAny, kBoolean };
+ enum class TypeHint { kAny, kBoolean, kString };
void GenerateBytecodeBody();
void AllocateDeferredConstants(Isolate* isolate, Handle<Script> script);
@@ -173,7 +174,9 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
BytecodeLabel* if_called,
BytecodeLabels* if_notcalled);
- void BuildArrayLiteralSpread(Spread* spread, Register array);
+ void BuildArrayLiteralSpread(Spread* spread, Register array, Register index,
+ FeedbackSlot index_slot,
+ FeedbackSlot element_slot);
void AllocateTopLevelRegisters();
void VisitArgumentsObject(Variable* variable);
@@ -240,7 +243,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
// specifies the type of the result of the visited expression.
TypeHint VisitForAccumulatorValue(Expression* expr);
void VisitForAccumulatorValueOrTheHole(Expression* expr);
- MUST_USE_RESULT Register VisitForRegisterValue(Expression* expr);
+ V8_WARN_UNUSED_RESULT Register VisitForRegisterValue(Expression* expr);
INLINE(void VisitForRegisterValue(Expression* expr, Register destination));
void VisitAndPushIntoRegisterList(Expression* expr, RegisterList* reg_list);
void VisitForEffect(Expression* expr);
@@ -260,6 +263,8 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
// already exists.
FeedbackSlot GetCachedLoadGlobalICSlot(TypeofMode typeof_mode,
Variable* variable);
+ FeedbackSlot GetCachedStoreGlobalICSlot(LanguageMode language_mode,
+ Variable* variable);
FeedbackSlot GetCachedCreateClosureSlot(FunctionLiteral* literal);
void AddToEagerLiteralsIfEager(FunctionLiteral* literal);
@@ -274,7 +279,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
inline BytecodeArrayBuilder* builder() { return &builder_; }
inline Zone* zone() const { return zone_; }
inline DeclarationScope* closure_scope() const { return closure_scope_; }
- inline CompilationInfo* info() const { return info_; }
+ inline UnoptimizedCompilationInfo* info() const { return info_; }
inline const AstStringConstants* ast_string_constants() const {
return ast_string_constants_;
}
@@ -320,7 +325,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
Zone* zone_;
BytecodeArrayBuilder builder_;
- CompilationInfo* info_;
+ UnoptimizedCompilationInfo* info_;
const AstStringConstants* ast_string_constants_;
DeclarationScope* closure_scope_;
Scope* current_scope_;
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index be31f27c76..5533287061 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -114,6 +114,8 @@ namespace interpreter {
OperandType::kIdx, OperandType::kIdx) \
V(StaKeyedProperty, AccumulatorUse::kReadWrite, OperandType::kReg, \
OperandType::kReg, OperandType::kIdx) \
+ V(StaInArrayLiteral, AccumulatorUse::kReadWrite, OperandType::kReg, \
+ OperandType::kReg, OperandType::kIdx) \
V(StaDataPropertyInLiteral, AccumulatorUse::kRead, OperandType::kReg, \
OperandType::kReg, OperandType::kFlag8, OperandType::kIdx) \
V(CollectTypeProfile, AccumulatorUse::kRead, OperandType::kImm) \
@@ -238,6 +240,7 @@ namespace interpreter {
V(ToNumber, AccumulatorUse::kReadWrite, OperandType::kIdx) \
V(ToNumeric, AccumulatorUse::kReadWrite, OperandType::kIdx) \
V(ToObject, AccumulatorUse::kRead, OperandType::kRegOut) \
+ V(ToString, AccumulatorUse::kReadWrite) \
\
/* Literals */ \
V(CreateRegExpLiteral, AccumulatorUse::kWrite, OperandType::kIdx, \
diff --git a/deps/v8/src/interpreter/constant-array-builder.h b/deps/v8/src/interpreter/constant-array-builder.h
index 549b2edefc..e66946f2c5 100644
--- a/deps/v8/src/interpreter/constant-array-builder.h
+++ b/deps/v8/src/interpreter/constant-array-builder.h
@@ -21,13 +21,15 @@ class AstValue;
namespace interpreter {
// Constant array entries that represent singletons.
-#define SINGLETON_CONSTANT_ENTRY_TYPES(V) \
- V(NaN, nan_value) \
- V(IteratorSymbol, iterator_symbol) \
- V(AsyncIteratorSymbol, async_iterator_symbol) \
- V(HomeObjectSymbol, home_object_symbol) \
- V(EmptyFixedArray, empty_fixed_array) \
- V(ClassFieldsSymbol, class_fields_symbol)
+#define SINGLETON_CONSTANT_ENTRY_TYPES(V) \
+ V(AsyncIteratorSymbol, async_iterator_symbol) \
+ V(ClassFieldsSymbol, class_fields_symbol) \
+ V(EmptyBoilerplateDescription, empty_boilerplate_description) \
+ V(EmptyFixedArray, empty_fixed_array) \
+ V(HomeObjectSymbol, home_object_symbol) \
+ V(IteratorSymbol, iterator_symbol) \
+ V(InterpreterTrampolineSymbol, interpreter_trampoline_symbol) \
+ V(NaN, nan_value)
// A helper class for constructing constant arrays for the
// interpreter. Each instance of this class is intended to be used to
diff --git a/deps/v8/src/interpreter/handler-table-builder.cc b/deps/v8/src/interpreter/handler-table-builder.cc
index 93db1e969a..618589b6e9 100644
--- a/deps/v8/src/interpreter/handler-table-builder.cc
+++ b/deps/v8/src/interpreter/handler-table-builder.cc
@@ -4,7 +4,7 @@
#include "src/interpreter/handler-table-builder.h"
-#include "src/factory.h"
+#include "src/heap/factory.h"
#include "src/interpreter/bytecode-register.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index b2c4ba2309..0ebc303923 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -48,8 +48,6 @@ InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
made_call_(false),
reloaded_frame_ptr_(false),
bytecode_array_valid_(true),
- speculation_poison_(FLAG_untrusted_code_mitigations ? SpeculationPoison()
- : nullptr),
disable_stack_check_across_call_(false),
stack_pointer_before_call_(nullptr) {
#ifdef V8_TRACE_IGNITION
@@ -74,24 +72,6 @@ InterpreterAssembler::~InterpreterAssembler() {
UnregisterCallGenerationCallbacks();
}
-Node* InterpreterAssembler::PoisonOnSpeculationTagged(Node* value) {
- if (speculation_poison_ == nullptr) return value;
- return BitcastWordToTagged(
- WordAnd(speculation_poison_, BitcastTaggedToWord(value)));
-}
-
-Node* InterpreterAssembler::PoisonOnSpeculationWord(Node* value) {
- if (speculation_poison_ == nullptr) return value;
- return WordAnd(speculation_poison_, value);
-}
-
-Node* InterpreterAssembler::PoisonOnSpeculationInt32(Node* value) {
- if (speculation_poison_ == nullptr) return value;
- Node* truncated_speculation_poison =
- Is64() ? TruncateInt64ToInt32(speculation_poison_) : speculation_poison_;
- return Word32And(truncated_speculation_poison, value);
-}
-
Node* InterpreterAssembler::GetInterpretedFramePointer() {
if (!interpreted_frame_pointer_.IsBound()) {
interpreted_frame_pointer_.Bind(LoadParentFramePointer());
@@ -255,9 +235,8 @@ Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
}
Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
- Node* value = Load(MachineType::AnyTagged(), GetInterpretedFramePointer(),
- RegisterFrameOffset(reg_index));
- return PoisonOnSpeculationTagged(value);
+ return Load(MachineType::AnyTagged(), GetInterpretedFramePointer(),
+ RegisterFrameOffset(reg_index), LoadSensitivity::kNeedsPoisoning);
}
Node* InterpreterAssembler::LoadRegister(Register reg) {
@@ -271,14 +250,16 @@ Node* InterpreterAssembler::LoadAndUntagRegister(Register reg) {
}
Node* InterpreterAssembler::LoadRegisterAtOperandIndex(int operand_index) {
- return LoadRegister(BytecodeOperandRegUnpoisoned(operand_index));
+ return LoadRegister(
+ BytecodeOperandReg(operand_index, LoadSensitivity::kSafe));
}
std::pair<Node*, Node*> InterpreterAssembler::LoadRegisterPairAtOperandIndex(
int operand_index) {
DCHECK_EQ(OperandType::kRegPair,
Bytecodes::GetOperandType(bytecode_, operand_index));
- Node* first_reg_index = BytecodeOperandRegUnpoisoned(operand_index);
+ Node* first_reg_index =
+ BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
Node* second_reg_index = NextRegister(first_reg_index);
return std::make_pair(LoadRegister(first_reg_index),
LoadRegister(second_reg_index));
@@ -290,8 +271,8 @@ InterpreterAssembler::GetRegisterListAtOperandIndex(int operand_index) {
Bytecodes::GetOperandType(bytecode_, operand_index)));
DCHECK_EQ(OperandType::kRegCount,
Bytecodes::GetOperandType(bytecode_, operand_index + 1));
- Node* base_reg =
- RegisterLocation(BytecodeOperandRegUnpoisoned(operand_index));
+ Node* base_reg = RegisterLocation(
+ BytecodeOperandReg(operand_index, LoadSensitivity::kSafe));
Node* reg_count = BytecodeOperandCount(operand_index + 1);
return RegListNodePair(base_reg, reg_count);
}
@@ -332,7 +313,8 @@ void InterpreterAssembler::StoreAndTagRegister(Node* value, Register reg) {
void InterpreterAssembler::StoreRegisterAtOperandIndex(Node* value,
int operand_index) {
- StoreRegister(value, BytecodeOperandRegUnpoisoned(operand_index));
+ StoreRegister(value,
+ BytecodeOperandReg(operand_index, LoadSensitivity::kSafe));
}
void InterpreterAssembler::StoreRegisterPairAtOperandIndex(Node* value1,
@@ -340,7 +322,8 @@ void InterpreterAssembler::StoreRegisterPairAtOperandIndex(Node* value1,
int operand_index) {
DCHECK_EQ(OperandType::kRegOutPair,
Bytecodes::GetOperandType(bytecode_, operand_index));
- Node* first_reg_index = BytecodeOperandRegUnpoisoned(operand_index);
+ Node* first_reg_index =
+ BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
StoreRegister(value1, first_reg_index);
Node* second_reg_index = NextRegister(first_reg_index);
StoreRegister(value2, second_reg_index);
@@ -350,7 +333,8 @@ void InterpreterAssembler::StoreRegisterTripleAtOperandIndex(
Node* value1, Node* value2, Node* value3, int operand_index) {
DCHECK_EQ(OperandType::kRegOutTriple,
Bytecodes::GetOperandType(bytecode_, operand_index));
- Node* first_reg_index = BytecodeOperandRegUnpoisoned(operand_index);
+ Node* first_reg_index =
+ BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
StoreRegister(value1, first_reg_index);
Node* second_reg_index = NextRegister(first_reg_index);
StoreRegister(value2, second_reg_index);
@@ -368,28 +352,29 @@ Node* InterpreterAssembler::OperandOffset(int operand_index) {
Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale()));
}
-Node* InterpreterAssembler::BytecodeOperandUnsignedByteUnpoisoned(
- int operand_index) {
+Node* InterpreterAssembler::BytecodeOperandUnsignedByte(
+ int operand_index, LoadSensitivity needs_poisoning) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
bytecode_, operand_index, operand_scale()));
Node* operand_offset = OperandOffset(operand_index);
return Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), operand_offset));
+ IntPtrAdd(BytecodeOffset(), operand_offset), needs_poisoning);
}
-Node* InterpreterAssembler::BytecodeOperandSignedByteUnpoisoned(
- int operand_index) {
+Node* InterpreterAssembler::BytecodeOperandSignedByte(
+ int operand_index, LoadSensitivity needs_poisoning) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
bytecode_, operand_index, operand_scale()));
Node* operand_offset = OperandOffset(operand_index);
return Load(MachineType::Int8(), BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), operand_offset));
+ IntPtrAdd(BytecodeOffset(), operand_offset), needs_poisoning);
}
-Node* InterpreterAssembler::BytecodeOperandReadUnalignedUnpoisoned(
- int relative_offset, MachineType result_type) {
+Node* InterpreterAssembler::BytecodeOperandReadUnaligned(
+ int relative_offset, MachineType result_type,
+ LoadSensitivity needs_poisoning) {
static const int kMaxCount = 4;
DCHECK(!TargetSupportsUnalignedAccess());
@@ -426,7 +411,8 @@ Node* InterpreterAssembler::BytecodeOperandReadUnalignedUnpoisoned(
MachineType machine_type = (i == 0) ? msb_type : MachineType::Uint8();
Node* offset = IntPtrConstant(relative_offset + msb_offset + i * kStep);
Node* array_offset = IntPtrAdd(BytecodeOffset(), offset);
- bytes[i] = Load(machine_type, BytecodeArrayTaggedPointer(), array_offset);
+ bytes[i] = Load(machine_type, BytecodeArrayTaggedPointer(), array_offset,
+ needs_poisoning);
}
// Pack LSB to MSB.
@@ -439,8 +425,8 @@ Node* InterpreterAssembler::BytecodeOperandReadUnalignedUnpoisoned(
return result;
}
-Node* InterpreterAssembler::BytecodeOperandUnsignedShortUnpoisoned(
- int operand_index) {
+Node* InterpreterAssembler::BytecodeOperandUnsignedShort(
+ int operand_index, LoadSensitivity needs_poisoning) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(
OperandSize::kShort,
@@ -449,15 +435,16 @@ Node* InterpreterAssembler::BytecodeOperandUnsignedShortUnpoisoned(
Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
if (TargetSupportsUnalignedAccess()) {
return Load(MachineType::Uint16(), BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
+ IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
+ needs_poisoning);
} else {
- return BytecodeOperandReadUnalignedUnpoisoned(operand_offset,
- MachineType::Uint16());
+ return BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint16(),
+ needs_poisoning);
}
}
-Node* InterpreterAssembler::BytecodeOperandSignedShortUnpoisoned(
- int operand_index) {
+Node* InterpreterAssembler::BytecodeOperandSignedShort(
+ int operand_index, LoadSensitivity needs_poisoning) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(
OperandSize::kShort,
@@ -466,15 +453,16 @@ Node* InterpreterAssembler::BytecodeOperandSignedShortUnpoisoned(
Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
if (TargetSupportsUnalignedAccess()) {
return Load(MachineType::Int16(), BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
+ IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
+ needs_poisoning);
} else {
- return BytecodeOperandReadUnalignedUnpoisoned(operand_offset,
- MachineType::Int16());
+ return BytecodeOperandReadUnaligned(operand_offset, MachineType::Int16(),
+ needs_poisoning);
}
}
-Node* InterpreterAssembler::BytecodeOperandUnsignedQuadUnpoisoned(
- int operand_index) {
+Node* InterpreterAssembler::BytecodeOperandUnsignedQuad(
+ int operand_index, LoadSensitivity needs_poisoning) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
bytecode_, operand_index, operand_scale()));
@@ -482,15 +470,16 @@ Node* InterpreterAssembler::BytecodeOperandUnsignedQuadUnpoisoned(
Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
if (TargetSupportsUnalignedAccess()) {
return Load(MachineType::Uint32(), BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
+ IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
+ needs_poisoning);
} else {
- return BytecodeOperandReadUnalignedUnpoisoned(operand_offset,
- MachineType::Uint32());
+ return BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint32(),
+ needs_poisoning);
}
}
-Node* InterpreterAssembler::BytecodeOperandSignedQuadUnpoisoned(
- int operand_index) {
+Node* InterpreterAssembler::BytecodeOperandSignedQuad(
+ int operand_index, LoadSensitivity needs_poisoning) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
bytecode_, operand_index, operand_scale()));
@@ -498,59 +487,50 @@ Node* InterpreterAssembler::BytecodeOperandSignedQuadUnpoisoned(
Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
if (TargetSupportsUnalignedAccess()) {
return Load(MachineType::Int32(), BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
+ IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
+ needs_poisoning);
} else {
- return BytecodeOperandReadUnalignedUnpoisoned(operand_offset,
- MachineType::Int32());
+ return BytecodeOperandReadUnaligned(operand_offset, MachineType::Int32(),
+ needs_poisoning);
}
}
-Node* InterpreterAssembler::BytecodeSignedOperandUnpoisoned(
- int operand_index, OperandSize operand_size) {
+Node* InterpreterAssembler::BytecodeSignedOperand(
+ int operand_index, OperandSize operand_size,
+ LoadSensitivity needs_poisoning) {
DCHECK(!Bytecodes::IsUnsignedOperandType(
Bytecodes::GetOperandType(bytecode_, operand_index)));
switch (operand_size) {
case OperandSize::kByte:
- return BytecodeOperandSignedByteUnpoisoned(operand_index);
+ return BytecodeOperandSignedByte(operand_index, needs_poisoning);
case OperandSize::kShort:
- return BytecodeOperandSignedShortUnpoisoned(operand_index);
+ return BytecodeOperandSignedShort(operand_index, needs_poisoning);
case OperandSize::kQuad:
- return BytecodeOperandSignedQuadUnpoisoned(operand_index);
+ return BytecodeOperandSignedQuad(operand_index, needs_poisoning);
case OperandSize::kNone:
UNREACHABLE();
}
return nullptr;
}
-Node* InterpreterAssembler::BytecodeUnsignedOperandUnpoisoned(
- int operand_index, OperandSize operand_size) {
+Node* InterpreterAssembler::BytecodeUnsignedOperand(
+ int operand_index, OperandSize operand_size,
+ LoadSensitivity needs_poisoning) {
DCHECK(Bytecodes::IsUnsignedOperandType(
Bytecodes::GetOperandType(bytecode_, operand_index)));
switch (operand_size) {
case OperandSize::kByte:
- return BytecodeOperandUnsignedByteUnpoisoned(operand_index);
+ return BytecodeOperandUnsignedByte(operand_index, needs_poisoning);
case OperandSize::kShort:
- return BytecodeOperandUnsignedShortUnpoisoned(operand_index);
+ return BytecodeOperandUnsignedShort(operand_index, needs_poisoning);
case OperandSize::kQuad:
- return BytecodeOperandUnsignedQuadUnpoisoned(operand_index);
+ return BytecodeOperandUnsignedQuad(operand_index, needs_poisoning);
case OperandSize::kNone:
UNREACHABLE();
}
return nullptr;
}
-Node* InterpreterAssembler::BytecodeSignedOperand(int operand_index,
- OperandSize operand_size) {
- return PoisonOnSpeculationInt32(
- BytecodeSignedOperandUnpoisoned(operand_index, operand_size));
-}
-
-Node* InterpreterAssembler::BytecodeUnsignedOperand(int operand_index,
- OperandSize operand_size) {
- return PoisonOnSpeculationInt32(
- BytecodeUnsignedOperandUnpoisoned(operand_index, operand_size));
-}
-
Node* InterpreterAssembler::BytecodeOperandCount(int operand_index) {
DCHECK_EQ(OperandType::kRegCount,
Bytecodes::GetOperandType(bytecode_, operand_index));
@@ -616,23 +596,24 @@ Node* InterpreterAssembler::BytecodeOperandIdxSmi(int operand_index) {
return SmiTag(BytecodeOperandIdx(operand_index));
}
-Node* InterpreterAssembler::BytecodeOperandConstantPoolIdxUnpoisoned(
- int operand_index) {
+Node* InterpreterAssembler::BytecodeOperandConstantPoolIdx(
+ int operand_index, LoadSensitivity needs_poisoning) {
DCHECK_EQ(OperandType::kIdx,
Bytecodes::GetOperandType(bytecode_, operand_index));
OperandSize operand_size =
Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
return ChangeUint32ToWord(
- BytecodeUnsignedOperand(operand_index, operand_size));
+ BytecodeUnsignedOperand(operand_index, operand_size, needs_poisoning));
}
-Node* InterpreterAssembler::BytecodeOperandRegUnpoisoned(int operand_index) {
+Node* InterpreterAssembler::BytecodeOperandReg(
+ int operand_index, LoadSensitivity needs_poisoning) {
DCHECK(Bytecodes::IsRegisterOperandType(
Bytecodes::GetOperandType(bytecode_, operand_index)));
OperandSize operand_size =
Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
return ChangeInt32ToIntPtr(
- BytecodeSignedOperandUnpoisoned(operand_index, operand_size));
+ BytecodeSignedOperand(operand_index, operand_size, needs_poisoning));
}
Node* InterpreterAssembler::BytecodeOperandRuntimeId(int operand_index) {
@@ -666,7 +647,8 @@ Node* InterpreterAssembler::BytecodeOperandIntrinsicId(int operand_index) {
Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
Node* constant_pool = LoadObjectField(BytecodeArrayTaggedPointer(),
BytecodeArray::kConstantPoolOffset);
- return PoisonOnSpeculationTagged(LoadFixedArrayElement(constant_pool, index));
+ return LoadFixedArrayElement(constant_pool, UncheckedCast<IntPtrT>(index),
+ LoadSensitivity::kNeedsPoisoning);
}
Node* InterpreterAssembler::LoadAndUntagConstantPoolEntry(Node* index) {
@@ -675,7 +657,8 @@ Node* InterpreterAssembler::LoadAndUntagConstantPoolEntry(Node* index) {
Node* InterpreterAssembler::LoadConstantPoolEntryAtOperandIndex(
int operand_index) {
- Node* index = BytecodeOperandConstantPoolIdxUnpoisoned(operand_index);
+ Node* index =
+ BytecodeOperandConstantPoolIdx(operand_index, LoadSensitivity::kSafe);
return LoadConstantPoolEntry(index);
}
@@ -1085,7 +1068,7 @@ Node* InterpreterAssembler::Construct(Node* target, Node* context,
// constructor feedback collection inside of Ignition.
Comment("call using ConstructArray builtin");
Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
- isolate(), InterpreterPushArgsMode::kJSFunction);
+ isolate(), InterpreterPushArgsMode::kArrayFunction);
Node* code_target = HeapConstant(callable.code());
var_result.Bind(CallStub(callable.descriptor(), code_target, context,
args.reg_count(), new_target, target,
@@ -1388,7 +1371,8 @@ void InterpreterAssembler::InlineStar() {
#ifdef V8_TRACE_IGNITION
TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
#endif
- StoreRegister(GetAccumulator(), BytecodeOperandRegUnpoisoned(0));
+ StoreRegister(GetAccumulator(),
+ BytecodeOperandReg(0, LoadSensitivity::kSafe));
DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_));
diff --git a/deps/v8/src/interpreter/interpreter-assembler.h b/deps/v8/src/interpreter/interpreter-assembler.h
index cb622d0b2d..6f8ee3d323 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.h
+++ b/deps/v8/src/interpreter/interpreter-assembler.h
@@ -289,11 +289,6 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
compiler::Node* LoadRegister(Node* reg_index);
void StoreRegister(compiler::Node* value, compiler::Node* reg_index);
- // Poison |value| on speculative paths.
- compiler::Node* PoisonOnSpeculationTagged(Node* value);
- compiler::Node* PoisonOnSpeculationWord(Node* value);
- compiler::Node* PoisonOnSpeculationInt32(Node* value);
-
// Saves and restores interpreter bytecode offset to the interpreter stack
// frame when performing a call.
void CallPrologue();
@@ -321,37 +316,51 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// The |result_type| determines the size and signedness of the
// value read. This method should only be used on architectures that
// do not support unaligned memory accesses.
- compiler::Node* BytecodeOperandReadUnalignedUnpoisoned(
- int relative_offset, MachineType result_type);
-
- // Returns zero- or sign-extended to word32 value of the operand. Values are
- // not poisoned on speculation - should be used with care.
- compiler::Node* BytecodeOperandUnsignedByteUnpoisoned(int operand_index);
- compiler::Node* BytecodeOperandSignedByteUnpoisoned(int operand_index);
- compiler::Node* BytecodeOperandUnsignedShortUnpoisoned(int operand_index);
- compiler::Node* BytecodeOperandSignedShortUnpoisoned(int operand_index);
- compiler::Node* BytecodeOperandUnsignedQuadUnpoisoned(int operand_index);
- compiler::Node* BytecodeOperandSignedQuadUnpoisoned(int operand_index);
- compiler::Node* BytecodeSignedOperandUnpoisoned(int operand_index,
- OperandSize operand_size);
- compiler::Node* BytecodeUnsignedOperandUnpoisoned(int operand_index,
- OperandSize operand_size);
+ compiler::Node* BytecodeOperandReadUnaligned(
+ int relative_offset, MachineType result_type,
+ LoadSensitivity needs_poisoning = LoadSensitivity::kNeedsPoisoning);
+
+ // Returns zero- or sign-extended to word32 value of the operand.
+ compiler::Node* BytecodeOperandUnsignedByte(
+ int operand_index,
+ LoadSensitivity needs_poisoning = LoadSensitivity::kNeedsPoisoning);
+ compiler::Node* BytecodeOperandSignedByte(
+ int operand_index,
+ LoadSensitivity needs_poisoning = LoadSensitivity::kNeedsPoisoning);
+ compiler::Node* BytecodeOperandUnsignedShort(
+ int operand_index,
+ LoadSensitivity needs_poisoning = LoadSensitivity::kNeedsPoisoning);
+ compiler::Node* BytecodeOperandSignedShort(
+ int operand_index,
+ LoadSensitivity needs_poisoning = LoadSensitivity::kNeedsPoisoning);
+ compiler::Node* BytecodeOperandUnsignedQuad(
+ int operand_index,
+ LoadSensitivity needs_poisoning = LoadSensitivity::kNeedsPoisoning);
+ compiler::Node* BytecodeOperandSignedQuad(
+ int operand_index,
+ LoadSensitivity needs_poisoning = LoadSensitivity::kNeedsPoisoning);
// Returns zero- or sign-extended to word32 value of the operand of
// given size.
- compiler::Node* BytecodeSignedOperand(int operand_index,
- OperandSize operand_size);
- compiler::Node* BytecodeUnsignedOperand(int operand_index,
- OperandSize operand_size);
+ compiler::Node* BytecodeSignedOperand(
+ int operand_index, OperandSize operand_size,
+ LoadSensitivity needs_poisoning = LoadSensitivity::kNeedsPoisoning);
+ compiler::Node* BytecodeUnsignedOperand(
+ int operand_index, OperandSize operand_size,
+ LoadSensitivity needs_poisoning = LoadSensitivity::kNeedsPoisoning);
// Returns the word-size sign-extended register index for bytecode operand
// |operand_index| in the current bytecode. Value is not poisoned on
// speculation since the value loaded from the register is poisoned instead.
- compiler::Node* BytecodeOperandRegUnpoisoned(int operand_index);
+ compiler::Node* BytecodeOperandReg(
+ int operand_index,
+ LoadSensitivity needs_poisoning = LoadSensitivity::kNeedsPoisoning);
// Returns the word zero-extended index immediate for bytecode operand
// |operand_index| in the current bytecode for use when loading a constant pool element.
- compiler::Node* BytecodeOperandConstantPoolIdxUnpoisoned(int operand_index);
+ compiler::Node* BytecodeOperandConstantPoolIdx(
+ int operand_index,
+ LoadSensitivity needs_poisoning = LoadSensitivity::kNeedsPoisoning);
// Jump relative to the current bytecode by the |jump_offset|. If |backward|,
// then jump backward (subtract the offset), otherwise jump forward (add the
@@ -413,9 +422,6 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
bool made_call_;
bool reloaded_frame_ptr_;
bool bytecode_array_valid_;
-
- Node* speculation_poison_;
-
bool disable_stack_check_across_call_;
compiler::Node* stack_pointer_before_call_;
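The interpreter-assembler changes above fold the old *Unpoisoned operand accessors into a single set of accessors taking a LoadSensitivity argument that defaults to kNeedsPoisoning, so speculative-load poisoning becomes opt-out rather than opt-in. As a rough, self-contained sketch of that API shape only — the enum values mirror the diff, but the helper and the mask-based poisoning below are simplified stand-ins, not V8's implementation:

#include <cstddef>
#include <cstdint>

enum class LoadSensitivity { kSafe, kNeedsPoisoning };

// Hypothetical poison mask: all-ones on the architecturally taken path,
// all-zeros on a mis-speculated path. A plain constant here so the sketch
// compiles on its own.
static uintptr_t g_speculation_poison = ~uintptr_t{0};

// Every operand load takes a sensitivity argument defaulting to "poison",
// mirroring the new InterpreterAssembler signatures above.
uint8_t LoadOperandByte(const uint8_t* bytecode_array, size_t offset,
                        LoadSensitivity needs_poisoning =
                            LoadSensitivity::kNeedsPoisoning) {
  if (needs_poisoning == LoadSensitivity::kNeedsPoisoning) {
    // Mask the index so a mis-speculated load cannot reach attacker-chosen
    // memory; on the correct path the mask is a no-op.
    offset &= g_speculation_poison;
  }
  return bytecode_array[offset];
}

int main() {
  const uint8_t bytecodes[] = {0x0b, 0x01, 0x02};  // made-up bytes
  return LoadOperandByte(bytecodes, 1, LoadSensitivity::kSafe);
}

Call sites that instead rely on the value subsequently loaded from the register being poisoned, such as InlineStar's StoreRegister(GetAccumulator(), BytecodeOperandReg(0, LoadSensitivity::kSafe)) above, pass kSafe explicitly.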
diff --git a/deps/v8/src/interpreter/interpreter-generator.cc b/deps/v8/src/interpreter/interpreter-generator.cc
index 65af249ea7..961597ee26 100644
--- a/deps/v8/src/interpreter/interpreter-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-generator.cc
@@ -157,9 +157,10 @@ class InterpreterLoadGlobalAssembler : public InterpreterAssembler {
Node* feedback_slot = BytecodeOperandIdx(slot_operand_index);
AccessorAssembler accessor_asm(state());
- Label done(this);
- Variable var_result(this, MachineRepresentation::kTagged);
- ExitPoint exit_point(this, &done, &var_result);
+ ExitPoint exit_point(this, [=](Node* result) {
+ SetAccumulator(result);
+ Dispatch();
+ });
LazyNode<Context> lazy_context = [=] { return CAST(GetContext()); };
@@ -171,10 +172,6 @@ class InterpreterLoadGlobalAssembler : public InterpreterAssembler {
accessor_asm.LoadGlobalIC(feedback_vector, feedback_slot, lazy_context,
lazy_name, typeof_mode, &exit_point,
CodeStubAssembler::INTPTR_PARAMETERS);
-
- BIND(&done);
- SetAccumulator(var_result.value());
- Dispatch();
}
};
@@ -213,8 +210,8 @@ IGNITION_HANDLER(StaGlobal, InterpreterAssembler) {
Node* raw_slot = BytecodeOperandIdx(1);
Node* smi_slot = SmiTag(raw_slot);
Node* feedback_vector = LoadFeedbackVector();
- Callable ic = Builtins::CallableFor(isolate(), Builtins::kStoreGlobalIC);
- CallStub(ic, context, name, value, smi_slot, feedback_vector);
+ CallBuiltin(Builtins::kStoreGlobalIC, context, name, value, smi_slot,
+ feedback_vector);
Dispatch();
}
@@ -523,16 +520,14 @@ IGNITION_HANDLER(LdaNamedProperty, InterpreterAssembler) {
// Calls the KeyedLoadIC at FeedbackVector slot <slot> for <object> and the key
// in the accumulator.
IGNITION_HANDLER(LdaKeyedProperty, InterpreterAssembler) {
- Callable ic = Builtins::CallableFor(isolate(), Builtins::kKeyedLoadIC);
- Node* code_target = HeapConstant(ic.code());
Node* object = LoadRegisterAtOperandIndex(0);
Node* name = GetAccumulator();
Node* raw_slot = BytecodeOperandIdx(1);
Node* smi_slot = SmiTag(raw_slot);
Node* feedback_vector = LoadFeedbackVector();
Node* context = GetContext();
- Node* result = CallStub(ic.descriptor(), code_target, context, object, name,
- smi_slot, feedback_vector);
+ Node* result = CallBuiltin(Builtins::kKeyedLoadIC, context, object, name,
+ smi_slot, feedback_vector);
SetAccumulator(result);
Dispatch();
}
@@ -555,7 +550,9 @@ class InterpreterStoreNamedPropertyAssembler : public InterpreterAssembler {
Node* context = GetContext();
Node* result = CallStub(ic.descriptor(), code_target, context, object, name,
value, smi_slot, feedback_vector);
- // It doesn't really matter what we write to the accumulator here, since we
+ // To avoid special logic in the deoptimizer to re-materialize the value in
+ // the accumulator, we overwrite the accumulator after the IC call. It
+ // doesn't really matter what we write to the accumulator here, since we
// restore to the correct value on the outside. Storing the result means we
// don't need to keep unnecessary state alive across the callstub.
SetAccumulator(result);
@@ -588,8 +585,6 @@ IGNITION_HANDLER(StaNamedOwnProperty, InterpreterStoreNamedPropertyAssembler) {
// Calls the KeyedStoreIC at FeedbackVector slot <slot> for <object> and
// the key <key> with the value in the accumulator.
IGNITION_HANDLER(StaKeyedProperty, InterpreterAssembler) {
- Callable ic = Builtins::CallableFor(isolate(), Builtins::kKeyedStoreIC);
- Node* code_target = HeapConstant(ic.code());
Node* object = LoadRegisterAtOperandIndex(0);
Node* name = LoadRegisterAtOperandIndex(1);
Node* value = GetAccumulator();
@@ -597,9 +592,34 @@ IGNITION_HANDLER(StaKeyedProperty, InterpreterAssembler) {
Node* smi_slot = SmiTag(raw_slot);
Node* feedback_vector = LoadFeedbackVector();
Node* context = GetContext();
- Node* result = CallStub(ic.descriptor(), code_target, context, object, name,
- value, smi_slot, feedback_vector);
- // It doesn't really matter what we write to the accumulator here, since we
+ Node* result = CallBuiltin(Builtins::kKeyedStoreIC, context, object, name,
+ value, smi_slot, feedback_vector);
+ // To avoid special logic in the deoptimizer to re-materialize the value in
+ // the accumulator, we overwrite the accumulator after the IC call. It
+ // doesn't really matter what we write to the accumulator here, since we
+ // restore to the correct value on the outside. Storing the result means we
+ // don't need to keep unnecessary state alive across the callstub.
+ SetAccumulator(result);
+ Dispatch();
+}
+
+// StaInArrayLiteral <array> <index> <slot>
+//
+// Calls the StoreInArrayLiteralIC at FeedbackVector slot <slot> for <array> and
+// the key <index> with the value in the accumulator.
+IGNITION_HANDLER(StaInArrayLiteral, InterpreterAssembler) {
+ Node* array = LoadRegisterAtOperandIndex(0);
+ Node* index = LoadRegisterAtOperandIndex(1);
+ Node* value = GetAccumulator();
+ Node* raw_slot = BytecodeOperandIdx(2);
+ Node* smi_slot = SmiTag(raw_slot);
+ Node* feedback_vector = LoadFeedbackVector();
+ Node* context = GetContext();
+ Node* result = CallBuiltin(Builtins::kStoreInArrayLiteralIC, context, array,
+ index, value, smi_slot, feedback_vector);
+ // To avoid special logic in the deoptimizer to re-materialize the value in
+ // the accumulator, we overwrite the accumulator after the IC call. It
+ // doesn't really matter what we write to the accumulator here, since we
// restore to the correct value on the outside. Storing the result means we
// don't need to keep unnecessary state alive across the callstub.
SetAccumulator(result);
@@ -1274,15 +1294,21 @@ IGNITION_HANDLER(ToNumeric, InterpreterAssembler) {
//
// Convert the object referenced by the accumulator to a JSReceiver.
IGNITION_HANDLER(ToObject, InterpreterAssembler) {
- Callable callable = Builtins::CallableFor(isolate(), Builtins::kToObject);
- Node* target = HeapConstant(callable.code());
Node* accumulator = GetAccumulator();
Node* context = GetContext();
- Node* result = CallStub(callable.descriptor(), target, context, accumulator);
+ Node* result = CallBuiltin(Builtins::kToObject, context, accumulator);
StoreRegisterAtOperandIndex(result, 0);
Dispatch();
}
+// ToString
+//
+// Convert the accumulator to a String.
+IGNITION_HANDLER(ToString, InterpreterAssembler) {
+ SetAccumulator(ToString_Inline(GetContext(), GetAccumulator()));
+ Dispatch();
+}
+
class IncDecAssembler : public UnaryNumericOpAssembler {
public:
explicit IncDecAssembler(CodeAssemblerState* state, Bytecode bytecode,
@@ -1452,7 +1478,7 @@ IGNITION_HANDLER(DeletePropertySloppy, InterpreterAssembler) {
IGNITION_HANDLER(GetSuperConstructor, InterpreterAssembler) {
Node* active_function = GetAccumulator();
Node* context = GetContext();
- Node* result = GetSuperConstructor(active_function, context);
+ Node* result = GetSuperConstructor(context, active_function);
StoreRegisterAtOperandIndex(result, 0);
Dispatch();
}
@@ -2580,12 +2606,10 @@ IGNITION_HANDLER(CreateMappedArguments, InterpreterAssembler) {
// duplicate parameters.
Node* shared_info =
LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset);
- Node* compiler_hints =
- LoadObjectField(shared_info, SharedFunctionInfo::kCompilerHintsOffset,
- MachineType::Uint32());
+ Node* flags = LoadObjectField(shared_info, SharedFunctionInfo::kFlagsOffset,
+ MachineType::Uint32());
Node* has_duplicate_parameters =
- IsSetWord32<SharedFunctionInfo::HasDuplicateParametersBit>(
- compiler_hints);
+ IsSetWord32<SharedFunctionInfo::HasDuplicateParametersBit>(flags);
Branch(has_duplicate_parameters, &if_duplicate_parameters,
&if_not_duplicate_parameters);
@@ -3084,7 +3108,10 @@ Handle<Code> GenerateBytecodeHandler(Isolate* isolate, Bytecode bytecode,
InterpreterDispatchDescriptor descriptor(isolate);
compiler::CodeAssemblerState state(
isolate, &zone, descriptor, Code::BYTECODE_HANDLER,
- Bytecodes::ToString(bytecode), Bytecodes::ReturnCount(bytecode));
+ Bytecodes::ToString(bytecode),
+ FLAG_untrusted_code_mitigations ? PoisoningMitigationLevel::kOn
+ : PoisoningMitigationLevel::kOff,
+ Bytecodes::ReturnCount(bytecode));
switch (bytecode) {
#define CALL_GENERATOR(Name, ...) \
@@ -3154,9 +3181,11 @@ Handle<Code> GenerateDeserializeLazyHandler(Isolate* isolate,
}
InterpreterDispatchDescriptor descriptor(isolate);
- compiler::CodeAssemblerState state(isolate, &zone, descriptor,
- Code::BYTECODE_HANDLER, debug_name.c_str(),
- return_count);
+ compiler::CodeAssemblerState state(
+ isolate, &zone, descriptor, Code::BYTECODE_HANDLER, debug_name.c_str(),
+ FLAG_untrusted_code_mitigations ? PoisoningMitigationLevel::kOn
+ : PoisoningMitigationLevel::kOff,
+ return_count);
DeserializeLazyAssembler::Generate(&state, operand_scale);
Handle<Code> code = compiler::CodeAssembler::GenerateCode(&state);
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
index e44289bb6c..675a8bcccc 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
@@ -7,8 +7,8 @@
#include "src/allocation.h"
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
-#include "src/factory-inl.h"
#include "src/frames.h"
+#include "src/heap/factory-inl.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter-assembler.h"
#include "src/interpreter/interpreter-intrinsics.h"
@@ -20,6 +20,8 @@ namespace internal {
namespace interpreter {
using compiler::Node;
+template <typename T>
+using TNode = compiler::TNode<T>;
class IntrinsicsGenerator {
public:
@@ -132,23 +134,21 @@ Node* IntrinsicsGenerator::CompareInstanceType(Node* object, int type,
}
Node* IntrinsicsGenerator::IsInstanceType(Node* input, int type) {
- Node* result =
- __ Select(__ TaggedIsSmi(input), [=] { return __ FalseConstant(); },
- [=] {
- return __ SelectBooleanConstant(
- CompareInstanceType(input, type, kInstanceTypeEqual));
- },
- MachineRepresentation::kTagged);
+ TNode<Oddball> result = __ Select<Oddball>(
+ __ TaggedIsSmi(input), [=] { return __ FalseConstant(); },
+ [=] {
+ return __ SelectBooleanConstant(
+ CompareInstanceType(input, type, kInstanceTypeEqual));
+ });
return result;
}
Node* IntrinsicsGenerator::IsJSReceiver(
const InterpreterAssembler::RegListNodePair& args, Node* context) {
Node* input = __ LoadRegisterFromRegisterList(args, 0);
- Node* result = __ Select(
+ TNode<Oddball> result = __ Select<Oddball>(
__ TaggedIsSmi(input), [=] { return __ FalseConstant(); },
- [=] { return __ SelectBooleanConstant(__ IsJSReceiver(input)); },
- MachineRepresentation::kTagged);
+ [=] { return __ SelectBooleanConstant(__ IsJSReceiver(input)); });
return result;
}
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index 0702536b3d..925adac90f 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -9,7 +9,6 @@
#include "src/ast/prettyprinter.h"
#include "src/bootstrapper.h"
-#include "src/compilation-info.h"
#include "src/compiler.h"
#include "src/counters-inl.h"
#include "src/interpreter/bytecode-generator.h"
@@ -20,28 +19,29 @@
#include "src/parsing/parse-info.h"
#include "src/setup-isolate.h"
#include "src/snapshot/snapshot.h"
+#include "src/unoptimized-compilation-info.h"
#include "src/visitors.h"
namespace v8 {
namespace internal {
namespace interpreter {
-class InterpreterCompilationJob final : public CompilationJob {
+class InterpreterCompilationJob final : public UnoptimizedCompilationJob {
public:
InterpreterCompilationJob(ParseInfo* parse_info, FunctionLiteral* literal,
AccountingAllocator* allocator,
ZoneVector<FunctionLiteral*>* eager_inner_literals);
protected:
- Status PrepareJobImpl(Isolate* isolate) final;
Status ExecuteJobImpl() final;
- Status FinalizeJobImpl(Isolate* isolate) final;
+ Status FinalizeJobImpl(Handle<SharedFunctionInfo> shared_info,
+ Isolate* isolate) final;
private:
BytecodeGenerator* generator() { return &generator_; }
Zone zone_;
- CompilationInfo compilation_info_;
+ UnoptimizedCompilationInfo compilation_info_;
BytecodeGenerator generator_;
DISALLOW_COPY_AND_ASSIGN(InterpreterCompilationJob);
@@ -132,13 +132,13 @@ int Interpreter::InterruptBudget() {
namespace {
-void MaybePrintAst(ParseInfo* parse_info, CompilationInfo* compilation_info) {
+void MaybePrintAst(ParseInfo* parse_info,
+ UnoptimizedCompilationInfo* compilation_info) {
if (!FLAG_print_ast) return;
OFStream os(stdout);
- std::unique_ptr<char[]> name = compilation_info->GetDebugName();
- os << "[generating bytecode for function: "
- << compilation_info->GetDebugName().get() << "]" << std::endl;
+ std::unique_ptr<char[]> name = compilation_info->literal()->GetDebugName();
+ os << "[generating bytecode for function: " << name.get() << "]" << std::endl;
#ifdef DEBUG
os << "--- AST ---" << std::endl
<< AstPrinter(parse_info->stack_limit())
@@ -165,19 +165,13 @@ InterpreterCompilationJob::InterpreterCompilationJob(
ParseInfo* parse_info, FunctionLiteral* literal,
AccountingAllocator* allocator,
ZoneVector<FunctionLiteral*>* eager_inner_literals)
- : CompilationJob(parse_info->stack_limit(), parse_info, &compilation_info_,
- "Ignition", State::kReadyToExecute),
+ : UnoptimizedCompilationJob(parse_info->stack_limit(), parse_info,
+ &compilation_info_),
zone_(allocator, ZONE_NAME),
compilation_info_(&zone_, parse_info, literal),
generator_(&compilation_info_, parse_info->ast_string_constants(),
eager_inner_literals) {}
-InterpreterCompilationJob::Status InterpreterCompilationJob::PrepareJobImpl(
- Isolate* isolate) {
- UNREACHABLE(); // Prepare should always be skipped.
- return SUCCEEDED;
-}
-
InterpreterCompilationJob::Status InterpreterCompilationJob::ExecuteJobImpl() {
RuntimeCallTimerScope runtimeTimerScope(
parse_info()->runtime_call_stats(),
@@ -200,7 +194,7 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::ExecuteJobImpl() {
}
InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl(
- Isolate* isolate) {
+ Handle<SharedFunctionInfo> shared_info, Isolate* isolate) {
RuntimeCallTimerScope runtimeTimerScope(
parse_info()->runtime_call_stats(),
RuntimeCallCounterId::kCompileIgnitionFinalization);
@@ -213,22 +207,21 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl(
return FAILED;
}
- if (ShouldPrintBytecode(compilation_info()->shared_info())) {
+ if (ShouldPrintBytecode(shared_info)) {
OFStream os(stdout);
- std::unique_ptr<char[]> name = compilation_info()->GetDebugName();
- os << "[generated bytecode for function: "
- << compilation_info()->GetDebugName().get() << "]" << std::endl;
+ std::unique_ptr<char[]> name =
+ compilation_info()->literal()->GetDebugName();
+ os << "[generated bytecode for function: " << name.get() << "]"
+ << std::endl;
bytecodes->Disassemble(os);
os << std::flush;
}
compilation_info()->SetBytecodeArray(bytecodes);
- compilation_info()->SetCode(
- BUILTIN_CODE(isolate, InterpreterEntryTrampoline));
return SUCCEEDED;
}
-CompilationJob* Interpreter::NewCompilationJob(
+UnoptimizedCompilationJob* Interpreter::NewCompilationJob(
ParseInfo* parse_info, FunctionLiteral* literal,
AccountingAllocator* allocator,
ZoneVector<FunctionLiteral*>* eager_inner_literals) {
diff --git a/deps/v8/src/interpreter/interpreter.h b/deps/v8/src/interpreter/interpreter.h
index 83dfea89f9..711aea8029 100644
--- a/deps/v8/src/interpreter/interpreter.h
+++ b/deps/v8/src/interpreter/interpreter.h
@@ -21,8 +21,7 @@ namespace internal {
class Isolate;
class BuiltinDeserializerAllocator;
class Callable;
-class CompilationInfo;
-class CompilationJob;
+class UnoptimizedCompilationJob;
class FunctionLiteral;
class ParseInfo;
class RootVisitor;
@@ -45,7 +44,7 @@ class Interpreter {
// Creates a compilation job which will generate bytecode for |literal|.
// Additionally, if |eager_inner_literals| is not null, adds any eagerly
// compilable inner FunctionLiterals to this list.
- static CompilationJob* NewCompilationJob(
+ static UnoptimizedCompilationJob* NewCompilationJob(
ParseInfo* parse_info, FunctionLiteral* literal,
AccountingAllocator* allocator,
ZoneVector<FunctionLiteral*>* eager_inner_literals);
diff --git a/deps/v8/src/interpreter/setup-interpreter-internal.cc b/deps/v8/src/interpreter/setup-interpreter-internal.cc
index 1bba10422c..a4b8b2a33f 100644
--- a/deps/v8/src/interpreter/setup-interpreter-internal.cc
+++ b/deps/v8/src/interpreter/setup-interpreter-internal.cc
@@ -19,7 +19,7 @@ void PrintBuiltinSize(Bytecode bytecode, OperandScale operand_scale,
Handle<Code> code) {
PrintF(stdout, "Ignition Handler, %s, %d\n",
Bytecodes::ToString(bytecode, operand_scale).c_str(),
- code->instruction_size());
+ code->InstructionSize());
}
} // namespace
diff --git a/deps/v8/src/intl.cc b/deps/v8/src/intl.cc
index 0d3c507989..5c2cb4e8fe 100644
--- a/deps/v8/src/intl.cc
+++ b/deps/v8/src/intl.cc
@@ -10,10 +10,11 @@
#include <memory>
-#include "src/factory.h"
+#include "src/heap/factory.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/string-case.h"
+#include "unicode/basictz.h"
#include "unicode/calendar.h"
#include "unicode/gregocal.h"
#include "unicode/timezone.h"
@@ -154,8 +155,10 @@ const UChar* GetUCharBufferFromFlat(const String::FlatContent& flat,
}
}
-MUST_USE_RESULT Object* LocaleConvertCase(Handle<String> s, Isolate* isolate,
- bool is_to_upper, const char* lang) {
+V8_WARN_UNUSED_RESULT Object* LocaleConvertCase(Handle<String> s,
+ Isolate* isolate,
+ bool is_to_upper,
+ const char* lang) {
auto case_converter = is_to_upper ? u_strToUpper : u_strToLower;
int32_t src_length = s->length();
int32_t dest_length = src_length;
@@ -203,8 +206,8 @@ MUST_USE_RESULT Object* LocaleConvertCase(Handle<String> s, Isolate* isolate,
// strings and does not allocate. Note that {src} could still be, e.g., a
// one-byte sliced string with a two-byte parent string.
// Called from TF builtins.
-MUST_USE_RESULT Object* ConvertOneByteToLower(String* src, String* dst,
- Isolate* isolate) {
+V8_WARN_UNUSED_RESULT Object* ConvertOneByteToLower(String* src, String* dst,
+ Isolate* isolate) {
DCHECK_EQ(src->length(), dst->length());
DCHECK(src->HasOnlyOneByteChars());
DCHECK(src->IsFlat());
@@ -249,7 +252,8 @@ MUST_USE_RESULT Object* ConvertOneByteToLower(String* src, String* dst,
return dst;
}
-MUST_USE_RESULT Object* ConvertToLower(Handle<String> s, Isolate* isolate) {
+V8_WARN_UNUSED_RESULT Object* ConvertToLower(Handle<String> s,
+ Isolate* isolate) {
if (!s->HasOnlyOneByteChars()) {
// Use a slower implementation for strings with characters beyond U+00FF.
return LocaleConvertCase(s, isolate, false, "");
@@ -280,7 +284,8 @@ MUST_USE_RESULT Object* ConvertToLower(Handle<String> s, Isolate* isolate) {
return ConvertOneByteToLower(*s, *result, isolate);
}
-MUST_USE_RESULT Object* ConvertToUpper(Handle<String> s, Isolate* isolate) {
+V8_WARN_UNUSED_RESULT Object* ConvertToUpper(Handle<String> s,
+ Isolate* isolate) {
int32_t length = s->length();
if (s->HasOnlyOneByteChars() && length > 0) {
Handle<SeqOneByteString> result =
@@ -342,8 +347,8 @@ MUST_USE_RESULT Object* ConvertToUpper(Handle<String> s, Isolate* isolate) {
return LocaleConvertCase(s, isolate, true, "");
}
-MUST_USE_RESULT Object* ConvertCase(Handle<String> s, bool is_upper,
- Isolate* isolate) {
+V8_WARN_UNUSED_RESULT Object* ConvertCase(Handle<String> s, bool is_upper,
+ Isolate* isolate) {
return is_upper ? ConvertToUpper(s, isolate) : ConvertToLower(s, isolate);
}
@@ -373,23 +378,41 @@ icu::TimeZone* ICUTimezoneCache::GetTimeZone() {
return timezone_;
}
-bool ICUTimezoneCache::GetOffsets(double time_ms, int32_t* raw_offset,
- int32_t* dst_offset) {
+bool ICUTimezoneCache::GetOffsets(double time_ms, bool is_utc,
+ int32_t* raw_offset, int32_t* dst_offset) {
UErrorCode status = U_ZERO_ERROR;
- GetTimeZone()->getOffset(time_ms, false, *raw_offset, *dst_offset, status);
+ // TODO(jshin): ICU TimeZone class handles skipped time differently from
+ // Ecma 262 (https://github.com/tc39/ecma262/pull/778) and icu::TimeZone
+ // class does not expose the necessary API. Fixing
+ // http://bugs.icu-project.org/trac/ticket/13268 would make it easy to
+ // implement the proposed spec change. A proposed fix for ICU is
+ // https://chromium-review.googlesource.com/851265 .
+ // In the meantime, use an internal (still public) API of icu::BasicTimeZone.
+ // Once it's accepted by the upstream, get rid of cast. Note that casting
+ // TimeZone to BasicTimeZone is safe because we know that icu::TimeZone used
+ // here is a BasicTimeZone.
+ if (is_utc) {
+ GetTimeZone()->getOffset(time_ms, false, *raw_offset, *dst_offset, status);
+ } else {
+ static_cast<const icu::BasicTimeZone*>(GetTimeZone())
+ ->getOffsetFromLocal(time_ms, icu::BasicTimeZone::kFormer,
+ icu::BasicTimeZone::kFormer, *raw_offset,
+ *dst_offset, status);
+ }
+
return U_SUCCESS(status);
}
double ICUTimezoneCache::DaylightSavingsOffset(double time_ms) {
int32_t raw_offset, dst_offset;
- if (!GetOffsets(time_ms, &raw_offset, &dst_offset)) return 0;
+ if (!GetOffsets(time_ms, true, &raw_offset, &dst_offset)) return 0;
return dst_offset;
}
-double ICUTimezoneCache::LocalTimeOffset() {
+double ICUTimezoneCache::LocalTimeOffset(double time_ms, bool is_utc) {
int32_t raw_offset, dst_offset;
- if (!GetOffsets(icu::Calendar::getNow(), &raw_offset, &dst_offset)) return 0;
- return raw_offset;
+ if (!GetOffsets(time_ms, is_utc, &raw_offset, &dst_offset)) return 0;
+ return raw_offset + dst_offset;
}
void ICUTimezoneCache::Clear() {
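With the GetOffsets/LocalTimeOffset changes above, the timezone cache distinguishes UTC from local input times and reports the total offset (standard plus daylight-saving) instead of the raw offset alone. A small self-contained sketch of how a caller would use such offsets to map a UTC instant to local wall-clock time — the millisecond figures are made-up illustration values, not ICU output:

#include <cstdio>

// Total local offset = standard (raw) offset + daylight-saving offset,
// matching LocalTimeOffset() returning raw_offset + dst_offset above.
double LocalTimeOffsetMs(double raw_offset_ms, double dst_offset_ms) {
  return raw_offset_ms + dst_offset_ms;
}

int main() {
  const double kHourMs = 3600.0 * 1000.0;
  // Example zone: standard offset UTC+1, currently observing one hour of DST.
  double raw = 1.0 * kHourMs;
  double dst = 1.0 * kHourMs;
  double utc_ms = 1528000000000.0;  // arbitrary UTC instant, for illustration
  double local_ms = utc_ms + LocalTimeOffsetMs(raw, dst);
  std::printf("wall-clock time is %.0f ms ahead of UTC\n", local_ms - utc_ms);
  return 0;
}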
diff --git a/deps/v8/src/intl.h b/deps/v8/src/intl.h
index 365097106f..967a3e9277 100644
--- a/deps/v8/src/intl.h
+++ b/deps/v8/src/intl.h
@@ -24,15 +24,19 @@ namespace internal {
const UChar* GetUCharBufferFromFlat(const String::FlatContent& flat,
std::unique_ptr<uc16[]>* dest,
int32_t length);
-MUST_USE_RESULT Object* LocaleConvertCase(Handle<String> s, Isolate* isolate,
- bool is_to_upper, const char* lang);
-MUST_USE_RESULT Object* ConvertToLower(Handle<String> s, Isolate* isolate);
-MUST_USE_RESULT Object* ConvertToUpper(Handle<String> s, Isolate* isolate);
-MUST_USE_RESULT Object* ConvertCase(Handle<String> s, bool is_upper,
- Isolate* isolate);
-
-MUST_USE_RESULT Object* ConvertOneByteToLower(String* src, String* dst,
- Isolate* isolate);
+V8_WARN_UNUSED_RESULT Object* LocaleConvertCase(Handle<String> s,
+ Isolate* isolate,
+ bool is_to_upper,
+ const char* lang);
+V8_WARN_UNUSED_RESULT Object* ConvertToLower(Handle<String> s,
+ Isolate* isolate);
+V8_WARN_UNUSED_RESULT Object* ConvertToUpper(Handle<String> s,
+ Isolate* isolate);
+V8_WARN_UNUSED_RESULT Object* ConvertCase(Handle<String> s, bool is_upper,
+ Isolate* isolate);
+
+V8_WARN_UNUSED_RESULT Object* ConvertOneByteToLower(String* src, String* dst,
+ Isolate* isolate);
const uint8_t* ToLatin1LowerTable();
@@ -48,14 +52,15 @@ class ICUTimezoneCache : public base::TimezoneCache {
double DaylightSavingsOffset(double time_ms) override;
- double LocalTimeOffset() override;
+ double LocalTimeOffset(double time_ms, bool is_utc) override;
void Clear() override;
private:
icu::TimeZone* GetTimeZone();
- bool GetOffsets(double time_ms, int32_t* raw_offset, int32_t* dst_offset);
+ bool GetOffsets(double time_ms, bool is_utc, int32_t* raw_offset,
+ int32_t* dst_offset);
icu::TimeZone* timezone_;
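The intl changes above swap MUST_USE_RESULT for V8_WARN_UNUSED_RESULT on functions whose returned Object* has to be checked by the caller. One common way such a macro is defined — a generic sketch, not necessarily V8's exact definition:

// A generic warn-unused-result macro: GCC and Clang expose the attribute;
// other compilers get an empty expansion in this sketch.
#if defined(__GNUC__) || defined(__clang__)
#define WARN_UNUSED_RESULT __attribute__((warn_unused_result))
#else
#define WARN_UNUSED_RESULT
#endif

// The caller must inspect the return value (think "may be an error code").
WARN_UNUSED_RESULT int ParseDigit(char c) {
  return (c >= '0' && c <= '9') ? c - '0' : -1;
}

int main() {
  int digit = ParseDigit('7');  // consuming the result satisfies the warning
  return digit == 7 ? 0 : 1;
}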
diff --git a/deps/v8/src/isolate-inl.h b/deps/v8/src/isolate-inl.h
index 9da1aa4110..2d982f009f 100644
--- a/deps/v8/src/isolate-inl.h
+++ b/deps/v8/src/isolate-inl.h
@@ -93,7 +93,9 @@ void Isolate::FireBeforeCallEnteredCallback() {
}
void Isolate::FireMicrotasksCompletedCallback() {
- for (auto& callback : microtasks_completed_callbacks_) {
+ std::vector<MicrotasksCompletedCallback> callbacks(
+ microtasks_completed_callbacks_);
+ for (auto& callback : callbacks) {
callback(reinterpret_cast<v8::Isolate*>(this));
}
}
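FireMicrotasksCompletedCallback above now iterates over a copy of the registered callbacks rather than the live vector, presumably so that a callback fired from the loop can add or remove callbacks without invalidating the iteration. A minimal standalone sketch of that pattern (the names are illustrative, not V8's):

#include <functional>
#include <iostream>
#include <vector>

using Callback = std::function<void()>;

std::vector<Callback> g_callbacks;

void FireAll() {
  // Snapshot the list first: a callback may register or unregister other
  // callbacks, which would otherwise invalidate iterators on g_callbacks.
  std::vector<Callback> snapshot(g_callbacks);
  for (auto& cb : snapshot) cb();
}

int main() {
  g_callbacks.push_back([] {
    std::cout << "first callback, unregisters everything\n";
    g_callbacks.clear();  // safe: the loop walks the snapshot, not this vector
  });
  g_callbacks.push_back([] { std::cout << "second callback still runs\n"; });
  FireAll();
}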
@@ -131,7 +133,7 @@ bool Isolate::IsArrayConstructorIntact() {
return array_constructor_cell->value() == Smi::FromInt(kProtectorValid);
}
-bool Isolate::IsSpeciesLookupChainIntact() {
+bool Isolate::IsArraySpeciesLookupChainIntact() {
// Note: It would be nice to have debug checks to make sure that the
// species protector is accurate, but this would be hard to do for most of
// what the protector stands for:
@@ -144,7 +146,19 @@ bool Isolate::IsSpeciesLookupChainIntact() {
// done here. In place, there are mjsunit tests harmony/array-species* which
// ensure that behavior is correct in various invalid protector cases.
- PropertyCell* species_cell = heap()->species_protector();
+ PropertyCell* species_cell = heap()->array_species_protector();
+ return species_cell->value()->IsSmi() &&
+ Smi::ToInt(species_cell->value()) == kProtectorValid;
+}
+
+bool Isolate::IsTypedArraySpeciesLookupChainIntact() {
+ PropertyCell* species_cell = heap()->typed_array_species_protector();
+ return species_cell->value()->IsSmi() &&
+ Smi::ToInt(species_cell->value()) == kProtectorValid;
+}
+
+bool Isolate::IsPromiseSpeciesLookupChainIntact() {
+ PropertyCell* species_cell = heap()->promise_species_protector();
return species_cell->value()->IsSmi() &&
Smi::ToInt(species_cell->value()) == kProtectorValid;
}
@@ -154,11 +168,6 @@ bool Isolate::IsStringLengthOverflowIntact() {
return string_length_cell->value() == Smi::FromInt(kProtectorValid);
}
-bool Isolate::IsFastArrayIterationIntact() {
- Cell* fast_iteration_cell = heap()->fast_array_iteration_protector();
- return fast_iteration_cell->value() == Smi::FromInt(kProtectorValid);
-}
-
bool Isolate::IsArrayBufferNeuteringIntact() {
PropertyCell* buffer_neutering = heap()->array_buffer_neutering_protector();
return buffer_neutering->value() == Smi::FromInt(kProtectorValid);
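The single species protector is split above into separate array, typed-array, and promise protectors, all following the same cell pattern: a fast path stays enabled while the cell still holds kProtectorValid and is permanently disabled once the cell is invalidated. A stripped-down sketch of that one-way protector idea — the class below is an illustrative stand-in, not V8's Cell/PropertyCell machinery:

#include <iostream>

constexpr int kProtectorValid = 1;
constexpr int kProtectorInvalid = 0;

// A protector guards an assumption (e.g. "Symbol.species is untouched").
// It only ever moves from valid to invalid, so fast paths can check it cheaply.
class Protector {
 public:
  bool IsIntact() const { return value_ == kProtectorValid; }
  void Invalidate() { value_ = kProtectorInvalid; }

 private:
  int value_ = kProtectorValid;
};

int main() {
  Protector array_species;
  std::cout << "fast path allowed: " << array_species.IsIntact() << "\n";
  array_species.Invalidate();  // e.g. someone redefined @@species
  std::cout << "fast path allowed: " << array_species.IsIntact() << "\n";
}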
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index 37a5578763..adb30b12ac 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -6,6 +6,7 @@
#include <stdlib.h>
+#include <atomic>
#include <fstream> // NOLINT(readability/streams)
#include <sstream>
#include <unordered_map>
@@ -22,7 +23,6 @@
#include "src/basic-block-profiler.h"
#include "src/bootstrapper.h"
#include "src/builtins/constants-table-builder.h"
-#include "src/callable.h"
#include "src/cancelable-task.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
@@ -32,11 +32,9 @@
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/elements.h"
-#include "src/external-reference-table.h"
#include "src/frames-inl.h"
#include "src/ic/stub-cache.h"
#include "src/instruction-stream.h"
-#include "src/interface-descriptors.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
#include "src/libsampler/sampler.h"
@@ -69,6 +67,50 @@ namespace internal {
base::Atomic32 ThreadId::highest_thread_id_ = 0;
+#ifdef V8_EMBEDDED_BUILTINS
+extern const uint8_t* DefaultEmbeddedBlob();
+extern uint32_t DefaultEmbeddedBlobSize();
+
+#ifdef V8_MULTI_SNAPSHOTS
+extern const uint8_t* TrustedEmbeddedBlob();
+extern uint32_t TrustedEmbeddedBlobSize();
+#endif
+
+namespace {
+// These variables provide access to the current embedded blob without requiring
+// an isolate instance. This is needed e.g. by Code::InstructionStart, which may
+// not have access to an isolate but still needs to access the embedded blob.
+// The variables are initialized by each isolate in Init(). Writes and reads are
+// relaxed since we can guarantee that the current thread has initialized these
+// variables before accessing them. Different threads may race, but this is fine
+// since they all attempt to set the same values of the blob pointer and size.
+
+std::atomic<const uint8_t*> current_embedded_blob_(nullptr);
+std::atomic<uint32_t> current_embedded_blob_size_(0);
+} // namespace
+
+void Isolate::SetEmbeddedBlob(const uint8_t* blob, uint32_t blob_size) {
+ embedded_blob_ = blob;
+ embedded_blob_size_ = blob_size;
+ current_embedded_blob_.store(blob, std::memory_order_relaxed);
+ current_embedded_blob_size_.store(blob_size, std::memory_order_relaxed);
+}
+
+const uint8_t* Isolate::embedded_blob() const { return embedded_blob_; }
+uint32_t Isolate::embedded_blob_size() const { return embedded_blob_size_; }
+
+// static
+const uint8_t* Isolate::CurrentEmbeddedBlob() {
+ return current_embedded_blob_.load(std::memory_order::memory_order_relaxed);
+}
+
+// static
+uint32_t Isolate::CurrentEmbeddedBlobSize() {
+ return current_embedded_blob_size_.load(
+ std::memory_order::memory_order_relaxed);
+}
+#endif // V8_EMBEDDED_BUILTINS
+
int ThreadId::AllocateThreadId() {
int new_id = base::Relaxed_AtomicIncrement(&highest_thread_id_, 1);
return new_id;
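The comment above spells out why relaxed atomics suffice for the process-wide blob pointer and size: the thread that reads them has already initialized them itself, and racing writers all store the same values. A self-contained sketch of that pattern with std::atomic — the blob contents here are a placeholder array, not a real embedded snapshot:

#include <atomic>
#include <cstdint>
#include <cstdio>

namespace {
const uint8_t kBlob[] = {0xde, 0xad, 0xbe, 0xef};

std::atomic<const uint8_t*> current_blob{nullptr};
std::atomic<uint32_t> current_blob_size{0};
}  // namespace

// Called by each "isolate" during init; all callers store identical values,
// so relaxed ordering is enough and last-writer-wins is harmless.
void SetBlob(const uint8_t* blob, uint32_t size) {
  current_blob.store(blob, std::memory_order_relaxed);
  current_blob_size.store(size, std::memory_order_relaxed);
}

int main() {
  SetBlob(kBlob, sizeof(kBlob));
  const uint8_t* blob = current_blob.load(std::memory_order_relaxed);
  std::printf("blob size: %u, first byte: 0x%02x\n",
              static_cast<unsigned>(
                  current_blob_size.load(std::memory_order_relaxed)),
              static_cast<unsigned>(blob[0]));
}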
@@ -414,8 +456,7 @@ class FrameArrayBuilder {
// Handle a WASM compiled frame.
//====================================================================
const auto& summary = summ.AsWasmCompiled();
- if (!summary.code().IsCodeObject() &&
- summary.code().GetWasmCode()->kind() != wasm::WasmCode::kFunction) {
+ if (summary.code()->kind() != wasm::WasmCode::kFunction) {
continue;
}
Handle<WasmInstanceObject> instance = summary.wasm_instance();
@@ -456,7 +497,7 @@ class FrameArrayBuilder {
Handle<Object> receiver(exit_frame->receiver(), isolate_);
Handle<Code> code(exit_frame->LookupCode(), isolate_);
const int offset =
- static_cast<int>(exit_frame->pc() - code->instruction_start());
+ static_cast<int>(exit_frame->pc() - code->InstructionStart());
int flags = 0;
if (IsStrictFrame(function)) flags |= FrameArray::kIsStrict;
@@ -585,6 +626,7 @@ Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
switch (frame->type()) {
case StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION:
+ case StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH:
case StackFrame::OPTIMIZED:
case StackFrame::INTERPRETED:
case StackFrame::BUILTIN:
@@ -1291,17 +1333,10 @@ Object* Isolate::UnwindAndFindHandler() {
trap_handler::SetThreadInWasm();
set_wasm_caught_exception(exception);
- if (FLAG_wasm_jit_to_native) {
- wasm::WasmCode* wasm_code =
- wasm_engine()->code_manager()->LookupCode(frame->pc());
- return FoundHandler(nullptr, wasm_code->instructions().start(),
- offset, wasm_code->constant_pool(), return_sp,
- frame->fp());
- } else {
- Code* code = frame->LookupCode();
- return FoundHandler(nullptr, code->instruction_start(), offset,
- code->constant_pool(), return_sp, frame->fp());
- }
+ wasm::WasmCode* wasm_code =
+ wasm_engine()->code_manager()->LookupCode(frame->pc());
+ return FoundHandler(nullptr, wasm_code->instructions().start(), offset,
+ wasm_code->constant_pool(), return_sp, frame->fp());
}
case StackFrame::OPTIMIZED: {
@@ -1412,6 +1447,20 @@ Object* Isolate::UnwindAndFindHandler() {
interpreter_frame->debug_info()->Unwind(frame->fp());
} break;
+ case StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH: {
+ // Builtin continuation frames with catch can handle exceptions.
+ if (!catchable_by_js) break;
+ JavaScriptBuiltinContinuationWithCatchFrame* js_frame =
+ JavaScriptBuiltinContinuationWithCatchFrame::cast(frame);
+ js_frame->SetException(exception);
+
+ // Reconstruct the stack pointer from the frame pointer.
+ Address return_sp = js_frame->fp() - js_frame->GetSPToFPDelta();
+ Code* code = js_frame->LookupCode();
+ return FoundHandler(nullptr, code->InstructionStart(), 0,
+ code->constant_pool(), return_sp, frame->fp());
+ } break;
+
default:
         // All other types cannot handle exceptions.
break;
@@ -1525,6 +1574,12 @@ Isolate::CatchType Isolate::PredictExceptionCatcher() {
if (prediction != NOT_CAUGHT) return prediction;
} break;
+ case StackFrame::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH: {
+ Handle<Code> code(frame->LookupCode());
+ CatchType prediction = ToCatchType(code->GetBuiltinCatchPrediction());
+ if (prediction != NOT_CAUGHT) return prediction;
+ } break;
+
default:
         // All other types cannot handle exceptions.
break;
@@ -1595,8 +1650,7 @@ void Isolate::PrintCurrentStackTrace(FILE* out) {
Handle<Object> receiver(frame->receiver(), this);
Handle<JSFunction> function(frame->function(), this);
Handle<AbstractCode> code(AbstractCode::cast(frame->LookupCode()), this);
- const int offset =
- static_cast<int>(frame->pc() - code->instruction_start());
+ const int offset = static_cast<int>(frame->pc() - code->InstructionStart());
JSStackFrame site(this, receiver, function, code, offset);
Handle<String> line = site.ToString().ToHandleChecked();
@@ -1682,11 +1736,9 @@ bool Isolate::ComputeLocationFromStackTrace(MessageLocation* target,
// TODO(titzer): store a reference to the code object in FrameArray;
// a second lookup here could lead to inconsistency.
int byte_offset =
- FLAG_wasm_jit_to_native
- ? FrameSummary::WasmCompiledFrameSummary::GetWasmSourcePosition(
- compiled_module->GetNativeModule()->GetCode(func_index),
- code_offset)
- : elements->Code(i)->SourcePosition(code_offset);
+ FrameSummary::WasmCompiledFrameSummary::GetWasmSourcePosition(
+ compiled_module->GetNativeModule()->GetCode(func_index),
+ code_offset);
bool is_at_number_conversion =
elements->IsAsmJsWasmFrame(i) &&
@@ -2555,8 +2607,6 @@ void Isolate::TearDown() {
void Isolate::ClearSerializerData() {
- delete external_reference_table_;
- external_reference_table_ = nullptr;
delete external_reference_map_;
external_reference_map_ = nullptr;
}
@@ -2565,8 +2615,6 @@ void Isolate::ClearSerializerData() {
void Isolate::Deinit() {
TRACE_ISOLATE(deinit);
- // Make sure that the GC does not post any new tasks.
- heap_.stop_using_tasks();
debug()->Unload();
if (concurrent_recompilation_enabled()) {
@@ -2575,7 +2623,7 @@ void Isolate::Deinit() {
optimizing_compile_dispatcher_ = nullptr;
}
- wasm_engine()->compilation_manager()->TearDown();
+ wasm_engine()->TearDown();
heap_.mark_compact_collector()->EnsureSweepingCompleted();
heap_.memory_allocator()->unmapper()->WaitUntilCompleted();
@@ -2595,6 +2643,10 @@ void Isolate::Deinit() {
if (sampler && sampler->IsActive()) sampler->Stop();
FreeThreadResources();
+
+ // We start with the heap tear down so that releasing managed objects does
+ // not cause a GC.
+ heap_.StartTearDown();
// Release managed objects before shutting down the heap. The finalizer might
// need to access heap objects.
ReleaseManagedObjects();
@@ -2624,6 +2676,14 @@ void Isolate::Deinit() {
heap_.TearDown();
logger_->TearDown();
+#ifdef V8_EMBEDDED_BUILTINS
+ if (DefaultEmbeddedBlob() == nullptr && embedded_blob() != nullptr) {
+ // We own the embedded blob. Free it.
+ uint8_t* data = const_cast<uint8_t*>(embedded_blob_);
+ InstructionStream::FreeOffHeapInstructionStream(data, embedded_blob_size_);
+ }
+#endif
+
delete interpreter_;
interpreter_ = nullptr;
@@ -2639,12 +2699,6 @@ void Isolate::Deinit() {
root_index_map_ = nullptr;
ClearSerializerData();
-
- for (InstructionStream* stream : off_heap_code_) {
- CHECK(FLAG_stress_off_heap_code);
- delete stream;
- }
- off_heap_code_.clear();
}
@@ -2738,6 +2792,12 @@ void Isolate::InitializeThreadLocal() {
thread_local_top_.Initialize();
}
+void Isolate::SetTerminationOnExternalTryCatch() {
+ if (try_catch_handler() == nullptr) return;
+ try_catch_handler()->can_continue_ = false;
+ try_catch_handler()->has_terminated_ = true;
+ try_catch_handler()->exception_ = heap()->null_value();
+}
bool Isolate::PropagatePendingExceptionToExternalTryCatch() {
Object* exception = pending_exception();
@@ -2754,9 +2814,7 @@ bool Isolate::PropagatePendingExceptionToExternalTryCatch() {
thread_local_top_.external_caught_exception_ = true;
if (!is_catchable_by_javascript(exception)) {
- try_catch_handler()->can_continue_ = false;
- try_catch_handler()->has_terminated_ = true;
- try_catch_handler()->exception_ = heap()->null_value();
+ SetTerminationOnExternalTryCatch();
} else {
v8::TryCatch* handler = try_catch_handler();
DCHECK(thread_local_top_.pending_message_obj_->IsJSMessageObject() ||
@@ -2792,130 +2850,72 @@ void PrintBuiltinSizes(Isolate* isolate) {
const char* name = builtins->name(i);
const char* kind = Builtins::KindNameOf(i);
Code* code = builtins->builtin(i);
- PrintF(stdout, "%s Builtin, %s, %d\n", kind, name,
- code->instruction_size());
+ PrintF(stdout, "%s Builtin, %s, %d\n", kind, name, code->InstructionSize());
}
}
#ifdef V8_EMBEDDED_BUILTINS
-#ifdef DEBUG
-bool BuiltinAliasesOffHeapTrampolineRegister(Isolate* isolate,
- int builtin_index) {
- switch (Builtins::KindOf(builtin_index)) {
- case Builtins::CPP:
- case Builtins::TFC:
- case Builtins::TFH:
- case Builtins::TFJ:
- case Builtins::TFS:
- break;
- case Builtins::API:
- case Builtins::ASM:
- // TODO(jgruber): Extend checks to remaining kinds.
- return false;
- }
-
- Callable callable = Builtins::CallableFor(
- isolate, static_cast<Builtins::Name>(builtin_index));
- CallInterfaceDescriptor descriptor = callable.descriptor();
-
- if (descriptor.ContextRegister() == kOffHeapTrampolineRegister) {
- return true;
- }
-
- for (int i = 0; i < descriptor.GetRegisterParameterCount(); i++) {
- Register reg = descriptor.GetRegisterParameter(i);
- if (reg == kOffHeapTrampolineRegister) return true;
- }
-
- return false;
-}
-#endif
-
-void ChangeToOffHeapTrampoline(Isolate* isolate, Handle<Code> code,
- InstructionStream* stream) {
- DCHECK(Builtins::IsOffHeapSafe(code->builtin_index()));
- HandleScope scope(isolate);
-
- constexpr size_t buffer_size = 256; // Enough to fit the single jmp.
- byte buffer[buffer_size]; // NOLINT(runtime/arrays)
-
- // Generate replacement code that simply tail-calls the off-heap code.
- MacroAssembler masm(isolate, buffer, buffer_size, CodeObjectRequired::kYes);
- DCHECK(
- !BuiltinAliasesOffHeapTrampolineRegister(isolate, code->builtin_index()));
- DCHECK(!masm.has_frame());
- {
- FrameScope scope(&masm, StackFrame::NONE);
- masm.JumpToInstructionStream(stream);
- }
-
- CodeDesc desc;
- masm.GetCode(isolate, &desc);
-
- // Hack in an empty reloc info to satisfy the GC.
- DCHECK_EQ(0, desc.reloc_size);
- Handle<ByteArray> reloc_info =
- isolate->factory()->NewByteArray(desc.reloc_size, TENURED);
- code->set_relocation_info(*reloc_info);
+void CreateOffHeapTrampolines(Isolate* isolate) {
+ DCHECK(isolate->serializer_enabled());
+ DCHECK_NOT_NULL(isolate->embedded_blob());
+ DCHECK_NE(0, isolate->embedded_blob_size());
- // Overwrites the original code.
- CHECK_LE(desc.instr_size, code->instruction_size());
- CHECK_IMPLIES(code->has_safepoint_info(),
- desc.instr_size <= code->safepoint_table_offset());
- code->CopyFrom(desc);
-
- // TODO(jgruber): CopyFrom isn't intended to overwrite existing code, and
- // doesn't update things like instruction_size. The result is a code object in
- // which the first instructions are overwritten while the rest remain intact
- // (but are never executed). That's fine for our current purposes, just
- // manually zero the trailing part.
-
- DCHECK_LE(desc.instr_size, code->instruction_size());
- byte* trailing_instruction_start =
- code->instruction_start() + desc.instr_size;
- int instruction_size = code->instruction_size();
- if (code->has_safepoint_info()) {
- CHECK_LE(code->safepoint_table_offset(), code->instruction_size());
- instruction_size = code->safepoint_table_offset();
- CHECK_LE(desc.instr_size, instruction_size);
- }
- size_t trailing_instruction_size = instruction_size - desc.instr_size;
- std::memset(trailing_instruction_start, 0, trailing_instruction_size);
-}
-
-void LogInstructionStream(Isolate* isolate, Code* code,
- const InstructionStream* stream) {
- if (isolate->logger()->is_logging_code_events() || isolate->is_profiling()) {
- isolate->logger()->LogInstructionStream(code, stream);
- }
-}
-
-void MoveBuiltinsOffHeap(Isolate* isolate) {
- DCHECK(FLAG_stress_off_heap_code);
HandleScope scope(isolate);
Builtins* builtins = isolate->builtins();
- // Lazy deserialization would defeat our off-heap stress test (we'd
- // deserialize later without moving off-heap), so force eager
- // deserialization.
- Snapshot::EnsureAllBuiltinsAreDeserialized(isolate);
+ EmbeddedData d = EmbeddedData::FromBlob();
CodeSpaceMemoryModificationScope code_allocation(isolate->heap());
for (int i = 0; i < Builtins::builtin_count; i++) {
- if (!Builtins::IsOffHeapSafe(i)) continue;
- Handle<Code> code(builtins->builtin(i));
- InstructionStream* stream = new InstructionStream(*code);
- LogInstructionStream(isolate, *code, stream);
- ChangeToOffHeapTrampoline(isolate, code, stream);
- isolate->PushOffHeapCode(stream);
+ if (!Builtins::IsIsolateIndependent(i)) continue;
+
+ const uint8_t* instruction_start = d.InstructionStartOfBuiltin(i);
+ Handle<Code> trampoline = isolate->factory()->NewOffHeapTrampolineFor(
+ builtins->builtin_handle(i), const_cast<Address>(instruction_start));
+
+ // Note that references to the old, on-heap code objects may still exist on
+ // the heap. This is fine for the sake of serialization, as serialization
+ // will replace all of them with a builtin reference which is later
+ // deserialized to point to the object within the builtins table.
+ //
+ // From this point onwards, some builtin code objects may be unreachable and
+ // thus collected by the GC.
+ builtins->set_builtin(i, *trampoline);
+
+ if (isolate->logger()->is_logging_code_events() ||
+ isolate->is_profiling()) {
+ isolate->logger()->LogCodeObject(*trampoline);
+ }
}
}
#endif // V8_EMBEDDED_BUILTINS
} // namespace
+#ifdef V8_EMBEDDED_BUILTINS
+void Isolate::PrepareEmbeddedBlobForSerialization() {
+ // When preparing the embedded blob, ensure it doesn't exist yet.
+ DCHECK_NULL(embedded_blob());
+ DCHECK_NULL(DefaultEmbeddedBlob());
+ DCHECK(serializer_enabled());
+
+ // The isolate takes ownership of this pointer into an executable mmap'd
+ // area. We muck around with const-casts because the standard use-case in
+ // shipping builds is for embedded_blob_ to point into a read-only
+ // .text-embedded section.
+ uint8_t* data;
+ uint32_t size;
+ InstructionStream::CreateOffHeapInstructionStream(this, &data, &size);
+ SetEmbeddedBlob(const_cast<const uint8_t*>(data), size);
+ CreateOffHeapTrampolines(this);
+}
+#endif // V8_EMBEDDED_BUILTINS
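CreateOffHeapTrampolines and PrepareEmbeddedBlobForSerialization above replace each isolate-independent builtin with a small trampoline whose only job is to transfer control to the copy of its code in the embedded blob, after which the old on-heap code objects may become unreachable. A very loose standalone analogy using plain function pointers — it makes no attempt to model V8 code objects, only the idea of swapping table entries for thin forwarders into an immutable blob:

#include <array>
#include <cstdio>

// "Embedded blob": immutable implementations that live outside the table.
namespace blob {
int Add(int a, int b) { return a + b; }
int Mul(int a, int b) { return a * b; }
}  // namespace blob

using BuiltinFn = int (*)(int, int);

// On-heap originals (stand-ins for the deserialized builtin code objects).
int AddOnHeap(int a, int b) { return a + b; }
int MulOnHeap(int a, int b) { return a * b; }

std::array<BuiltinFn, 2> builtin_table = {AddOnHeap, MulOnHeap};

// Trampolines: thin forwarders into the blob, installed over the table
// entries so every later dispatch goes to the embedded copy instead.
int AddTrampoline(int a, int b) { return blob::Add(a, b); }
int MulTrampoline(int a, int b) { return blob::Mul(a, b); }

void InstallTrampolines() {
  builtin_table[0] = AddTrampoline;
  builtin_table[1] = MulTrampoline;
}

int main() {
  InstallTrampolines();
  std::printf("%d %d\n", builtin_table[0](2, 3), builtin_table[1](2, 3));
}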
+
bool Isolate::Init(StartupDeserializer* des) {
TRACE_ISOLATE(init);
+ base::ElapsedTimer timer;
+ if (des == nullptr && FLAG_profile_deserialization) timer.Start();
+
time_millis_at_init_ = heap_.MonotonicallyIncreasingTimeInMs();
stress_deopt_count_ = FLAG_deopt_every_n_times;
@@ -2966,6 +2966,18 @@ bool Isolate::Init(StartupDeserializer* des) {
compiler_dispatcher_ =
new CompilerDispatcher(this, V8::GetCurrentPlatform(), FLAG_stack_size);
+#ifdef V8_EMBEDDED_BUILTINS
+#ifdef V8_MULTI_SNAPSHOTS
+ if (FLAG_untrusted_code_mitigations) {
+ SetEmbeddedBlob(DefaultEmbeddedBlob(), DefaultEmbeddedBlobSize());
+ } else {
+ SetEmbeddedBlob(TrustedEmbeddedBlob(), TrustedEmbeddedBlobSize());
+ }
+#else
+ SetEmbeddedBlob(DefaultEmbeddedBlob(), DefaultEmbeddedBlobSize());
+#endif
+#endif
+
// Enable logging before setting up the heap
logger_->SetUp(this);
@@ -2985,7 +2997,7 @@ bool Isolate::Init(StartupDeserializer* des) {
// SetUp the object heap.
DCHECK(!heap_.HasBeenSetUp());
if (!heap_.SetUp()) {
- V8::FatalProcessOutOfMemory("heap setup");
+ V8::FatalProcessOutOfMemory(this, "heap setup");
return false;
}
@@ -3013,7 +3025,7 @@ bool Isolate::Init(StartupDeserializer* des) {
}
if (!setup_delegate_->SetupHeap(&heap_)) {
- V8::FatalProcessOutOfMemory("heap object creation");
+ V8::FatalProcessOutOfMemory(this, "heap object creation");
return false;
}
@@ -3021,7 +3033,10 @@ bool Isolate::Init(StartupDeserializer* des) {
// Terminate the partial snapshot cache so we can iterate.
partial_snapshot_cache_.push_back(heap_.undefined_value());
#ifdef V8_EMBEDDED_BUILTINS
- builtins_constants_table_builder_ = new BuiltinsConstantsTableBuilder(this);
+ if (serializer_enabled()) {
+ builtins_constants_table_builder_ =
+ new BuiltinsConstantsTableBuilder(this);
+ }
#endif
}
@@ -3056,7 +3071,7 @@ bool Isolate::Init(StartupDeserializer* des) {
setup_delegate_->SetupInterpreter(interpreter_);
#ifdef V8_EMBEDDED_BUILTINS
- if (create_heap_objects) {
+ if (create_heap_objects && serializer_enabled()) {
builtins_constants_table_builder_->Finalize();
delete builtins_constants_table_builder_;
builtins_constants_table_builder_ = nullptr;
@@ -3070,15 +3085,6 @@ bool Isolate::Init(StartupDeserializer* des) {
if (FLAG_print_builtin_size) PrintBuiltinSizes(this);
-#ifdef V8_EMBEDDED_BUILTINS
- if (FLAG_stress_off_heap_code && !serializer_enabled()) {
- // Artificially move code off-heap to help find & verify related code
- // paths. Lazy deserialization should be off to avoid confusion around
- // replacing just the kDeserializeLazy code object.
- MoveBuiltinsOffHeap(this);
- }
-#endif
-
// Finish initialization of ThreadLocal after deserialization is done.
clear_pending_exception();
clear_pending_message();
@@ -3107,6 +3113,10 @@ bool Isolate::Init(StartupDeserializer* des) {
CHECK_EQ(static_cast<int>(
OFFSET_OF(Isolate, heap_.external_memory_at_last_mark_compact_)),
Internals::kExternalMemoryAtLastMarkCompactOffset);
+ CHECK_EQ(
+ static_cast<int>(OFFSET_OF(Isolate, heap_.external_reference_table_)),
+ Internals::kIsolateRootsOffset +
+ Heap::kRootsExternalReferenceTableOffset);
{
HandleScope scope(this);
@@ -3133,6 +3143,11 @@ bool Isolate::Init(StartupDeserializer* des) {
sampling_flags);
}
+ if (des == nullptr && FLAG_profile_deserialization) {
+ double ms = timer.Elapsed().InMillisecondsF();
+ PrintF("[Initializing isolate from scratch took %0.3f ms]\n", ms);
+ }
+
return true;
}
@@ -3468,6 +3483,13 @@ bool Isolate::IsPromiseHookProtectorIntact() {
return is_promise_hook_protector_intact;
}
+bool Isolate::IsPromiseResolveLookupChainIntact() {
+ Cell* promise_resolve_cell = heap()->promise_resolve_protector();
+ bool is_promise_resolve_protector_intact =
+ Smi::ToInt(promise_resolve_cell->value()) == kProtectorValid;
+ return is_promise_resolve_protector_intact;
+}
+
bool Isolate::IsPromiseThenLookupChainIntact() {
PropertyCell* promise_then_cell = heap()->promise_then_protector();
bool is_promise_then_protector_intact =
@@ -3511,11 +3533,28 @@ void Isolate::InvalidateArrayConstructorProtector() {
DCHECK(!IsArrayConstructorIntact());
}
-void Isolate::InvalidateSpeciesProtector() {
- DCHECK(factory()->species_protector()->value()->IsSmi());
- DCHECK(IsSpeciesLookupChainIntact());
- factory()->species_protector()->set_value(Smi::FromInt(kProtectorInvalid));
- DCHECK(!IsSpeciesLookupChainIntact());
+void Isolate::InvalidateArraySpeciesProtector() {
+ DCHECK(factory()->array_species_protector()->value()->IsSmi());
+ DCHECK(IsArraySpeciesLookupChainIntact());
+ factory()->array_species_protector()->set_value(
+ Smi::FromInt(kProtectorInvalid));
+ DCHECK(!IsArraySpeciesLookupChainIntact());
+}
+
+void Isolate::InvalidateTypedArraySpeciesProtector() {
+ DCHECK(factory()->typed_array_species_protector()->value()->IsSmi());
+ DCHECK(IsTypedArraySpeciesLookupChainIntact());
+ factory()->typed_array_species_protector()->set_value(
+ Smi::FromInt(kProtectorInvalid));
+ DCHECK(!IsTypedArraySpeciesLookupChainIntact());
+}
+
+void Isolate::InvalidatePromiseSpeciesProtector() {
+ DCHECK(factory()->promise_species_protector()->value()->IsSmi());
+ DCHECK(IsPromiseSpeciesLookupChainIntact());
+ factory()->promise_species_protector()->set_value(
+ Smi::FromInt(kProtectorInvalid));
+ DCHECK(!IsPromiseSpeciesLookupChainIntact());
}
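
The three invalidators above replace the single species protector with per-constructor variants. As a hedged illustration (not part of this patch), the kind of user-level JavaScript expected to trip each protector looks like this; the detection sites live elsewhere in V8:

// Hedged sketch only: JS that mutates @@species on the three affected
// constructors. Each mutation is expected to invalidate the corresponding
// protector and disable the related fast paths.

// Array species protector:
Object.defineProperty(Array, Symbol.species, { value: function () {} });

// TypedArray species protector (%TypedArray% is the hidden parent class):
const TypedArray = Object.getPrototypeOf(Uint8Array);
Object.defineProperty(TypedArray, Symbol.species, { value: function () {} });

// Promise species protector:
Object.defineProperty(Promise, Symbol.species, { value: function () {} });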
void Isolate::InvalidateStringLengthOverflowProtector() {
@@ -3553,6 +3592,14 @@ void Isolate::InvalidatePromiseHookProtector() {
DCHECK(!IsPromiseHookProtectorIntact());
}
+void Isolate::InvalidatePromiseResolveProtector() {
+ DCHECK(factory()->promise_resolve_protector()->value()->IsSmi());
+ DCHECK(IsPromiseResolveLookupChainIntact());
+ factory()->promise_resolve_protector()->set_value(
+ Smi::FromInt(kProtectorInvalid));
+ DCHECK(!IsPromiseResolveLookupChainIntact());
+}
+
void Isolate::InvalidatePromiseThenProtector() {
DCHECK(factory()->promise_then_protector()->value()->IsSmi());
DCHECK(IsPromiseThenLookupChainIntact());
@@ -3717,7 +3764,8 @@ void Isolate::FireCallCompletedCallback() {
// Fire callbacks. Increase call depth to prevent recursive callbacks.
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this);
v8::Isolate::SuppressMicrotaskExecutionScope suppress(isolate);
- for (auto& callback : call_completed_callbacks_) {
+ std::vector<CallCompletedCallback> callbacks(call_completed_callbacks_);
+ for (auto& callback : callbacks) {
callback(reinterpret_cast<v8::Isolate*>(this));
}
}
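
The loop now runs over a copy of call_completed_callbacks_ so that a callback may add or remove callbacks while the list is being fired. A minimal sketch of the same idiom, with illustrative names rather than V8 API:

// Hedged sketch of the copy-before-iterate idiom used above.
const callCompletedCallbacks = [];
function fireCallCompleted(isolate) {
  // Snapshot first: callbacks may (un)register callbacks without
  // invalidating this loop, mirroring the std::vector copy in the C++.
  const snapshot = callCompletedCallbacks.slice();
  for (const cb of snapshot) cb(isolate);
}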
@@ -3866,10 +3914,13 @@ void Isolate::RunMicrotasks() {
MaybeHandle<Object> maybe_exception;
MaybeHandle<Object> maybe_result = Execution::RunMicrotasks(
this, Execution::MessageHandling::kReport, &maybe_exception);
- // If execution is terminating, just bail out.
+ // If execution is terminating, bail out, clean up, and propagate to
+ // TryCatch scope.
if (maybe_result.is_null() && maybe_exception.is_null()) {
heap()->set_microtask_queue(heap()->empty_fixed_array());
set_pending_microtask_count(0);
+ handle_scope_implementer()->LeaveMicrotaskContext();
+ SetTerminationOnExternalTryCatch();
}
CHECK_EQ(0, pending_microtask_count());
CHECK_EQ(0, heap()->microtask_queue()->length());
@@ -4006,6 +4057,18 @@ void Isolate::PrintWithTimestamp(const char* format, ...) {
va_end(arguments);
}
+void Isolate::SetIdle(bool is_idle) {
+ if (!is_profiling()) return;
+ StateTag state = current_vm_state();
+ DCHECK(state == EXTERNAL || state == IDLE);
+ if (js_entry_sp() != nullptr) return;
+ if (is_idle) {
+ set_current_vm_state(IDLE);
+ } else if (state == IDLE) {
+ set_current_vm_state(EXTERNAL);
+ }
+}
+
bool StackLimitCheck::JsHasOverflowed(uintptr_t gap) const {
StackGuard* stack_guard = isolate_->stack_guard();
#ifdef USE_SIMULATOR
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index 40135ef324..75b447f162 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -11,6 +11,7 @@
#include <unordered_map>
#include <vector>
+#include "include/v8-inspector.h"
#include "include/v8.h"
#include "src/allocation.h"
#include "src/base/atomicops.h"
@@ -26,6 +27,7 @@
#include "src/heap/heap.h"
#include "src/messages.h"
#include "src/objects/code.h"
+#include "src/objects/debug-objects.h"
#include "src/runtime/runtime.h"
#include "src/unicode.h"
@@ -71,7 +73,6 @@ class DescriptorLookupCache;
class EmptyStatement;
class EternalHandles;
class ExternalCallbackScope;
-class ExternalReferenceTable;
class Factory;
class HandleScopeImplementer;
class HeapObjectToIndexHashMap;
@@ -423,7 +424,6 @@ typedef std::vector<HeapObject*> DebugObjectCache;
V(Relocatable*, relocatable_top, nullptr) \
V(DebugObjectCache*, string_stream_debug_object_cache, nullptr) \
V(Object*, string_stream_current_security_token, nullptr) \
- V(ExternalReferenceTable*, external_reference_table, nullptr) \
V(const intptr_t*, api_external_references, nullptr) \
V(AddressToIndexHashMap*, external_reference_map, nullptr) \
V(HeapObjectToIndexHashMap*, root_index_map, nullptr) \
@@ -440,12 +440,13 @@ typedef std::vector<HeapObject*> DebugObjectCache;
/* true if a trace is being formatted through Error.prepareStackTrace. */ \
V(bool, formatting_stack_trace, false) \
/* Perform side effect checks on function call and API callbacks. */ \
- V(bool, needs_side_effect_check, false) \
+ V(DebugInfo::ExecutionMode, debug_execution_mode, DebugInfo::kBreakpoints) \
/* Current code coverage mode */ \
V(debug::Coverage::Mode, code_coverage_mode, debug::Coverage::kBestEffort) \
V(debug::TypeProfile::Mode, type_profile_mode, debug::TypeProfile::kNone) \
V(int, last_stack_frame_info_id, 0) \
- V(int, last_console_context_id, 0)
+ V(int, last_console_context_id, 0) \
+ V(v8_inspector::V8Inspector*, inspector, nullptr)
#define THREAD_LOCAL_TOP_ACCESSOR(type, name) \
inline void set_##name(type v) { thread_local_top_.name##_ = v; } \
@@ -752,8 +753,8 @@ class Isolate {
Object* ThrowIllegalOperation();
template <typename T>
- MUST_USE_RESULT MaybeHandle<T> Throw(Handle<Object> exception,
- MessageLocation* location = nullptr) {
+ V8_WARN_UNUSED_RESULT MaybeHandle<T> Throw(
+ Handle<Object> exception, MessageLocation* location = nullptr) {
Throw(*exception, location);
return MaybeHandle<T>();
}
@@ -1075,15 +1076,14 @@ class Isolate {
bool IsNoElementsProtectorIntact(Context* context);
bool IsNoElementsProtectorIntact();
- inline bool IsSpeciesLookupChainIntact();
+ inline bool IsArraySpeciesLookupChainIntact();
+ inline bool IsTypedArraySpeciesLookupChainIntact();
+ inline bool IsPromiseSpeciesLookupChainIntact();
bool IsIsConcatSpreadableLookupChainIntact();
bool IsIsConcatSpreadableLookupChainIntact(JSReceiver* receiver);
inline bool IsStringLengthOverflowIntact();
inline bool IsArrayIteratorLookupChainIntact();
- // Avoid deopt loops if fast Array Iterators migrate to slow Array Iterators.
- inline bool IsFastArrayIterationIntact();
-
// Make sure we do check for neutered array buffers.
inline bool IsArrayBufferNeuteringIntact();
@@ -1091,8 +1091,15 @@ class Isolate {
// active.
bool IsPromiseHookProtectorIntact();
+ // Make sure a lookup of "resolve" on the %Promise% intrinsic object
+  // yields the initial Promise.resolve method.
+ bool IsPromiseResolveLookupChainIntact();
+
// Make sure a lookup of "then" on any JSPromise whose [[Prototype]] is the
- // initial %PromisePrototype% yields the initial method.
+ // initial %PromisePrototype% yields the initial method. In addition this
+ // protector also guards the negative lookup of "then" on the intrinsic
+ // %ObjectPrototype%, meaning that such lookups are guaranteed to yield
+ // undefined without triggering any side-effects.
bool IsPromiseThenLookupChainIntact();
bool IsPromiseThenLookupChainIntact(Handle<JSReceiver> receiver);
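
For illustration (hedged, not part of the patch), the mutations these two promise protectors guard against can be written in plain JavaScript as follows; once either is tripped, V8 falls back to generic lookups on the affected paths:

// Hedged sketch: user code that trips the protectors documented above.

// Promise.resolve protector: "resolve" on %Promise% is no longer the initial
// built-in.
Promise.resolve = function (v) { return new Promise((r) => r(v)); };

// Promise.then protector: this also breaks the guaranteed negative lookup of
// "then" on %ObjectPrototype%, making every plain object thenable.
Object.prototype.then = function (onFulfilled) { onFulfilled(this); };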
@@ -1111,12 +1118,15 @@ class Isolate {
UpdateNoElementsProtectorOnSetElement(object);
}
void InvalidateArrayConstructorProtector();
- void InvalidateSpeciesProtector();
+ void InvalidateArraySpeciesProtector();
+ void InvalidateTypedArraySpeciesProtector();
+ void InvalidatePromiseSpeciesProtector();
void InvalidateIsConcatSpreadableProtector();
void InvalidateStringLengthOverflowProtector();
void InvalidateArrayIteratorProtector();
void InvalidateArrayBufferNeuteringProtector();
V8_EXPORT_PRIVATE void InvalidatePromiseHookProtector();
+ void InvalidatePromiseResolveProtector();
void InvalidatePromiseThenProtector();
// Returns true if array is the initial array prototype in any native context.
@@ -1184,6 +1194,9 @@ class Isolate {
return id;
}
+ void AddNearHeapLimitCallback(v8::NearHeapLimitCallback, void* data);
+ void RemoveNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
+ size_t heap_limit);
void AddCallCompletedCallback(CallCompletedCallback callback);
void RemoveCallCompletedCallback(CallCompletedCallback callback);
void FireCallCompletedCallback();
@@ -1231,6 +1244,10 @@ class Isolate {
return reinterpret_cast<Address>(&handle_scope_implementer_);
}
+ Address debug_execution_mode_address() {
+ return reinterpret_cast<Address>(&debug_execution_mode_);
+ }
+
void DebugStateUpdated();
void SetPromiseHook(PromiseHook hook);
@@ -1244,14 +1261,22 @@ class Isolate {
return &partial_snapshot_cache_;
}
- void PushOffHeapCode(InstructionStream* stream) {
- off_heap_code_.emplace_back(stream);
- }
-
#ifdef V8_EMBEDDED_BUILTINS
+ // Called only prior to serialization.
+ // This function copies off-heap-safe builtins off the heap, creates off-heap
+ // trampolines, and sets up this isolate's embedded blob.
+ void PrepareEmbeddedBlobForSerialization();
+
BuiltinsConstantsTableBuilder* builtins_constants_table_builder() const {
return builtins_constants_table_builder_;
}
+
+ static const uint8_t* CurrentEmbeddedBlob();
+ static uint32_t CurrentEmbeddedBlobSize();
+
+ // TODO(jgruber): Remove these in favor of the static methods above.
+ const uint8_t* embedded_blob() const;
+ uint32_t embedded_blob_size() const;
#endif
void set_array_buffer_allocator(v8::ArrayBuffer::Allocator* allocator) {
@@ -1357,6 +1382,8 @@ class Isolate {
top_backup_incumbent_scope_ = top_backup_incumbent_scope;
}
+ void SetIdle(bool is_idle);
+
protected:
explicit Isolate(bool enable_serializer);
bool IsArrayOrObjectOrStringPrototype(Object* object);
@@ -1467,6 +1494,8 @@ class Isolate {
// then return true.
bool PropagatePendingExceptionToExternalTryCatch();
+ void SetTerminationOnExternalTryCatch();
+
const char* RAILModeName(RAILMode rail_mode) const {
switch (rail_mode) {
case PERFORMANCE_RESPONSE:
@@ -1628,16 +1657,15 @@ class Isolate {
std::vector<Object*> partial_snapshot_cache_;
- // Stores off-heap instruction streams. Only used if --stress-off-heap-code
- // is enabled.
- // TODO(jgruber,v8:6666): Remove once isolate-independent builtins are
- // implemented. Also remove friend class below.
- std::vector<InstructionStream*> off_heap_code_;
-
#ifdef V8_EMBEDDED_BUILTINS
// Used during builtins compilation to build the builtins constants table,
// which is stored on the root list prior to serialization.
BuiltinsConstantsTableBuilder* builtins_constants_table_builder_ = nullptr;
+
+ void SetEmbeddedBlob(const uint8_t* blob, uint32_t blob_size);
+
+ const uint8_t* embedded_blob_ = nullptr;
+ uint32_t embedded_blob_size_ = 0;
#endif
v8::ArrayBuffer::Allocator* array_buffer_allocator_;
@@ -1668,7 +1696,6 @@ class Isolate {
friend class ExecutionAccess;
friend class HandleScopeImplementer;
friend class heap::HeapTester;
- friend class InstructionStream;
friend class OptimizingCompileDispatcher;
friend class Simulator;
friend class StackGuard;
diff --git a/deps/v8/src/js/array.js b/deps/v8/src/js/array.js
index 46096a0ba5..5b393263da 100644
--- a/deps/v8/src/js/array.js
+++ b/deps/v8/src/js/array.js
@@ -11,8 +11,6 @@
// -------------------------------------------------------------------
// Imports
-var GetIterator;
-var GetMethod;
var GlobalArray = global.Array;
var InternalArray = utils.InternalArray;
var MathMax = global.Math.max;
@@ -22,11 +20,6 @@ var ObjectToString = global.Object.prototype.toString;
var iteratorSymbol = utils.ImportNow("iterator_symbol");
var unscopablesSymbol = utils.ImportNow("unscopables_symbol");
-utils.Import(function(from) {
- GetIterator = from.GetIterator;
- GetMethod = from.GetMethod;
-});
-
// -------------------------------------------------------------------
macro IS_PROXY(arg)
@@ -585,46 +578,14 @@ function ArrayUnshiftFallback(arg1) { // length == 1
}
+// Oh the humanity... don't remove the following function because js2c for some
+// reason gets symbol minification wrong if it's not there. Instead of spending
+// the time fixing js2c (which will go away when all of the internal .js runtime
+// files are gone), just keep this work-around.
function ArraySliceFallback(start, end) {
- var array = TO_OBJECT(this);
- var len = TO_LENGTH(array.length);
- var start_i = TO_INTEGER(start);
- var end_i = len;
-
- if (!IS_UNDEFINED(end)) end_i = TO_INTEGER(end);
-
- if (start_i < 0) {
- start_i += len;
- if (start_i < 0) start_i = 0;
- } else {
- if (start_i > len) start_i = len;
- }
-
- if (end_i < 0) {
- end_i += len;
- if (end_i < 0) end_i = 0;
- } else {
- if (end_i > len) end_i = len;
- }
-
- var result = ArraySpeciesCreate(array, MathMax(end_i - start_i, 0));
-
- if (end_i < start_i) return result;
-
- if (UseSparseVariant(array, len, IS_ARRAY(array), end_i - start_i)) {
- %NormalizeElements(array);
- if (IS_ARRAY(result)) %NormalizeElements(result);
- SparseSlice(array, start_i, end_i - start_i, len, result);
- } else {
- SimpleSlice(array, start_i, end_i - start_i, len, result);
- }
-
- result.length = end_i - start_i;
-
- return result;
+ return null;
}
-
function ComputeSpliceStartIndex(start_i, len) {
if (start_i < 0) {
start_i += len;
@@ -1236,7 +1197,6 @@ utils.Export(function(to) {
"array_push", ArrayPushFallback,
"array_shift", ArrayShiftFallback,
"array_splice", ArraySpliceFallback,
- "array_slice", ArraySliceFallback,
"array_unshift", ArrayUnshiftFallback,
]);
diff --git a/deps/v8/src/js/intl.js b/deps/v8/src/js/intl.js
index 3c7112716c..53fbe1f947 100644
--- a/deps/v8/src/js/intl.js
+++ b/deps/v8/src/js/intl.js
@@ -152,11 +152,18 @@ var AVAILABLE_LOCALES = {
*/
var DEFAULT_ICU_LOCALE = UNDEFINED;
-function GetDefaultICULocaleJS() {
+function GetDefaultICULocaleJS(service) {
if (IS_UNDEFINED(DEFAULT_ICU_LOCALE)) {
DEFAULT_ICU_LOCALE = %GetDefaultICULocale();
}
- return DEFAULT_ICU_LOCALE;
+ // Check that this is a valid default for this service,
+ // otherwise fall back to "und"
+ // TODO(littledan,jshin): AvailableLocalesOf sometimes excludes locales
+ // which don't require tailoring, but work fine with root data. Look into
+ // exposing this fact in ICU or the way Chrome bundles data.
+ return (IS_UNDEFINED(service) ||
+ HAS_OWN_PROPERTY(getAvailableLocalesOf(service), DEFAULT_ICU_LOCALE))
+ ? DEFAULT_ICU_LOCALE : "und";
}
/**
@@ -428,48 +435,6 @@ function resolveLocale(service, requestedLocales, options) {
/**
- * Look up the longest non-empty prefix of |locale| that is an element of
- * |availableLocales|. Returns undefined when the |locale| is completely
- * unsupported by |availableLocales|.
- */
-function bestAvailableLocale(availableLocales, locale) {
- do {
- if (!IS_UNDEFINED(availableLocales[locale])) {
- return locale;
- }
- // Truncate locale if possible.
- var pos = %StringLastIndexOf(locale, '-');
- if (pos === -1) {
- break;
- }
- locale = %_Call(StringSubstring, locale, 0, pos);
- } while (true);
-
- return UNDEFINED;
-}
-
-
-/**
- * Try to match any mutation of |requestedLocale| against |availableLocales|.
- */
-function attemptSingleLookup(availableLocales, requestedLocale) {
- // Remove all extensions.
- var noExtensionsLocale = %RegExpInternalReplace(
- GetAnyExtensionRE(), requestedLocale, '');
- var availableLocale = bestAvailableLocale(
- availableLocales, requestedLocale);
- if (!IS_UNDEFINED(availableLocale)) {
- // Return the resolved locale and extension.
- var extensionMatch = %regexp_internal_match(
- GetUnicodeExtensionRE(), requestedLocale);
- var extension = IS_NULL(extensionMatch) ? '' : extensionMatch[0];
- return {locale: availableLocale, extension: extension};
- }
- return UNDEFINED;
-}
-
-
-/**
* Returns best matched supported locale and extension info using basic
* lookup algorithm.
*/
@@ -481,25 +446,31 @@ function lookupMatcher(service, requestedLocales) {
var availableLocales = getAvailableLocalesOf(service);
for (var i = 0; i < requestedLocales.length; ++i) {
- var result = attemptSingleLookup(availableLocales, requestedLocales[i]);
- if (!IS_UNDEFINED(result)) {
- return result;
- }
- }
-
- var defLocale = GetDefaultICULocaleJS();
-
- // While ECMA-402 returns defLocale directly, we have to check if it is
- // supported, as such support is not guaranteed.
- var result = attemptSingleLookup(availableLocales, defLocale);
- if (!IS_UNDEFINED(result)) {
- return result;
+ // Remove all extensions.
+ var locale = %RegExpInternalReplace(
+ GetAnyExtensionRE(), requestedLocales[i], '');
+ do {
+ if (!IS_UNDEFINED(availableLocales[locale])) {
+ // Return the resolved locale and extension.
+ var extensionMatch = %regexp_internal_match(
+ GetUnicodeExtensionRE(), requestedLocales[i]);
+ var extension = IS_NULL(extensionMatch) ? '' : extensionMatch[0];
+ return {locale: locale, extension: extension, position: i};
+ }
+ // Truncate locale if possible.
+ var pos = %StringLastIndexOf(locale, '-');
+ if (pos === -1) {
+ break;
+ }
+ locale = %_Call(StringSubstring, locale, 0, pos);
+ } while (true);
}
// Didn't find a match, return default.
return {
- locale: 'und',
- extension: ''
+ locale: GetDefaultICULocaleJS(service),
+ extension: '',
+ position: -1
};
}
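
The loop inlined above is the BestAvailableLocale-style prefix truncation that used to live in bestAvailableLocale(). A standalone, hedged sketch of the same idea in ordinary JavaScript, without the %-runtime helpers:

// Hedged sketch of the truncation loop, assuming availableLocales is a plain
// object keyed by locale name.
function bestAvailableLocale(availableLocales, locale) {
  while (true) {
    if (locale in availableLocales) return locale;
    const pos = locale.lastIndexOf('-');
    if (pos === -1) return undefined;
    locale = locale.substring(0, pos);  // "de-CH-1996" -> "de-CH" -> "de"
  }
}
// bestAvailableLocale({ 'de': true }, 'de-CH-1996') === 'de'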
diff --git a/deps/v8/src/js/messages.js b/deps/v8/src/js/messages.js
deleted file mode 100644
index aebd37a791..0000000000
--- a/deps/v8/src/js/messages.js
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// -------------------------------------------------------------------
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var Script = utils.ImportNow("Script");
-
-// -------------------------------------------------------------------
-// Script
-
-/**
- * Get information on a specific source position.
- * Returns an object with the following following properties:
- * script : script object for the source
- * line : source line number
- * column : source column within the line
- * position : position within the source
- * sourceText : a string containing the current line
- * @param {number} position The source position
- * @param {boolean} include_resource_offset Set to true to have the resource
- * offset added to the location
- * @return If line is negative or not in the source null is returned.
- */
-function ScriptLocationFromPosition(position,
- include_resource_offset) {
- return %ScriptPositionInfo(this, position, !!include_resource_offset);
-}
-
-
-/**
- * If sourceURL comment is available returns sourceURL comment contents.
- * Otherwise, script name is returned. See
- * http://fbug.googlecode.com/svn/branches/firebug1.1/docs/ReleaseNotes_1.1.txt
- * and Source Map Revision 3 proposal for details on using //# sourceURL and
- * deprecated //@ sourceURL comment to identify scripts that don't have name.
- *
- * @return {?string} script name if present, value for //# sourceURL comment or
- * deprecated //@ sourceURL comment otherwise.
- */
-function ScriptNameOrSourceURL() {
- // Keep in sync with Script::GetNameOrSourceURL.
- if (this.source_url) return this.source_url;
- return this.name;
-}
-
-
-utils.SetUpLockedPrototype(Script, [
- "source",
- "name",
- "source_url",
- "source_mapping_url",
- "line_offset",
- "column_offset"
- ], [
- "locationFromPosition", ScriptLocationFromPosition,
- "nameOrSourceURL", ScriptNameOrSourceURL,
- ]
-);
-
-});
diff --git a/deps/v8/src/js/prologue.js b/deps/v8/src/js/prologue.js
index 0d6b670367..bc2652129d 100644
--- a/deps/v8/src/js/prologue.js
+++ b/deps/v8/src/js/prologue.js
@@ -99,42 +99,6 @@ function PostNatives(utils) {
utils.PostNatives = UNDEFINED;
}
-// ----------------------------------------------------------------------------
-// Object
-
-var iteratorSymbol = ImportNow("iterator_symbol");
-
-// ES6 7.3.9
-function GetMethod(obj, p) {
- var func = obj[p];
- if (IS_NULL_OR_UNDEFINED(func)) return UNDEFINED;
- if (IS_CALLABLE(func)) return func;
- throw %make_type_error(kCalledNonCallable, typeof func);
-}
-
-// ----------------------------------------------------------------------------
-// Iterator related spec functions.
-
-// ES6 7.4.1 GetIterator(obj, method)
-function GetIterator(obj, method) {
- if (IS_UNDEFINED(method)) {
- method = obj[iteratorSymbol];
- }
- if (!IS_CALLABLE(method)) {
- throw %make_type_error(kNotIterable, obj);
- }
- var iterator = %_Call(method, obj);
- if (!IS_RECEIVER(iterator)) {
- throw %make_type_error(kNotAnIterator, iterator);
- }
- return iterator;
-}
-
-
-exports_container.GetIterator = GetIterator;
-exports_container.GetMethod = GetMethod;
-
-
// -----------------------------------------------------------------------
%OptimizeObjectForAddingMultipleProperties(utils, 14);
diff --git a/deps/v8/src/js/spread.js b/deps/v8/src/js/spread.js
deleted file mode 100644
index 0b56ca7edd..0000000000
--- a/deps/v8/src/js/spread.js
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-"use strict";
-
-// -------------------------------------------------------------------
-// Imports
-var InternalArray = utils.InternalArray;
-
-// -------------------------------------------------------------------
-
-function SpreadArguments() {
- var count = arguments.length;
- var args = new InternalArray();
-
- for (var i = 0; i < count; ++i) {
- var array = arguments[i];
- var length = array.length;
- for (var j = 0; j < length; ++j) {
- args.push(array[j]);
- }
- }
-
- return args;
-}
-
-
-function SpreadIterable(collection) {
- if (IS_NULL_OR_UNDEFINED(collection)) {
- throw %make_type_error(kNotIterable, collection);
- }
-
- var args = new InternalArray();
- for (var value of collection) {
- args.push(value);
- }
- return args;
-}
-
-// ----------------------------------------------------------------------------
-// Exports
-
-%InstallToContext([
- "spread_arguments", SpreadArguments,
- "spread_iterable", SpreadIterable,
-]);
-
-})
diff --git a/deps/v8/src/js/typedarray.js b/deps/v8/src/js/typedarray.js
index 18998cf9be..c398766431 100644
--- a/deps/v8/src/js/typedarray.js
+++ b/deps/v8/src/js/typedarray.js
@@ -13,13 +13,9 @@
// array.js has to come before typedarray.js for this to work
var ArrayToString = utils.ImportNow("ArrayToString");
-var GetIterator;
-var GetMethod;
var InnerArrayJoin;
var InnerArraySort;
var InnerArrayToLocaleString;
-var InternalArray = utils.InternalArray;
-var iteratorSymbol = utils.ImportNow("iterator_symbol");
macro TYPED_ARRAYS(FUNCTION)
FUNCTION(Uint8Array, 1)
@@ -48,8 +44,6 @@ endmacro
var GlobalTypedArray = %object_get_prototype_of(GlobalUint8Array);
utils.Import(function(from) {
- GetIterator = from.GetIterator;
- GetMethod = from.GetMethod;
InnerArrayJoin = from.InnerArrayJoin;
InnerArraySort = from.InnerArraySort;
InnerArrayToLocaleString = from.InnerArrayToLocaleString;
@@ -65,19 +59,6 @@ function ValidateTypedArray(array, methodName) {
throw %make_type_error(kDetachedOperation, methodName);
}
-function TypedArrayCreate(constructor, arg0, arg1, arg2) {
- if (IS_UNDEFINED(arg1)) {
- var newTypedArray = new constructor(arg0);
- } else {
- var newTypedArray = new constructor(arg0, arg1, arg2);
- }
- ValidateTypedArray(newTypedArray, "TypedArrayCreate");
- if (IS_NUMBER(arg0) && %_TypedArrayGetLength(newTypedArray) < arg0) {
- throw %make_type_error(kTypedArrayTooShort);
- }
- return newTypedArray;
-}
-
// ES6 draft 05-18-15, section 22.2.3.25
DEFINE_METHOD(
GlobalTypedArray.prototype,
@@ -124,16 +105,8 @@ DEFINE_METHOD(
}
);
-// TODO(bmeurer): Migrate this to a proper builtin.
-function TypedArrayConstructor() {
- throw %make_type_error(kConstructAbstractClass, "TypedArray");
-}
-
// -------------------------------------------------------------------
-%SetCode(GlobalTypedArray, TypedArrayConstructor);
-
-
%AddNamedProperty(GlobalTypedArray.prototype, "toString", ArrayToString,
DONT_ENUM);
diff --git a/deps/v8/src/json-parser.cc b/deps/v8/src/json-parser.cc
index 2d9593091d..fa40731f45 100644
--- a/deps/v8/src/json-parser.cc
+++ b/deps/v8/src/json-parser.cc
@@ -7,7 +7,6 @@
#include "src/char-predicates-inl.h"
#include "src/conversions.h"
#include "src/debug/debug.h"
-#include "src/factory.h"
#include "src/field-type.h"
#include "src/messages.h"
#include "src/objects-inl.h"
@@ -15,7 +14,6 @@
#include "src/string-hasher.h"
#include "src/transitions.h"
#include "src/unicode-cache.h"
-#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
@@ -419,9 +417,10 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonObject() {
key = ParseJsonString();
if (key.is_null()) return ReportUnexpectedCharacter();
- target = TransitionsAccessor(map).FindTransitionToField(key);
// If a transition was found, follow it and continue.
- transitioning = !target.is_null();
+ transitioning =
+ TransitionsAccessor(map).FindTransitionToField(key).ToHandle(
+ &target);
}
if (c0_ != ':') return ReportUnexpectedCharacter();
diff --git a/deps/v8/src/json-parser.h b/deps/v8/src/json-parser.h
index d76f642b38..fc60e29ec9 100644
--- a/deps/v8/src/json-parser.h
+++ b/deps/v8/src/json-parser.h
@@ -5,8 +5,10 @@
#ifndef V8_JSON_PARSER_H_
#define V8_JSON_PARSER_H_
-#include "src/factory.h"
+#include "src/heap/factory.h"
+#include "src/isolate.h"
#include "src/objects.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
@@ -36,9 +38,8 @@ class JsonParseInternalizer BASE_EMBEDDED {
template <bool seq_one_byte>
class JsonParser BASE_EMBEDDED {
public:
- MUST_USE_RESULT static MaybeHandle<Object> Parse(Isolate* isolate,
- Handle<String> source,
- Handle<Object> reviver) {
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> Parse(
+ Isolate* isolate, Handle<String> source, Handle<Object> reviver) {
PostponeInterruptsScope no_debug_breaks(isolate, StackGuard::DEBUGBREAK);
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
diff --git a/deps/v8/src/json-stringifier.h b/deps/v8/src/json-stringifier.h
index e72bd9d3d2..67cc8ea843 100644
--- a/deps/v8/src/json-stringifier.h
+++ b/deps/v8/src/json-stringifier.h
@@ -17,9 +17,9 @@ class JsonStringifier BASE_EMBEDDED {
~JsonStringifier() { DeleteArray(gap_); }
- MUST_USE_RESULT MaybeHandle<Object> Stringify(Handle<Object> object,
- Handle<Object> replacer,
- Handle<Object> gap);
+ V8_WARN_UNUSED_RESULT MaybeHandle<Object> Stringify(Handle<Object> object,
+ Handle<Object> replacer,
+ Handle<Object> gap);
private:
enum Result { UNCHANGED, SUCCESS, EXCEPTION };
@@ -27,10 +27,9 @@ class JsonStringifier BASE_EMBEDDED {
bool InitializeReplacer(Handle<Object> replacer);
bool InitializeGap(Handle<Object> gap);
- MUST_USE_RESULT MaybeHandle<Object> ApplyToJsonFunction(
- Handle<Object> object,
- Handle<Object> key);
- MUST_USE_RESULT MaybeHandle<Object> ApplyReplacerFunction(
+ V8_WARN_UNUSED_RESULT MaybeHandle<Object> ApplyToJsonFunction(
+ Handle<Object> object, Handle<Object> key);
+ V8_WARN_UNUSED_RESULT MaybeHandle<Object> ApplyReplacerFunction(
Handle<Object> value, Handle<Object> key, Handle<Object> initial_holder);
// Entry point to serialize the object.
diff --git a/deps/v8/src/keys.cc b/deps/v8/src/keys.cc
index 638c83f427..7af093744e 100644
--- a/deps/v8/src/keys.cc
+++ b/deps/v8/src/keys.cc
@@ -6,7 +6,7 @@
#include "src/api-arguments-inl.h"
#include "src/elements.h"
-#include "src/factory.h"
+#include "src/heap/factory.h"
#include "src/identity-map.h"
#include "src/isolate-inl.h"
#include "src/objects-inl.h"
@@ -660,6 +660,16 @@ Maybe<bool> KeyAccumulator::CollectOwnPropertyNames(Handle<JSReceiver> receiver,
enum_keys = GetOwnEnumPropertyDictionaryKeys(
isolate_, mode_, this, object, object->property_dictionary());
}
+ if (object->IsJSModuleNamespace()) {
+ // Simulate [[GetOwnProperty]] for establishing enumerability, which
+ // throws for uninitialized exports.
+ for (int i = 0, n = enum_keys->length(); i < n; ++i) {
+ Handle<String> key(String::cast(enum_keys->get(i)), isolate_);
+ if (Handle<JSModuleNamespace>::cast(object)->GetExport(key).is_null()) {
+ return Nothing<bool>();
+ }
+ }
+ }
AddKeys(enum_keys, DO_NOT_CONVERT);
} else {
if (object->HasFastProperties()) {
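
The new JSModuleNamespace check mirrors spec behaviour: [[GetOwnProperty]] on a module namespace performs a [[Get]], which throws for exports still in their temporal dead zone. A hedged, illustrative two-module example (file names are made up):

// a.mjs (entry module) -- its body has not run yet when b.mjs executes.
import './b.mjs';
export let value = 42;

// b.mjs -- runs first in the cycle, so `value` is still uninitialized here.
import * as ns from './a.mjs';
Object.keys(ns);  // throws ReferenceError: `value` is still uninitialized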
diff --git a/deps/v8/src/keys.h b/deps/v8/src/keys.h
index 0db12d96ba..bd4f048c69 100644
--- a/deps/v8/src/keys.h
+++ b/deps/v8/src/keys.h
@@ -55,8 +55,11 @@ class KeyAccumulator final BASE_EMBEDDED {
// Might return directly the object's enum_cache, copy the result before using
// as an elements backing store for a JSObject.
+ // Does not throw for uninitialized exports in module namespace objects, so
+ // this has to be checked separately.
static Handle<FixedArray> GetOwnEnumPropertyKeys(Isolate* isolate,
Handle<JSObject> object);
+
void AddKey(Object* key, AddKeyConversion convert = DO_NOT_CONVERT);
void AddKey(Handle<Object> key, AddKeyConversion convert = DO_NOT_CONVERT);
void AddKeys(Handle<FixedArray> array, AddKeyConversion convert);
diff --git a/deps/v8/src/layout-descriptor.h b/deps/v8/src/layout-descriptor.h
index 0e6869805c..1332a5efaa 100644
--- a/deps/v8/src/layout-descriptor.h
+++ b/deps/v8/src/layout-descriptor.h
@@ -130,10 +130,10 @@ class LayoutDescriptor : public ByteArray {
V8_INLINE bool GetIndexes(int field_index, int* layout_word_index,
int* layout_bit_index);
- V8_INLINE MUST_USE_RESULT LayoutDescriptor* SetRawData(int field_index);
+ V8_INLINE V8_WARN_UNUSED_RESULT LayoutDescriptor* SetRawData(int field_index);
- V8_INLINE MUST_USE_RESULT LayoutDescriptor* SetTagged(int field_index,
- bool tagged);
+ V8_INLINE V8_WARN_UNUSED_RESULT LayoutDescriptor* SetTagged(int field_index,
+ bool tagged);
};
diff --git a/deps/v8/src/libplatform/default-foreground-task-runner.cc b/deps/v8/src/libplatform/default-foreground-task-runner.cc
index c9cb5fa4d7..140dd404ec 100644
--- a/deps/v8/src/libplatform/default-foreground-task-runner.cc
+++ b/deps/v8/src/libplatform/default-foreground-task-runner.cc
@@ -12,9 +12,7 @@ namespace platform {
DefaultForegroundTaskRunner::DefaultForegroundTaskRunner(
IdleTaskSupport idle_task_support, TimeFunction time_function)
- : event_loop_control_(0),
- idle_task_support_(idle_task_support),
- time_function_(time_function) {}
+ : idle_task_support_(idle_task_support), time_function_(time_function) {}
void DefaultForegroundTaskRunner::Terminate() {
base::LockGuard<base::Mutex> guard(&lock_);
@@ -27,10 +25,10 @@ void DefaultForegroundTaskRunner::Terminate() {
}
void DefaultForegroundTaskRunner::PostTaskLocked(
- std::unique_ptr<Task> task, const base::LockGuard<base::Mutex>& guard) {
+ std::unique_ptr<Task> task, const base::LockGuard<base::Mutex>&) {
if (terminated_) return;
task_queue_.push(std::move(task));
- event_loop_control_.Signal();
+ event_loop_control_.NotifyOne();
}
void DefaultForegroundTaskRunner::PostTask(std::unique_ptr<Task> task) {
@@ -62,7 +60,8 @@ bool DefaultForegroundTaskRunner::IdleTasksEnabled() {
return idle_task_support_ == IdleTaskSupport::kEnabled;
}
-std::unique_ptr<Task> DefaultForegroundTaskRunner::PopTaskFromQueue() {
+std::unique_ptr<Task> DefaultForegroundTaskRunner::PopTaskFromQueue(
+ MessageLoopBehavior wait_for_work) {
base::LockGuard<base::Mutex> guard(&lock_);
// Move delayed tasks that hit their deadline to the main queue.
std::unique_ptr<Task> task = PopTaskFromDelayedQueueLocked(guard);
@@ -71,7 +70,10 @@ std::unique_ptr<Task> DefaultForegroundTaskRunner::PopTaskFromQueue() {
task = PopTaskFromDelayedQueueLocked(guard);
}
- if (task_queue_.empty()) return {};
+ while (task_queue_.empty()) {
+ if (wait_for_work == MessageLoopBehavior::kDoNotWait) return {};
+ WaitForTaskLocked(guard);
+ }
task = std::move(task_queue_.front());
task_queue_.pop();
@@ -81,7 +83,7 @@ std::unique_ptr<Task> DefaultForegroundTaskRunner::PopTaskFromQueue() {
std::unique_ptr<Task>
DefaultForegroundTaskRunner::PopTaskFromDelayedQueueLocked(
- const base::LockGuard<base::Mutex>& guard) {
+ const base::LockGuard<base::Mutex>&) {
if (delayed_task_queue_.empty()) return {};
double now = MonotonicallyIncreasingTime();
@@ -109,7 +111,10 @@ std::unique_ptr<IdleTask> DefaultForegroundTaskRunner::PopTaskFromIdleQueue() {
return task;
}
-void DefaultForegroundTaskRunner::WaitForTask() { event_loop_control_.Wait(); }
+void DefaultForegroundTaskRunner::WaitForTaskLocked(
+ const base::LockGuard<base::Mutex>&) {
+ event_loop_control_.Wait(&lock_);
+}
} // namespace platform
} // namespace v8
diff --git a/deps/v8/src/libplatform/default-foreground-task-runner.h b/deps/v8/src/libplatform/default-foreground-task-runner.h
index 7dfb487828..a0869d0bc7 100644
--- a/deps/v8/src/libplatform/default-foreground-task-runner.h
+++ b/deps/v8/src/libplatform/default-foreground-task-runner.h
@@ -9,8 +9,8 @@
#include "include/libplatform/libplatform.h"
#include "include/v8-platform.h"
+#include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h"
-#include "src/base/platform/semaphore.h"
namespace v8 {
namespace platform {
@@ -25,11 +25,11 @@ class V8_PLATFORM_EXPORT DefaultForegroundTaskRunner
void Terminate();
- std::unique_ptr<Task> PopTaskFromQueue();
+ std::unique_ptr<Task> PopTaskFromQueue(MessageLoopBehavior wait_for_work);
std::unique_ptr<IdleTask> PopTaskFromIdleQueue();
- void WaitForTask();
+ void WaitForTaskLocked(const base::LockGuard<base::Mutex>&);
double MonotonicallyIncreasingTime();
@@ -47,16 +47,16 @@ class V8_PLATFORM_EXPORT DefaultForegroundTaskRunner
// The same as PostTask, but the lock is already held by the caller. The
// {guard} parameter should make sure that the caller is holding the lock.
void PostTaskLocked(std::unique_ptr<Task> task,
- const base::LockGuard<base::Mutex>& guard);
+ const base::LockGuard<base::Mutex>&);
// A caller of this function has to hold {lock_}. The {guard} parameter should
// make sure that the caller is holding the lock.
std::unique_ptr<Task> PopTaskFromDelayedQueueLocked(
- const base::LockGuard<base::Mutex>& guard);
+ const base::LockGuard<base::Mutex>&);
bool terminated_ = false;
base::Mutex lock_;
- base::Semaphore event_loop_control_;
+ base::ConditionVariable event_loop_control_;
std::queue<std::unique_ptr<Task>> task_queue_;
IdleTaskSupport idle_task_support_;
std::queue<std::unique_ptr<IdleTask>> idle_task_queue_;
diff --git a/deps/v8/src/libplatform/default-platform.cc b/deps/v8/src/libplatform/default-platform.cc
index 39d9525eff..74975e71fa 100644
--- a/deps/v8/src/libplatform/default-platform.cc
+++ b/deps/v8/src/libplatform/default-platform.cc
@@ -14,8 +14,8 @@
#include "src/base/platform/platform.h"
#include "src/base/platform/time.h"
#include "src/base/sys-info.h"
-#include "src/libplatform/default-background-task-runner.h"
#include "src/libplatform/default-foreground-task-runner.h"
+#include "src/libplatform/default-worker-threads-task-runner.h"
namespace v8 {
namespace platform {
@@ -96,7 +96,7 @@ DefaultPlatform::DefaultPlatform(
DefaultPlatform::~DefaultPlatform() {
base::LockGuard<base::Mutex> guard(&lock_);
- if (background_task_runner_) background_task_runner_->Terminate();
+ if (worker_threads_task_runner_) worker_threads_task_runner_->Terminate();
for (auto it : foreground_task_runner_map_) {
it.second->Terminate();
}
@@ -114,9 +114,9 @@ void DefaultPlatform::SetThreadPoolSize(int thread_pool_size) {
void DefaultPlatform::EnsureBackgroundTaskRunnerInitialized() {
base::LockGuard<base::Mutex> guard(&lock_);
- if (!background_task_runner_) {
- background_task_runner_ =
- std::make_shared<DefaultBackgroundTaskRunner>(thread_pool_size_);
+ if (!worker_threads_task_runner_) {
+ worker_threads_task_runner_ =
+ std::make_shared<DefaultWorkerThreadsTaskRunner>(thread_pool_size_);
}
}
@@ -138,8 +138,8 @@ void DefaultPlatform::SetTimeFunctionForTesting(
}
bool DefaultPlatform::PumpMessageLoop(v8::Isolate* isolate,
- MessageLoopBehavior behavior) {
- bool failed_result = behavior == MessageLoopBehavior::kWaitForWork;
+ MessageLoopBehavior wait_for_work) {
+ bool failed_result = wait_for_work == MessageLoopBehavior::kWaitForWork;
std::shared_ptr<DefaultForegroundTaskRunner> task_runner;
{
base::LockGuard<base::Mutex> guard(&lock_);
@@ -149,11 +149,8 @@ bool DefaultPlatform::PumpMessageLoop(v8::Isolate* isolate,
}
task_runner = foreground_task_runner_map_[isolate];
}
- if (behavior == MessageLoopBehavior::kWaitForWork) {
- task_runner->WaitForTask();
- }
- std::unique_ptr<Task> task = task_runner->PopTaskFromQueue();
+ std::unique_ptr<Task> task = task_runner->PopTaskFromQueue(wait_for_work);
if (!task) return failed_result;
task->Run();
@@ -196,15 +193,14 @@ std::shared_ptr<TaskRunner> DefaultPlatform::GetForegroundTaskRunner(
return foreground_task_runner_map_[isolate];
}
-std::shared_ptr<TaskRunner> DefaultPlatform::GetBackgroundTaskRunner(
+std::shared_ptr<TaskRunner> DefaultPlatform::GetWorkerThreadsTaskRunner(
v8::Isolate*) {
EnsureBackgroundTaskRunnerInitialized();
- return background_task_runner_;
+ return worker_threads_task_runner_;
}
-void DefaultPlatform::CallOnBackgroundThread(Task* task,
- ExpectedRuntime expected_runtime) {
- GetBackgroundTaskRunner(nullptr)->PostTask(std::unique_ptr<Task>(task));
+void DefaultPlatform::CallOnWorkerThread(std::unique_ptr<Task> task) {
+ GetWorkerThreadsTaskRunner(nullptr)->PostTask(std::move(task));
}
void DefaultPlatform::CallOnForegroundThread(v8::Isolate* isolate, Task* task) {
@@ -248,9 +244,7 @@ void DefaultPlatform::SetTracingController(
tracing_controller_ = std::move(tracing_controller);
}
-size_t DefaultPlatform::NumberOfAvailableBackgroundThreads() {
- return static_cast<size_t>(thread_pool_size_);
-}
+int DefaultPlatform::NumberOfWorkerThreads() { return thread_pool_size_; }
Platform::StackTracePrinter DefaultPlatform::GetStackTracePrinter() {
return PrintStackTrace;
diff --git a/deps/v8/src/libplatform/default-platform.h b/deps/v8/src/libplatform/default-platform.h
index b73f38a5fe..77a7a86586 100644
--- a/deps/v8/src/libplatform/default-platform.h
+++ b/deps/v8/src/libplatform/default-platform.h
@@ -26,7 +26,7 @@ namespace platform {
class Thread;
class WorkerThread;
class DefaultForegroundTaskRunner;
-class DefaultBackgroundTaskRunner;
+class DefaultWorkerThreadsTaskRunner;
class DefaultPageAllocator;
class V8_PLATFORM_EXPORT DefaultPlatform : public NON_EXPORTED_BASE(Platform) {
@@ -55,13 +55,12 @@ class V8_PLATFORM_EXPORT DefaultPlatform : public NON_EXPORTED_BASE(Platform) {
void SetTimeFunctionForTesting(TimeFunction time_function);
// v8::Platform implementation.
- size_t NumberOfAvailableBackgroundThreads() override;
+ int NumberOfWorkerThreads() override;
std::shared_ptr<TaskRunner> GetForegroundTaskRunner(
v8::Isolate* isolate) override;
- std::shared_ptr<TaskRunner> GetBackgroundTaskRunner(
+ std::shared_ptr<TaskRunner> GetWorkerThreadsTaskRunner(
v8::Isolate* isolate) override;
- void CallOnBackgroundThread(Task* task,
- ExpectedRuntime expected_runtime) override;
+ void CallOnWorkerThread(std::unique_ptr<Task> task) override;
void CallOnForegroundThread(v8::Isolate* isolate, Task* task) override;
void CallDelayedOnForegroundThread(Isolate* isolate, Task* task,
double delay_in_seconds) override;
@@ -79,7 +78,7 @@ class V8_PLATFORM_EXPORT DefaultPlatform : public NON_EXPORTED_BASE(Platform) {
base::Mutex lock_;
int thread_pool_size_;
IdleTaskSupport idle_task_support_;
- std::shared_ptr<DefaultBackgroundTaskRunner> background_task_runner_;
+ std::shared_ptr<DefaultWorkerThreadsTaskRunner> worker_threads_task_runner_;
std::map<v8::Isolate*, std::shared_ptr<DefaultForegroundTaskRunner>>
foreground_task_runner_map_;
diff --git a/deps/v8/src/libplatform/default-background-task-runner.cc b/deps/v8/src/libplatform/default-worker-threads-task-runner.cc
index b556b6c3fe..bc2ebbd015 100644
--- a/deps/v8/src/libplatform/default-background-task-runner.cc
+++ b/deps/v8/src/libplatform/default-worker-threads-task-runner.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/libplatform/default-background-task-runner.h"
+#include "src/libplatform/default-worker-threads-task-runner.h"
#include "src/base/platform/mutex.h"
#include "src/libplatform/worker-thread.h"
@@ -10,19 +10,19 @@
namespace v8 {
namespace platform {
-DefaultBackgroundTaskRunner::DefaultBackgroundTaskRunner(
+DefaultWorkerThreadsTaskRunner::DefaultWorkerThreadsTaskRunner(
uint32_t thread_pool_size) {
for (uint32_t i = 0; i < thread_pool_size; ++i) {
thread_pool_.push_back(base::make_unique<WorkerThread>(&queue_));
}
}
-DefaultBackgroundTaskRunner::~DefaultBackgroundTaskRunner() {
+DefaultWorkerThreadsTaskRunner::~DefaultWorkerThreadsTaskRunner() {
// This destructor is needed because we have unique_ptr to the WorkerThreads,
   // and the {WorkerThread} class is forward declared in the header file.
}
-void DefaultBackgroundTaskRunner::Terminate() {
+void DefaultWorkerThreadsTaskRunner::Terminate() {
base::LockGuard<base::Mutex> guard(&lock_);
terminated_ = true;
queue_.Terminate();
@@ -30,28 +30,29 @@ void DefaultBackgroundTaskRunner::Terminate() {
thread_pool_.clear();
}
-void DefaultBackgroundTaskRunner::PostTask(std::unique_ptr<Task> task) {
+void DefaultWorkerThreadsTaskRunner::PostTask(std::unique_ptr<Task> task) {
base::LockGuard<base::Mutex> guard(&lock_);
if (terminated_) return;
queue_.Append(std::move(task));
}
-void DefaultBackgroundTaskRunner::PostDelayedTask(std::unique_ptr<Task> task,
- double delay_in_seconds) {
+void DefaultWorkerThreadsTaskRunner::PostDelayedTask(std::unique_ptr<Task> task,
+ double delay_in_seconds) {
base::LockGuard<base::Mutex> guard(&lock_);
if (terminated_) return;
- // There is no use case for this function on a background thread at the
+ // There is no use case for this function on a worker thread at the
// moment, but it is still part of the interface.
UNIMPLEMENTED();
}
-void DefaultBackgroundTaskRunner::PostIdleTask(std::unique_ptr<IdleTask> task) {
- // There are no idle background tasks.
+void DefaultWorkerThreadsTaskRunner::PostIdleTask(
+ std::unique_ptr<IdleTask> task) {
+ // There are no idle worker tasks.
UNREACHABLE();
}
-bool DefaultBackgroundTaskRunner::IdleTasksEnabled() {
- // There are no idle background tasks.
+bool DefaultWorkerThreadsTaskRunner::IdleTasksEnabled() {
+ // There are no idle worker tasks.
return false;
}
diff --git a/deps/v8/src/libplatform/default-background-task-runner.h b/deps/v8/src/libplatform/default-worker-threads-task-runner.h
index ce2f7efa05..5e46e3dd41 100644
--- a/deps/v8/src/libplatform/default-background-task-runner.h
+++ b/deps/v8/src/libplatform/default-worker-threads-task-runner.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_LIBPLATFORM_DEFAULT_BACKGROUND_TASK_RUNNER_H_
-#define V8_LIBPLATFORM_DEFAULT_BACKGROUND_TASK_RUNNER_H_
+#ifndef V8_LIBPLATFORM_DEFAULT_WORKER_THREADS_TASK_RUNNER_H_
+#define V8_LIBPLATFORM_DEFAULT_WORKER_THREADS_TASK_RUNNER_H_
#include "include/v8-platform.h"
#include "src/libplatform/task-queue.h"
@@ -14,12 +14,12 @@ namespace platform {
class Thread;
class WorkerThread;
-class V8_PLATFORM_EXPORT DefaultBackgroundTaskRunner
+class V8_PLATFORM_EXPORT DefaultWorkerThreadsTaskRunner
: public NON_EXPORTED_BASE(TaskRunner) {
public:
- DefaultBackgroundTaskRunner(uint32_t thread_pool_size);
+ DefaultWorkerThreadsTaskRunner(uint32_t thread_pool_size);
- ~DefaultBackgroundTaskRunner();
+ ~DefaultWorkerThreadsTaskRunner();
void Terminate();
@@ -42,4 +42,4 @@ class V8_PLATFORM_EXPORT DefaultBackgroundTaskRunner
} // namespace platform
} // namespace v8
-#endif // V8_LIBPLATFORM_DEFAULT_BACKGROUND_TASK_RUNNER_H_
+#endif // V8_LIBPLATFORM_DEFAULT_WORKER_THREADS_TASK_RUNNER_H_
diff --git a/deps/v8/src/libplatform/tracing/trace-writer.cc b/deps/v8/src/libplatform/tracing/trace-writer.cc
index 7bfc766469..36a8783499 100644
--- a/deps/v8/src/libplatform/tracing/trace-writer.cc
+++ b/deps/v8/src/libplatform/tracing/trace-writer.cc
@@ -119,12 +119,8 @@ void JSONTraceWriter::AppendArgValue(ConvertableToTraceFormat* value) {
stream_ << arg_stringified;
}
-JSONTraceWriter::JSONTraceWriter(std::ostream& stream)
- : JSONTraceWriter(stream, "traceEvents") {}
-
-JSONTraceWriter::JSONTraceWriter(std::ostream& stream, const std::string& tag)
- : stream_(stream) {
- stream_ << "{\"" << tag << "\":[";
+JSONTraceWriter::JSONTraceWriter(std::ostream& stream) : stream_(stream) {
+ stream_ << "{\"traceEvents\":[";
}
JSONTraceWriter::~JSONTraceWriter() { stream_ << "]}"; }
@@ -175,11 +171,6 @@ TraceWriter* TraceWriter::CreateJSONTraceWriter(std::ostream& stream) {
return new JSONTraceWriter(stream);
}
-TraceWriter* TraceWriter::CreateJSONTraceWriter(std::ostream& stream,
- const std::string& tag) {
- return new JSONTraceWriter(stream, tag);
-}
-
} // namespace tracing
} // namespace platform
} // namespace v8
diff --git a/deps/v8/src/libplatform/tracing/trace-writer.h b/deps/v8/src/libplatform/tracing/trace-writer.h
index d811351389..7e1bdc24d6 100644
--- a/deps/v8/src/libplatform/tracing/trace-writer.h
+++ b/deps/v8/src/libplatform/tracing/trace-writer.h
@@ -14,7 +14,6 @@ namespace tracing {
class JSONTraceWriter : public TraceWriter {
public:
explicit JSONTraceWriter(std::ostream& stream);
- JSONTraceWriter(std::ostream& stream, const std::string& tag);
~JSONTraceWriter();
void AppendTraceEvent(TraceObject* trace_event) override;
void Flush() override;
diff --git a/deps/v8/src/log-utils.h b/deps/v8/src/log-utils.h
index feb14ea1a0..9f530293d4 100644
--- a/deps/v8/src/log-utils.h
+++ b/deps/v8/src/log-utils.h
@@ -32,7 +32,7 @@ class Log {
static bool InitLogAtStart() {
return FLAG_log || FLAG_log_api || FLAG_log_code || FLAG_log_handles ||
FLAG_log_suspect || FLAG_ll_prof || FLAG_perf_basic_prof ||
- FLAG_perf_prof || FLAG_log_source_code ||
+ FLAG_perf_prof || FLAG_log_source_code || FLAG_gdbjit ||
FLAG_log_internal_timer_events || FLAG_prof_cpp || FLAG_trace_ic ||
FLAG_log_function_events;
}
@@ -83,7 +83,7 @@ class Log {
void AppendCharacter(const char character);
// Delegate insertion to the underlying {log_}.
- // All appened srings are escaped to maintain one-line log entries.
+ // All appended strings are escaped to maintain one-line log entries.
template <typename T>
MessageBuilder& operator<<(T value) {
log_->os_ << value;
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index 90023e3731..8f4543feb1 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -32,7 +32,7 @@
#include "src/unicode-inl.h"
#include "src/vm-state-inl.h"
#include "src/wasm/wasm-code-manager.h"
-#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-objects-inl.h"
#include "src/utils.h"
#include "src/version.h"
@@ -57,6 +57,16 @@ static const char* ComputeMarker(SharedFunctionInfo* shared,
}
}
+static const char* ComputeMarker(const wasm::WasmCode* code) {
+ switch (code->kind()) {
+ case wasm::WasmCode::kFunction:
+ return code->is_liftoff() ? "" : "*";
+ case wasm::WasmCode::kInterpreterStub:
+ return "~";
+ default:
+ return "";
+ }
+}
class CodeEventLogger::NameBuffer {
public:
@@ -204,7 +214,7 @@ void CodeEventLogger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
}
void CodeEventLogger::CodeCreateEvent(LogEventsAndTags tag,
- wasm::WasmCode* code,
+ const wasm::WasmCode* code,
wasm::WasmName name) {
name_buffer_->Init(tag);
if (name.is_empty()) {
@@ -228,14 +238,6 @@ void CodeEventLogger::RegExpCodeCreateEvent(AbstractCode* code,
LogRecordedBuffer(code, nullptr, name_buffer_->get(), name_buffer_->size());
}
-void CodeEventLogger::InstructionStreamCreateEvent(
- LogEventsAndTags tag, const InstructionStream* stream,
- const char* description) {
- name_buffer_->Init(tag);
- name_buffer_->AppendBytes(description);
- LogRecordedBuffer(stream, name_buffer_->get(), name_buffer_->size());
-}
-
// Linux perf tool logging support
class PerfBasicLogger : public CodeEventLogger {
public:
@@ -249,9 +251,7 @@ class PerfBasicLogger : public CodeEventLogger {
private:
void LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo* shared,
const char* name, int length) override;
- void LogRecordedBuffer(const InstructionStream* stream, const char* name,
- int length) override;
- void LogRecordedBuffer(wasm::WasmCode* code, const char* name,
+ void LogRecordedBuffer(const wasm::WasmCode* code, const char* name,
int length) override;
void WriteLogRecordedBuffer(uintptr_t address, int size, const char* name,
int name_length);
@@ -309,30 +309,17 @@ void PerfBasicLogger::LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo*,
return;
}
- WriteLogRecordedBuffer(reinterpret_cast<uintptr_t>(code->instruction_start()),
- code->instruction_size(), name, length);
+ WriteLogRecordedBuffer(reinterpret_cast<uintptr_t>(code->InstructionStart()),
+ code->InstructionSize(), name, length);
}
-void PerfBasicLogger::LogRecordedBuffer(wasm::WasmCode* code, const char* name,
- int length) {
+void PerfBasicLogger::LogRecordedBuffer(const wasm::WasmCode* code,
+ const char* name, int length) {
WriteLogRecordedBuffer(
reinterpret_cast<uintptr_t>(code->instructions().start()),
code->instructions().length(), name, length);
}
-void PerfBasicLogger::LogRecordedBuffer(const InstructionStream* stream,
- const char* name, int length) {
- // Linux perf expects hex literals without a leading 0x, while some
- // implementations of printf might prepend one when using the %p format
- // for pointers, leading to wrongly formatted JIT symbols maps.
- //
- // Instead, we use V8PRIxPTR format string and cast pointer to uintpr_t,
- // so that we have control over the exact output format.
- base::OS::FPrint(perf_output_handle_, "%" V8PRIxPTR " %x %.*s\n",
- reinterpret_cast<uintptr_t>(stream->bytes()),
- static_cast<int>(stream->byte_length()), length, name);
-}
-
// Low-level logging support.
#define LL_LOG(Call) if (ll_logger_) ll_logger_->Call;
@@ -350,9 +337,7 @@ class LowLevelLogger : public CodeEventLogger {
private:
void LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo* shared,
const char* name, int length) override;
- void LogRecordedBuffer(const InstructionStream* stream, const char* name,
- int length) override;
- void LogRecordedBuffer(wasm::WasmCode* code, const char* name,
+ void LogRecordedBuffer(const wasm::WasmCode* code, const char* name,
int length) override;
// Low-level profiling event structures.
@@ -441,31 +426,18 @@ void LowLevelLogger::LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo*,
const char* name, int length) {
CodeCreateStruct event;
event.name_size = length;
- event.code_address = code->instruction_start();
- event.code_size = code->instruction_size();
+ event.code_address = code->InstructionStart();
+ event.code_size = code->InstructionSize();
LogWriteStruct(event);
LogWriteBytes(name, length);
- LogWriteBytes(
- reinterpret_cast<const char*>(code->instruction_start()),
- code->instruction_size());
+ LogWriteBytes(reinterpret_cast<const char*>(code->InstructionStart()),
+ code->InstructionSize());
}
-void LowLevelLogger::LogRecordedBuffer(const InstructionStream* stream,
+void LowLevelLogger::LogRecordedBuffer(const wasm::WasmCode* code,
const char* name, int length) {
CodeCreateStruct event;
event.name_size = length;
- event.code_address = stream->bytes();
- event.code_size = static_cast<int32_t>(stream->byte_length());
- LogWriteStruct(event);
- LogWriteBytes(name, length);
- LogWriteBytes(reinterpret_cast<const char*>(stream->bytes()),
- static_cast<int>(stream->byte_length()));
-}
-
-void LowLevelLogger::LogRecordedBuffer(wasm::WasmCode* code, const char* name,
- int length) {
- CodeCreateStruct event;
- event.name_size = length;
event.code_address = code->instructions().start();
event.code_size = code->instructions().length();
LogWriteStruct(event);
@@ -476,8 +448,8 @@ void LowLevelLogger::LogRecordedBuffer(wasm::WasmCode* code, const char* name,
void LowLevelLogger::CodeMoveEvent(AbstractCode* from, Address to) {
CodeMoveStruct event;
- event.from_address = from->instruction_start();
- size_t header_size = from->instruction_start() - from->address();
+ event.from_address = from->InstructionStart();
+ size_t header_size = from->InstructionStart() - from->address();
event.to_address = to + header_size;
LogWriteStruct(event);
}
@@ -513,9 +485,7 @@ class JitLogger : public CodeEventLogger {
private:
void LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo* shared,
const char* name, int length) override;
- void LogRecordedBuffer(const InstructionStream* stream, const char* name,
- int length) override;
- void LogRecordedBuffer(wasm::WasmCode* code, const char* name,
+ void LogRecordedBuffer(const wasm::WasmCode* code, const char* name,
int length) override;
JitCodeEventHandler code_event_handler_;
@@ -533,8 +503,10 @@ void JitLogger::LogRecordedBuffer(AbstractCode* code,
JitCodeEvent event;
memset(&event, 0, sizeof(event));
event.type = JitCodeEvent::CODE_ADDED;
- event.code_start = code->instruction_start();
- event.code_len = code->instruction_size();
+ event.code_start = code->InstructionStart();
+ event.code_type =
+ code->IsCode() ? JitCodeEvent::JIT_CODE : JitCodeEvent::BYTE_CODE;
+ event.code_len = code->InstructionSize();
Handle<SharedFunctionInfo> shared_function_handle;
if (shared && shared->script()->IsScript()) {
shared_function_handle = Handle<SharedFunctionInfo>(shared);
@@ -545,25 +517,12 @@ void JitLogger::LogRecordedBuffer(AbstractCode* code,
code_event_handler_(&event);
}
-void JitLogger::LogRecordedBuffer(const InstructionStream* stream,
- const char* name, int length) {
- JitCodeEvent event;
- memset(&event, 0, sizeof(event));
- event.type = JitCodeEvent::CODE_ADDED;
- event.code_start = stream->bytes();
- event.code_len = stream->byte_length();
- Handle<SharedFunctionInfo> shared_function_handle;
- event.script = ToApiHandle<v8::UnboundScript>(shared_function_handle);
- event.name.str = name;
- event.name.len = length;
- code_event_handler_(&event);
-}
-
-void JitLogger::LogRecordedBuffer(wasm::WasmCode* code, const char* name,
+void JitLogger::LogRecordedBuffer(const wasm::WasmCode* code, const char* name,
int length) {
JitCodeEvent event;
memset(&event, 0, sizeof(event));
event.type = JitCodeEvent::CODE_ADDED;
+ event.code_type = JitCodeEvent::JIT_CODE;
event.code_start = code->instructions().start();
event.code_len = code->instructions().length();
event.name.str = name;
@@ -576,11 +535,13 @@ void JitLogger::CodeMoveEvent(AbstractCode* from, Address to) {
JitCodeEvent event;
event.type = JitCodeEvent::CODE_MOVED;
- event.code_start = from->instruction_start();
- event.code_len = from->instruction_size();
+ event.code_type =
+ from->IsCode() ? JitCodeEvent::JIT_CODE : JitCodeEvent::BYTE_CODE;
+ event.code_start = from->InstructionStart();
+ event.code_len = from->InstructionSize();
// Calculate the header size.
- const size_t header_size = from->instruction_start() - from->address();
+ const size_t header_size = from->InstructionStart() - from->address();
// Calculate the new start address of the instructions.
event.new_code_start = to + header_size;
@@ -932,7 +893,7 @@ void Logger::CodeDeoptEvent(Code* code, DeoptKind kind, Address pc,
Log::MessageBuilder msg(log_);
msg << "code-deopt" << kNext << timer_.Elapsed().InMicroseconds() << kNext
<< code->CodeSize() << kNext
- << reinterpret_cast<void*>(code->instruction_start());
+ << reinterpret_cast<void*>(code->InstructionStart());
// Deoptimization position.
std::ostringstream deopt_location;
@@ -1109,20 +1070,8 @@ void AppendCodeCreateHeader(Log::MessageBuilder& msg,
void AppendCodeCreateHeader(Log::MessageBuilder& msg,
CodeEventListener::LogEventsAndTags tag,
AbstractCode* code, base::ElapsedTimer* timer) {
- AppendCodeCreateHeader(msg, tag, code->kind(), code->instruction_start(),
- code->instruction_size(), timer);
-}
-
-void AppendCodeCreateHeader(Log::MessageBuilder& msg,
- CodeEventListener::LogEventsAndTags tag,
- const InstructionStream* stream,
- base::ElapsedTimer* timer) {
- // TODO(jgruber,v8:6666): In time, we'll need to support non-builtin streams.
- msg << kLogEventsNames[CodeEventListener::CODE_CREATION_EVENT]
- << Logger::kNext << kLogEventsNames[tag] << Logger::kNext << Code::BUILTIN
- << Logger::kNext << timer->Elapsed().InMicroseconds() << Logger::kNext
- << reinterpret_cast<void*>(stream->bytes()) << Logger::kNext
- << stream->byte_length() << Logger::kNext;
+ AppendCodeCreateHeader(msg, tag, code->kind(), code->InstructionStart(),
+ code->InstructionSize(), timer);
}
} // namespace
@@ -1165,7 +1114,7 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
}
void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
- wasm::WasmCode* code, wasm::WasmName name) {
+ const wasm::WasmCode* code, wasm::WasmName name) {
if (!is_logging_code_events()) return;
if (!FLAG_log_code || !log_->IsEnabled()) return;
Log::MessageBuilder msg(log_);
@@ -1175,8 +1124,16 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
if (name.is_empty()) {
msg << "<unknown wasm>";
} else {
- msg << name.start();
- }
+ msg.AppendStringPart(name.start(), name.length());
+ }
+ // We have to add two extra fields that allow the tick processor to group
+ // events for the same wasm function, even if it gets compiled again. For
+ // normal JS functions, we use the shared function info. For wasm, the pointer
+ // to the native module + function index works well enough.
+ // TODO(herhut) Clean up the tick processor code instead.
+ void* tag_ptr =
+ reinterpret_cast<byte*>(code->native_module()) + code->index();
+ msg << kNext << tag_ptr << kNext << ComputeMarker(code);
msg.WriteToLogFile();
}
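
The comment in the hunk above explains why two extra fields are logged for wasm code: recompiling a wasm function produces a new code object, so the tick processor needs a key that stays stable across recompilation, and the native-module pointer plus the function index serves as that key. A minimal standalone sketch of that idea (the stub type and function names are hypothetical, not V8 API):

#include <cinttypes>
#include <cstdint>
#include <cstdio>

struct NativeModuleStub {};  // stands in for wasm::NativeModule

// A stable per-function grouping key: module base address plus function index.
// New code objects produced by recompilation map back to the same key.
uintptr_t WasmGroupingKey(const NativeModuleStub* module, uint32_t func_index) {
  return reinterpret_cast<uintptr_t>(module) + func_index;
}

int main() {
  NativeModuleStub module;
  std::printf("key for function 3: 0x%" PRIxPTR "\n", WasmGroupingKey(&module, 3));
  return 0;
}
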
@@ -1245,8 +1202,8 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
// <fns> is the function table encoded as a sequence of strings
// S<shared-function-info-address>
msg << "code-source-info" << kNext
- << static_cast<void*>(code->instruction_start()) << kNext << script_id
- << kNext << shared->start_position() << kNext << shared->end_position()
+ << static_cast<void*>(code->InstructionStart()) << kNext << script_id
+ << kNext << shared->StartPosition() << kNext << shared->EndPosition()
<< kNext;
SourcePositionTableIterator iterator(code->source_position_table());
@@ -1327,42 +1284,46 @@ void Logger::RegExpCodeCreateEvent(AbstractCode* code, String* source) {
msg.WriteToLogFile();
}
-void Logger::InstructionStreamCreateEvent(LogEventsAndTags tag,
- const InstructionStream* stream,
- const char* description) {
- if (!is_logging_code_events()) return;
- if (!FLAG_log_code || !log_->IsEnabled()) return;
- Log::MessageBuilder msg(log_);
- AppendCodeCreateHeader(msg, tag, stream, &timer_);
- msg << description;
- msg.WriteToLogFile();
-}
-
void Logger::CodeMoveEvent(AbstractCode* from, Address to) {
if (!is_logging_code_events()) return;
MoveEventInternal(CodeEventListener::CODE_MOVE_EVENT, from->address(), to);
}
-void Logger::CodeLinePosInfoRecordEvent(Address code_start,
- ByteArray* source_position_table) {
- if (jit_logger_) {
- void* jit_handler_data = jit_logger_->StartCodePosInfoEvent();
- for (SourcePositionTableIterator iter(source_position_table); !iter.done();
- iter.Advance()) {
+namespace {
+
+void CodeLinePosEvent(JitLogger* jit_logger, Address code_start,
+ SourcePositionTableIterator& iter) {
+ if (jit_logger) {
+ void* jit_handler_data = jit_logger->StartCodePosInfoEvent();
+ for (; !iter.done(); iter.Advance()) {
if (iter.is_statement()) {
- jit_logger_->AddCodeLinePosInfoEvent(
+ jit_logger->AddCodeLinePosInfoEvent(
jit_handler_data, iter.code_offset(),
iter.source_position().ScriptOffset(),
JitCodeEvent::STATEMENT_POSITION);
}
- jit_logger_->AddCodeLinePosInfoEvent(
- jit_handler_data, iter.code_offset(),
- iter.source_position().ScriptOffset(), JitCodeEvent::POSITION);
+ jit_logger->AddCodeLinePosInfoEvent(jit_handler_data, iter.code_offset(),
+ iter.source_position().ScriptOffset(),
+ JitCodeEvent::POSITION);
}
- jit_logger_->EndCodePosInfoEvent(code_start, jit_handler_data);
+ jit_logger->EndCodePosInfoEvent(code_start, jit_handler_data);
}
}
+} // namespace
+
+void Logger::CodeLinePosInfoRecordEvent(Address code_start,
+ ByteArray* source_position_table) {
+ SourcePositionTableIterator iter(source_position_table);
+ CodeLinePosEvent(jit_logger_, code_start, iter);
+}
+
+void Logger::CodeLinePosInfoRecordEvent(
+ Address code_start, Vector<const byte> source_position_table) {
+ SourcePositionTableIterator iter(source_position_table);
+ CodeLinePosEvent(jit_logger_, code_start, iter);
+}
+
void Logger::CodeNameEvent(Address addr, int pos, const char* code_name) {
if (code_name == nullptr) return; // Not a code object.
Log::MessageBuilder msg(log_);
@@ -1616,19 +1577,9 @@ static int EnumerateCompiledFunctions(Heap* heap,
if (sfi->is_compiled() &&
(!sfi->script()->IsScript() ||
Script::cast(sfi->script())->HasValidSource())) {
- // In some cases, an SFI might have (and have executing!) both bytecode
- // and baseline code, so check for both and add them both if needed.
- if (sfi->HasBytecodeArray()) {
- AddFunctionAndCode(sfi, AbstractCode::cast(sfi->bytecode_array()),
- sfis, code_objects, compiled_funcs_count);
- ++compiled_funcs_count;
- }
-
- if (!sfi->IsInterpreted()) {
- AddFunctionAndCode(sfi, AbstractCode::cast(sfi->code()), sfis,
- code_objects, compiled_funcs_count);
- ++compiled_funcs_count;
- }
+ AddFunctionAndCode(sfi, AbstractCode::cast(sfi->abstract_code()), sfis,
+ code_objects, compiled_funcs_count);
+ ++compiled_funcs_count;
}
} else if (obj->IsJSFunction()) {
// Given that we no longer iterate over all optimized JSFunctions, we need
@@ -1661,7 +1612,7 @@ static int EnumerateWasmModules(Heap* heap,
for (HeapObject* obj = iterator.next(); obj != nullptr;
obj = iterator.next()) {
- if (WasmCompiledModule::IsWasmCompiledModule(obj)) {
+ if (obj->IsWasmCompiledModule()) {
WasmCompiledModule* module = WasmCompiledModule::cast(obj);
if (modules != nullptr) {
modules[wasm_modules_count] = Handle<WasmCompiledModule>(module);
@@ -1705,10 +1656,6 @@ void Logger::LogCodeObject(Object* object) {
description = "A JavaScript to Wasm adapter";
tag = CodeEventListener::STUB_TAG;
break;
- case AbstractCode::WASM_TO_WASM_FUNCTION:
- description = "A cross-instance Wasm adapter";
- tag = CodeEventListener::STUB_TAG;
- break;
case AbstractCode::WASM_TO_JS_FUNCTION:
description = "A Wasm to JavaScript adapter";
tag = CodeEventListener::STUB_TAG;
@@ -1727,13 +1674,6 @@ void Logger::LogCodeObject(Object* object) {
PROFILE(isolate_, CodeCreateEvent(tag, code_object, description));
}
-void Logger::LogInstructionStream(Code* code, const InstructionStream* stream) {
- DCHECK(Builtins::IsBuiltin(code));
- const char* description = isolate_->builtins()->name(code->builtin_index());
- CodeEventListener::LogEventsAndTags tag = CodeEventListener::BUILTIN_TAG;
- PROFILE(isolate_, InstructionStreamCreateEvent(tag, stream, description));
-}
-
void Logger::LogCodeObjects() {
Heap* heap = isolate_->heap();
HeapIterator iterator(heap);
@@ -1780,9 +1720,9 @@ void Logger::LogExistingFunction(Handle<SharedFunctionInfo> shared,
Handle<AbstractCode> code) {
if (shared->script()->IsScript()) {
Handle<Script> script(Script::cast(shared->script()));
- int line_num = Script::GetLineNumber(script, shared->start_position()) + 1;
+ int line_num = Script::GetLineNumber(script, shared->StartPosition()) + 1;
int column_num =
- Script::GetColumnNumber(script, shared->start_position()) + 1;
+ Script::GetColumnNumber(script, shared->StartPosition()) + 1;
if (script->name()->IsString()) {
Handle<String> script_name(String::cast(script->name()));
if (line_num > 0) {
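
The CodeLinePosInfoRecordEvent change earlier in this file replaces a single member function with two overloads (one taking a ByteArray*, one taking a Vector<const byte>) that both build a SourcePositionTableIterator and forward to one file-local helper. A compact sketch of that overload-delegation shape, using hypothetical stand-in types rather than V8's:

#include <cstddef>
#include <vector>

struct ByteArrayStub { std::vector<unsigned char> bytes; };      // stands in for ByteArray*
struct ByteSpan { const unsigned char* start; size_t length; };  // stands in for Vector<const byte>

struct PosIteratorStub {
  explicit PosIteratorStub(const ByteArrayStub& a) : p(a.bytes.data()), n(a.bytes.size()) {}
  explicit PosIteratorStub(ByteSpan s) : p(s.start), n(s.length) {}
  const unsigned char* p;
  size_t n;
};

namespace {
// Single shared body; both public overloads funnel into it, so the event
// emission logic lives in exactly one place.
void EmitLinePosEvents(PosIteratorStub& iter) { (void)iter; }
}  // namespace

void RecordLinePos(const ByteArrayStub& table) {
  PosIteratorStub iter(table);
  EmitLinePosEvents(iter);
}

void RecordLinePos(ByteSpan table) {
  PosIteratorStub iter(table);
  EmitLinePosEvents(iter);
}

int main() {
  ByteArrayStub a{{1, 2, 3}};
  RecordLinePos(a);
  RecordLinePos(ByteSpan{a.bytes.data(), a.bytes.size()});
  return 0;
}
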
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index b540c86173..6f3e1f244a 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -182,20 +182,19 @@ class Logger : public CodeEventListener {
AbstractCode* code, SharedFunctionInfo* shared,
Name* source, int line, int column);
void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
- wasm::WasmCode* code, wasm::WasmName name);
+ const wasm::WasmCode* code, wasm::WasmName name);
// Emits a code deoptimization event.
void CodeDisableOptEvent(AbstractCode* code, SharedFunctionInfo* shared);
void CodeMovingGCEvent();
// Emits a code create event for a RegExp.
void RegExpCodeCreateEvent(AbstractCode* code, String* source);
- void InstructionStreamCreateEvent(LogEventsAndTags tag,
- const InstructionStream* stream,
- const char* description);
// Emits a code move event.
void CodeMoveEvent(AbstractCode* from, Address to);
// Emits a code line info record event.
void CodeLinePosInfoRecordEvent(Address code_start,
ByteArray* source_position_table);
+ void CodeLinePosInfoRecordEvent(Address code_start,
+ Vector<const byte> source_position_table);
void SharedFunctionInfoMoveEvent(Address from, Address to);
@@ -268,9 +267,6 @@ class Logger : public CodeEventListener {
// Used for logging stubs found in the snapshot.
void LogCodeObject(Object* code_object);
- // Used for logging off-heap instruction streams.
- void LogInstructionStream(Code* code, const InstructionStream* stream);
-
private:
explicit Logger(Isolate* isolate);
~Logger();
@@ -393,13 +389,10 @@ class CodeEventLogger : public CodeEventListener {
void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
SharedFunctionInfo* shared, Name* source, int line,
int column) override;
- void CodeCreateEvent(LogEventsAndTags tag, wasm::WasmCode* code,
+ void CodeCreateEvent(LogEventsAndTags tag, const wasm::WasmCode* code,
wasm::WasmName name) override;
void RegExpCodeCreateEvent(AbstractCode* code, String* source) override;
- void InstructionStreamCreateEvent(LogEventsAndTags tag,
- const InstructionStream* stream,
- const char* description) override;
void CallbackEvent(Name* name, Address entry_point) override {}
void GetterCallbackEvent(Name* name, Address entry_point) override {}
void SetterCallbackEvent(Name* name, Address entry_point) override {}
@@ -413,9 +406,7 @@ class CodeEventLogger : public CodeEventListener {
virtual void LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo* shared,
const char* name, int length) = 0;
- virtual void LogRecordedBuffer(const InstructionStream* stream,
- const char* name, int length) = 0;
- virtual void LogRecordedBuffer(wasm::WasmCode* code, const char* name,
+ virtual void LogRecordedBuffer(const wasm::WasmCode* code, const char* name,
int length) = 0;
NameBuffer* name_buffer_;
diff --git a/deps/v8/src/lookup.cc b/deps/v8/src/lookup.cc
index 58ad9318dd..2050114994 100644
--- a/deps/v8/src/lookup.cc
+++ b/deps/v8/src/lookup.cc
@@ -74,12 +74,17 @@ LookupIterator LookupIterator::PropertyOrElement(Isolate* isolate,
return LookupIterator(receiver, name, configuration);
}
+// TODO(ishell): Consider removing this way of LookupIterator creation.
// static
LookupIterator LookupIterator::ForTransitionHandler(
Isolate* isolate, Handle<Object> receiver, Handle<Name> name,
- Handle<Object> value, MaybeHandle<Object> handler,
- Handle<Map> transition_map) {
- if (handler.is_null()) return LookupIterator(receiver, name);
+ Handle<Object> value, MaybeHandle<Map> maybe_transition_map) {
+ Handle<Map> transition_map;
+ if (!maybe_transition_map.ToHandle(&transition_map) ||
+ !transition_map->IsPrototypeValidityCellValid()) {
+ // This map is not a valid transition handler, so full lookup is required.
+ return LookupIterator(receiver, name);
+ }
PropertyDetails details = PropertyDetails::Empty();
bool has_property;
@@ -90,6 +95,13 @@ LookupIterator LookupIterator::ForTransitionHandler(
details = transition_map->GetLastDescriptorDetails();
has_property = true;
}
+#ifdef DEBUG
+ if (name->IsPrivate()) {
+ DCHECK_EQ(DONT_ENUM, details.attributes());
+ } else {
+ DCHECK_EQ(NONE, details.attributes());
+ }
+#endif
LookupIterator it(isolate, receiver, name, transition_map, details,
has_property);
@@ -260,14 +272,27 @@ void LookupIterator::InternalUpdateProtector() {
if (isolate_->bootstrapper()->IsActive()) return;
if (*name_ == heap()->constructor_string()) {
- if (!isolate_->IsSpeciesLookupChainIntact()) return;
+ if (!isolate_->IsArraySpeciesLookupChainIntact() &&
+ !isolate_->IsTypedArraySpeciesLookupChainIntact() &&
+ !isolate_->IsPromiseSpeciesLookupChainIntact())
+ return;
// Setting the constructor property could change an instance's @@species
- if (holder_->IsJSArray() || holder_->IsJSPromise() ||
- holder_->IsJSTypedArray()) {
+ if (holder_->IsJSArray()) {
+ if (!isolate_->IsArraySpeciesLookupChainIntact()) return;
isolate_->CountUsage(
v8::Isolate::UseCounterFeature::kArrayInstanceConstructorModified);
- isolate_->InvalidateSpeciesProtector();
- } else if (holder_->map()->is_prototype_map()) {
+ isolate_->InvalidateArraySpeciesProtector();
+ return;
+ } else if (holder_->IsJSPromise()) {
+ if (!isolate_->IsPromiseSpeciesLookupChainIntact()) return;
+ isolate_->InvalidatePromiseSpeciesProtector();
+ return;
+ } else if (holder_->IsJSTypedArray()) {
+ if (!isolate_->IsTypedArraySpeciesLookupChainIntact()) return;
+ isolate_->InvalidateTypedArraySpeciesProtector();
+ return;
+ }
+ if (holder_->map()->is_prototype_map()) {
DisallowHeapAllocation no_gc;
// Setting the constructor of Array.prototype, Promise.prototype or
// %TypedArray%.prototype of any realm also needs to invalidate the
@@ -276,26 +301,49 @@ void LookupIterator::InternalUpdateProtector() {
// have different prototypes for each type, and their parent prototype is
// pointing the same TYPED_ARRAY_PROTOTYPE.
if (isolate_->IsInAnyContext(*holder_,
- Context::INITIAL_ARRAY_PROTOTYPE_INDEX) ||
- isolate_->IsInAnyContext(*holder_,
- Context::PROMISE_PROTOTYPE_INDEX) ||
- isolate_->IsInAnyContext(holder_->map()->prototype(),
- Context::TYPED_ARRAY_PROTOTYPE_INDEX)) {
- isolate_->CountUsage(v8::Isolate::UseCounterFeature::
- kArrayPrototypeConstructorModified);
- isolate_->InvalidateSpeciesProtector();
+ Context::INITIAL_ARRAY_PROTOTYPE_INDEX)) {
+ if (!isolate_->IsArraySpeciesLookupChainIntact()) return;
+ isolate_->CountUsage(
+ v8::Isolate::UseCounterFeature::kArrayPrototypeConstructorModified);
+ isolate_->InvalidateArraySpeciesProtector();
+ } else if (isolate_->IsInAnyContext(*holder_,
+ Context::PROMISE_PROTOTYPE_INDEX)) {
+ if (!isolate_->IsPromiseSpeciesLookupChainIntact()) return;
+ isolate_->InvalidatePromiseSpeciesProtector();
+ } else if (isolate_->IsInAnyContext(
+ holder_->map()->prototype(),
+ Context::TYPED_ARRAY_PROTOTYPE_INDEX)) {
+ if (!isolate_->IsTypedArraySpeciesLookupChainIntact()) return;
+ isolate_->InvalidateTypedArraySpeciesProtector();
}
}
+ } else if (*name_ == heap()->next_string()) {
+ if (!isolate_->IsArrayIteratorLookupChainIntact()) return;
+ // Setting the next property of %ArrayIteratorPrototype% also needs to
+ // invalidate the array iterator protector.
+ if (isolate_->IsInAnyContext(
+ *holder_, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX)) {
+ isolate_->InvalidateArrayIteratorProtector();
+ }
} else if (*name_ == heap()->species_symbol()) {
- if (!isolate_->IsSpeciesLookupChainIntact()) return;
+ if (!isolate_->IsArraySpeciesLookupChainIntact() &&
+ !isolate_->IsTypedArraySpeciesLookupChainIntact() &&
+ !isolate_->IsPromiseSpeciesLookupChainIntact())
+ return;
// Setting the Symbol.species property of any Array, Promise or TypedArray
// constructor invalidates the @@species protector
- if (isolate_->IsInAnyContext(*holder_, Context::ARRAY_FUNCTION_INDEX) ||
- isolate_->IsInAnyContext(*holder_, Context::PROMISE_FUNCTION_INDEX) ||
- IsTypedArrayFunctionInAnyContext(isolate_, *holder_)) {
+ if (isolate_->IsInAnyContext(*holder_, Context::ARRAY_FUNCTION_INDEX)) {
+ if (!isolate_->IsArraySpeciesLookupChainIntact()) return;
isolate_->CountUsage(
v8::Isolate::UseCounterFeature::kArraySpeciesModified);
- isolate_->InvalidateSpeciesProtector();
+ isolate_->InvalidateArraySpeciesProtector();
+ } else if (isolate_->IsInAnyContext(*holder_,
+ Context::PROMISE_FUNCTION_INDEX)) {
+ if (!isolate_->IsPromiseSpeciesLookupChainIntact()) return;
+ isolate_->InvalidatePromiseSpeciesProtector();
+ } else if (IsTypedArrayFunctionInAnyContext(isolate_, *holder_)) {
+ if (!isolate_->IsTypedArraySpeciesLookupChainIntact()) return;
+ isolate_->InvalidateTypedArraySpeciesProtector();
}
} else if (*name_ == heap()->is_concat_spreadable_symbol()) {
if (!isolate_->IsIsConcatSpreadableLookupChainIntact()) return;
@@ -305,11 +353,25 @@ void LookupIterator::InternalUpdateProtector() {
if (holder_->IsJSArray()) {
isolate_->InvalidateArrayIteratorProtector();
}
+ } else if (*name_ == heap()->resolve_string()) {
+ if (!isolate_->IsPromiseResolveLookupChainIntact()) return;
+ // Setting the "resolve" property on any %Promise% intrinsic object
+ // invalidates the Promise.resolve protector.
+ if (isolate_->IsInAnyContext(*holder_, Context::PROMISE_FUNCTION_INDEX)) {
+ isolate_->InvalidatePromiseResolveProtector();
+ }
} else if (*name_ == heap()->then_string()) {
if (!isolate_->IsPromiseThenLookupChainIntact()) return;
// Setting the "then" property on any JSPromise instance or on the
// initial %PromisePrototype% invalidates the Promise#then protector.
+ // Also setting the "then" property on the initial %ObjectPrototype%
+ // invalidates the Promise#then protector, since we use this protector
+ // to guard the fast-path in AsyncGeneratorResolve, where we can skip
+ // the ResolvePromise step and go directly to FulfillPromise if we
+ // know that the Object.prototype doesn't contain a "then" method.
if (holder_->IsJSPromise() ||
+ isolate_->IsInAnyContext(*holder_,
+ Context::INITIAL_OBJECT_PROTOTYPE_INDEX) ||
isolate_->IsInAnyContext(*holder_, Context::PROMISE_PROTOTYPE_INDEX)) {
isolate_->InvalidatePromiseThenProtector();
}
@@ -421,6 +483,14 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
if (!IsElement() && !holder_obj->HasFastProperties()) {
PropertyDetails details(kData, attributes, PropertyCellType::kMutable);
+ if (holder_obj->map()->is_prototype_map() &&
+ (property_details_.attributes() & READ_ONLY) == 0 &&
+ (attributes & READ_ONLY) != 0) {
+ // Invalidate prototype validity cell when a property is reconfigured
+ // from writable to read-only as this may invalidate transitioning store
+ // IC handlers.
+ JSObject::InvalidatePrototypeChains(holder->map());
+ }
if (holder_obj->IsJSGlobalObject()) {
Handle<GlobalDictionary> dictionary(
JSGlobalObject::cast(*holder_obj)->global_dictionary());
@@ -453,14 +523,12 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
// Can only be called when the receiver is a JSObject. JSProxy has to be handled
// via a trap. Adding properties to primitive values is not observable.
-// Returns true if a new transition has been created, or false if an existing
-// transition was followed.
-bool LookupIterator::PrepareTransitionToDataProperty(
+void LookupIterator::PrepareTransitionToDataProperty(
Handle<JSReceiver> receiver, Handle<Object> value,
PropertyAttributes attributes, Object::StoreFromKeyed store_mode) {
DCHECK_IMPLIES(receiver->IsJSProxy(), name()->IsPrivate());
DCHECK(receiver.is_identical_to(GetStoreTarget<JSReceiver>()));
- if (state_ == TRANSITION) return false;
+ if (state_ == TRANSITION) return;
if (!IsElement() && name()->IsPrivate()) {
attributes = static_cast<PropertyAttributes>(attributes | DONT_ENUM);
@@ -506,13 +574,11 @@ bool LookupIterator::PrepareTransitionToDataProperty(
PropertyDetails(kData, attributes, PropertyCellType::kNoCell);
transition_ = map;
}
- return false;
+ return;
}
- bool created_new_map;
Handle<Map> transition = Map::TransitionToDataProperty(
- map, name_, value, attributes, kDefaultFieldConstness, store_mode,
- &created_new_map);
+ map, name_, value, attributes, kDefaultFieldConstness, store_mode);
state_ = TRANSITION;
transition_ = transition;
@@ -524,7 +590,6 @@ bool LookupIterator::PrepareTransitionToDataProperty(
property_details_ = transition->GetLastDescriptorDetails();
has_property_ = true;
}
- return created_new_map;
}
void LookupIterator::ApplyTransitionToDataProperty(
@@ -541,6 +606,15 @@ void LookupIterator::ApplyTransitionToDataProperty(
Handle<Map> transition = transition_map();
bool simple_transition = transition->GetBackPointer() == receiver->map();
+ if (configuration_ == DEFAULT && !transition->is_dictionary_map() &&
+ !transition->IsPrototypeValidityCellValid()) {
+ // Only LookupIterator instances with DEFAULT (full prototype chain)
+ // configuration can produce valid transition handler maps.
+ Handle<Object> validity_cell =
+ Map::GetOrCreatePrototypeChainValidityCell(transition, isolate());
+ transition->set_prototype_validity_cell(*validity_cell);
+ }
+
if (!receiver->IsJSProxy()) {
JSObject::MigrateToMap(Handle<JSObject>::cast(receiver), transition);
}
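
The protector changes above split the single @@species protector into separate Array, TypedArray and Promise protectors; each branch first checks that its own protector is still intact and only then counts the usage and invalidates it. A minimal sketch of that guard-then-invalidate pattern (the enum and class are illustrative only, not V8's API):

#include <array>
#include <cstdio>

enum class Protector { kArraySpecies, kTypedArraySpecies, kPromiseSpecies };

class IsolateStub {
 public:
  bool IsIntact(Protector p) const { return intact_[static_cast<int>(p)]; }
  void Invalidate(Protector p) { intact_[static_cast<int>(p)] = false; }

 private:
  std::array<bool, 3> intact_{{true, true, true}};
};

// Mirrors the shape of InternalUpdateProtector(): bail out early if the
// relevant protector is already gone, otherwise invalidate just that one,
// leaving the other kinds untouched.
void OnConstructorModified(IsolateStub& isolate, Protector which) {
  if (!isolate.IsIntact(which)) return;
  isolate.Invalidate(which);
}

int main() {
  IsolateStub isolate;
  OnConstructorModified(isolate, Protector::kPromiseSpecies);
  std::printf("array species intact: %d\n",
              isolate.IsIntact(Protector::kArraySpecies));    // still 1
  std::printf("promise species intact: %d\n",
              isolate.IsIntact(Protector::kPromiseSpecies));  // now 0
  return 0;
}
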
diff --git a/deps/v8/src/lookup.h b/deps/v8/src/lookup.h
index e107f534df..5a4135d8e0 100644
--- a/deps/v8/src/lookup.h
+++ b/deps/v8/src/lookup.h
@@ -5,8 +5,8 @@
#ifndef V8_LOOKUP_H_
#define V8_LOOKUP_H_
-#include "src/factory.h"
#include "src/globals.h"
+#include "src/heap/factory.h"
#include "src/isolate.h"
#include "src/objects.h"
#include "src/objects/descriptor-array.h"
@@ -135,12 +135,9 @@ class V8_EXPORT_PRIVATE LookupIterator final BASE_EMBEDDED {
Isolate* isolate, Handle<Object> receiver, Handle<Object> key,
bool* success, Configuration configuration = DEFAULT);
- static LookupIterator ForTransitionHandler(Isolate* isolate,
- Handle<Object> receiver,
- Handle<Name> name,
- Handle<Object> value,
- MaybeHandle<Object> handler,
- Handle<Map> transition_map);
+ static LookupIterator ForTransitionHandler(
+ Isolate* isolate, Handle<Object> receiver, Handle<Name> name,
+ Handle<Object> value, MaybeHandle<Map> maybe_transition_map);
void Restart() {
InterceptorState state = InterceptorState::kUninitialized;
@@ -219,7 +216,7 @@ class V8_EXPORT_PRIVATE LookupIterator final BASE_EMBEDDED {
(IsElement() || !name_->IsPrivate());
}
void PrepareForDataProperty(Handle<Object> value);
- bool PrepareTransitionToDataProperty(Handle<JSReceiver> receiver,
+ void PrepareTransitionToDataProperty(Handle<JSReceiver> receiver,
Handle<Object> value,
PropertyAttributes attributes,
Object::StoreFromKeyed store_mode);
@@ -278,9 +275,9 @@ class V8_EXPORT_PRIVATE LookupIterator final BASE_EMBEDDED {
// CodeStubAssembler::CheckForAssociatedProtector!
if (*name_ == heap()->is_concat_spreadable_symbol() ||
*name_ == heap()->constructor_string() ||
- *name_ == heap()->species_symbol() ||
+ *name_ == heap()->next_string() || *name_ == heap()->species_symbol() ||
*name_ == heap()->iterator_symbol() ||
- *name_ == heap()->then_string()) {
+ *name_ == heap()->resolve_string() || *name_ == heap()->then_string()) {
InternalUpdateProtector();
}
}
@@ -306,7 +303,7 @@ class V8_EXPORT_PRIVATE LookupIterator final BASE_EMBEDDED {
Handle<Map> GetReceiverMap() const;
- MUST_USE_RESULT inline JSReceiver* NextHolder(Map* map);
+ V8_WARN_UNUSED_RESULT inline JSReceiver* NextHolder(Map* map);
template <bool is_element>
V8_EXPORT_PRIVATE void Start();
diff --git a/deps/v8/src/macro-assembler.h b/deps/v8/src/macro-assembler.h
index fcdddbb1fa..a78476a82a 100644
--- a/deps/v8/src/macro-assembler.h
+++ b/deps/v8/src/macro-assembler.h
@@ -7,6 +7,7 @@
#include "src/assembler.h"
#include "src/frames.h"
+#include "src/heap/heap.h"
// Helper types to make boolean flag easier to read at call-site.
enum InvokeFlag {
diff --git a/deps/v8/src/managed.h b/deps/v8/src/managed.h
index d0ccb4e739..f98a86bec1 100644
--- a/deps/v8/src/managed.h
+++ b/deps/v8/src/managed.h
@@ -5,9 +5,9 @@
#ifndef V8_MANAGED_H_
#define V8_MANAGED_H_
-#include "src/factory.h"
#include "src/global-handles.h"
#include "src/handles.h"
+#include "src/heap/factory.h"
#include "src/isolate.h"
namespace v8 {
diff --git a/deps/v8/src/messages.cc b/deps/v8/src/messages.cc
index 5876e5f5e4..fe1c246e95 100644
--- a/deps/v8/src/messages.cc
+++ b/deps/v8/src/messages.cc
@@ -194,8 +194,8 @@ Object* EvalFromFunctionName(Isolate* isolate, Handle<Script> script) {
Handle<SharedFunctionInfo> shared(script->eval_from_shared());
// Find the name of the function calling eval.
- if (shared->name()->BooleanValue()) {
- return shared->name();
+ if (shared->Name()->BooleanValue()) {
+ return shared->Name();
}
return shared->inferred_name();
@@ -385,7 +385,7 @@ Handle<Object> JSStackFrame::GetMethodName() {
return isolate_->factory()->null_value();
}
- Handle<String> name(function_->shared()->name(), isolate_);
+ Handle<String> name(function_->shared()->Name(), isolate_);
// ES2015 gives getters and setters name prefixes which must
// be stripped to find the property name.
if (name->IsUtf8EqualTo(CStrVector("get "), true) ||
@@ -648,18 +648,10 @@ void WasmStackFrame::FromFrameArray(Isolate* isolate, Handle<FrameArray> array,
wasm_instance_ = handle(array->WasmInstance(frame_ix), isolate);
wasm_func_index_ = array->WasmFunctionIndex(frame_ix)->value();
if (array->IsWasmInterpretedFrame(frame_ix)) {
- code_ = {};
+ code_ = nullptr;
} else {
- code_ =
- FLAG_wasm_jit_to_native
- ? WasmCodeWrapper(
- wasm_instance_->compiled_module()->GetNativeModule()->GetCode(
- wasm_func_index_))
- : WasmCodeWrapper(handle(
- Code::cast(
- wasm_instance_->compiled_module()->code_table()->get(
- wasm_func_index_)),
- isolate));
+ code_ = wasm_instance_->compiled_module()->GetNativeModule()->GetCode(
+ wasm_func_index_);
}
offset_ = array->Offset(frame_ix)->value();
}
@@ -723,11 +715,8 @@ MaybeHandle<String> WasmStackFrame::ToString() {
int WasmStackFrame::GetPosition() const {
return IsInterpreted()
? offset_
- : (code_.IsCodeObject()
- ? Handle<AbstractCode>::cast(code_.GetCode())
- ->SourcePosition(offset_)
- : FrameSummary::WasmCompiledFrameSummary::
- GetWasmSourcePosition(code_.GetWasmCode(), offset_));
+ : FrameSummary::WasmCompiledFrameSummary::GetWasmSourcePosition(
+ code_, offset_);
}
Handle<Object> WasmStackFrame::Null() const {
@@ -778,10 +767,8 @@ Handle<Object> AsmJsWasmStackFrame::GetScriptNameOrSourceUrl() {
int AsmJsWasmStackFrame::GetPosition() const {
DCHECK_LE(0, offset_);
int byte_offset =
- code_.IsCodeObject()
- ? Handle<AbstractCode>::cast(code_.GetCode())->SourcePosition(offset_)
- : FrameSummary::WasmCompiledFrameSummary::GetWasmSourcePosition(
- code_.GetWasmCode(), offset_);
+ FrameSummary::WasmCompiledFrameSummary::GetWasmSourcePosition(code_,
+ offset_);
Handle<WasmSharedModuleData> shared(
wasm_instance_->compiled_module()->shared(), isolate_);
DCHECK_LE(0, byte_offset);
diff --git a/deps/v8/src/messages.h b/deps/v8/src/messages.h
index 1a1a5b29ff..187bd4d308 100644
--- a/deps/v8/src/messages.h
+++ b/deps/v8/src/messages.h
@@ -13,7 +13,6 @@
#include <memory>
#include "src/handles.h"
-#include "src/wasm/wasm-code-wrapper.h"
namespace v8 {
namespace internal {
@@ -153,7 +152,7 @@ class WasmStackFrame : public StackFrameBase {
bool IsToplevel() override { return false; }
bool IsConstructor() override { return false; }
bool IsStrict() const override { return false; }
- bool IsInterpreted() const { return code_.is_null(); }
+ bool IsInterpreted() const { return code_ == nullptr; }
MaybeHandle<String> ToString() override;
@@ -165,7 +164,7 @@ class WasmStackFrame : public StackFrameBase {
Handle<WasmInstanceObject> wasm_instance_;
uint32_t wasm_func_index_;
- WasmCodeWrapper code_; // null for interpreted frames.
+ wasm::WasmCode* code_; // null for interpreted frames.
int offset_;
private:
@@ -344,6 +343,7 @@ class ErrorUtils : public AllStatic {
T(IteratorSymbolNonCallable, "Found non-callable @@iterator") \
T(IteratorValueNotAnObject, "Iterator value % is not an entry object") \
T(LanguageID, "Language ID should be string or object.") \
+ T(MapperFunctionNonCallable, "flatMap mapper function is not callable") \
T(MethodCalledOnWrongObject, \
"Method % called on a non-object or on a wrong type of object.") \
T(MethodInvokedOnNullOrUndefined, \
@@ -556,7 +556,13 @@ class ErrorUtils : public AllStatic {
T(LetInLexicalBinding, "let is disallowed as a lexically bound name") \
T(LocaleMatcher, "Illegal value for localeMatcher:%") \
T(NormalizationForm, "The normalization form should be one of %.") \
+ T(ZeroDigitNumericSeparator, \
+ "Numeric separator can not be used after leading 0.") \
T(NumberFormatRange, "% argument must be between 0 and 100") \
+ T(TrailingNumericSeparator, \
+ "Numeric separators are not allowed at the end of numeric literals") \
+ T(ContinuousNumericSeparator, \
+ "Only one underscore is allowed as numeric separator") \
T(PropertyValueOutOfRange, "% value is out of range.") \
T(StackOverflow, "Maximum call stack size exceeded") \
T(ToPrecisionFormatRange, \
@@ -638,6 +644,9 @@ class ErrorUtils : public AllStatic {
T(NoCatchOrFinally, "Missing catch or finally after try") \
T(NotIsvar, "builtin %%IS_VAR: not a variable") \
T(ParamAfterRest, "Rest parameter must be last formal parameter") \
+ T(FlattenPastSafeLength, \
+ "Flattening % elements on an array-like of length % " \
+ "is disallowed, as the total surpasses 2**53-1") \
T(PushPastSafeLength, \
"Pushing % elements on an array-like of length % " \
"is disallowed, as the total surpasses 2**53-1") \
@@ -686,6 +695,7 @@ class ErrorUtils : public AllStatic {
T(TooManySpreads, \
"Literal containing too many nested spreads (up to 65534 allowed)") \
T(TooManyVariables, "Too many variables declared (only 4194303 allowed)") \
+ T(TooManyElementsInPromiseAll, "Too many elements passed to Promise.all") \
T(TypedArrayTooShort, \
"Derived TypedArray constructor created an array which was too small") \
T(UnexpectedEOS, "Unexpected end of input") \
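
The three new parser messages above correspond to the numeric-separator rules: no separator directly after a leading 0, none at the end of a literal, and never two in a row. A small standalone checker expressing the same rules, purely as an illustration rather than the parser's actual implementation:

#include <cstdio>
#include <string>

// Returns nullptr if the digit run is valid, otherwise the rule it violates.
const char* CheckNumericSeparators(const std::string& digits) {
  for (size_t i = 0; i < digits.size(); ++i) {
    if (digits[i] != '_') continue;
    if (i == 0) return "separator before any digit";
    if (i == 1 && digits[0] == '0') return "ZeroDigitNumericSeparator";
    if (i + 1 == digits.size()) return "TrailingNumericSeparator";
    if (digits[i + 1] == '_') return "ContinuousNumericSeparator";
  }
  return nullptr;
}

int main() {
  const char* cases[] = {"1_000_000", "0_1", "1__0", "10_"};
  for (const char* c : cases) {
    const char* err = CheckNumericSeparators(c);
    std::printf("%-10s -> %s\n", c, err ? err : "ok");
  }
  return 0;
}
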
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index a5e2335852..bc4d95b4fa 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -82,7 +82,8 @@ Address RelocInfo::target_address() {
Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_) ||
- rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE);
+ IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
+ IsOffHeapTarget(rmode_));
// Read the address of the word containing the target_address in an
// instruction stream.
// The only architecture-independent user of this function is the serializer.
@@ -212,6 +213,12 @@ Address RelocInfo::target_external_reference() {
return Assembler::target_address_at(pc_, constant_pool_);
}
+void RelocInfo::set_target_external_reference(
+ Address target, ICacheFlushMode icache_flush_mode) {
+ DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
+ Assembler::set_target_address_at(pc_, constant_pool_, target,
+ icache_flush_mode);
+}
Address RelocInfo::target_internal_reference() {
if (rmode_ == INTERNAL_REFERENCE) {
@@ -240,6 +247,12 @@ Address RelocInfo::target_internal_reference_address() {
return reinterpret_cast<Address>(pc_);
}
+void RelocInfo::set_wasm_code_table_entry(Address target,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(rmode_ == RelocInfo::WASM_CODE_TABLE_ENTRY);
+ Assembler::set_target_address_at(pc_, constant_pool_, target,
+ icache_flush_mode);
+}
Address RelocInfo::target_runtime_entry(Assembler* origin) {
DCHECK(IsRuntimeEntry(rmode_));
@@ -254,6 +267,11 @@ void RelocInfo::set_target_runtime_entry(Address target,
set_target_address(target, write_barrier_mode, icache_flush_mode);
}
+Address RelocInfo::target_off_heap_target() {
+ DCHECK(IsOffHeapTarget(rmode_));
+ return Assembler::target_address_at(pc_, constant_pool_);
+}
+
void RelocInfo::WipeOut() {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
@@ -281,6 +299,8 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
visitor->VisitInternalReference(host(), this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(host(), this);
+ } else if (RelocInfo::IsOffHeapTarget(mode)) {
+ visitor->VisitOffHeapTarget(host(), this);
}
}
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index a39c06eaa2..6365290f4c 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -183,11 +183,9 @@ Register ToRegister(int num) {
// -----------------------------------------------------------------------------
// Implementation of RelocInfo.
-const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
- 1 << RelocInfo::INTERNAL_REFERENCE |
+const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE |
1 << RelocInfo::INTERNAL_REFERENCE_ENCODED;
-
bool RelocInfo::IsCodedSpecially() {
// The deserializer needs to know whether a pointer is specially coded. Being
// specially coded on MIPS means that it is a lui/ori instruction, and that is
@@ -3038,14 +3036,14 @@ void Assembler::cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs,
void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
DCHECK(IsMipsArchVariant(kMips32r6));
Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
- emit(instr);
+ emit(instr, CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bc1nez(int16_t offset, FPURegister ft) {
DCHECK(IsMipsArchVariant(kMips32r6));
Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask);
- emit(instr);
+ emit(instr, CompactBranchType::COMPACT_BRANCH);
}
@@ -3083,16 +3081,20 @@ void Assembler::fcmp(FPURegister src1, const double src2,
void Assembler::bc1f(int16_t offset, uint16_t cc) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(is_uint3(cc));
Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
emit(instr);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
void Assembler::bc1t(int16_t offset, uint16_t cc) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(is_uint3(cc));
Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
emit(instr);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
// ---------- MSA instructions ------------
@@ -3666,7 +3668,7 @@ void Assembler::GrowBuffer() {
// Some internal data structures overflow for very large buffers,
// they must ensure that kMaximalBufferSize is not too large.
if (desc.buffer_size > kMaximalBufferSize) {
- V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
+ V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
}
// Set up new buffer.
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index a5d608898f..dbb803bad2 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -320,9 +320,9 @@ const Simd128Register no_msareg = Simd128Register::no_reg();
// cp is assumed to be a callee saved register.
constexpr Register kRootRegister = s6;
constexpr Register cp = s7;
-constexpr Register kLithiumScratchReg = s3;
-constexpr Register kLithiumScratchReg2 = s4;
-constexpr DoubleRegister kLithiumScratchDouble = f30;
+constexpr Register kScratchReg = s3;
+constexpr Register kScratchReg2 = s4;
+constexpr DoubleRegister kScratchDoubleReg = f30;
constexpr DoubleRegister kDoubleRegZero = f28;
// Used on mips32r6 for compare operations.
constexpr DoubleRegister kDoubleCompareReg = f26;
@@ -674,9 +674,6 @@ class Assembler : public AssemblerBase {
// Helper values.
LAST_CODE_MARKER,
FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED,
- // Code aging
- CODE_AGE_MARKER_NOP = 6,
- CODE_AGE_SEQUENCE_NOP
};
// Type == 0 is the default non-marking nop. For mips this is a
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index 7ae3451f34..eb39e58959 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -43,7 +43,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
Register scratch = GetRegisterThatIsNotOneOf(result_reg);
Register scratch2 = GetRegisterThatIsNotOneOf(result_reg, scratch);
Register scratch3 = GetRegisterThatIsNotOneOf(result_reg, scratch, scratch2);
- DoubleRegister double_scratch = kLithiumScratchDouble;
+ DoubleRegister double_scratch = kScratchDoubleReg;
// Account for saved regs.
const int kArgumentOffset = 3 * kPointerSize;
@@ -179,53 +179,34 @@ void MathPowStub::Generate(MacroAssembler* masm) {
const Register scratch2 = t3;
Label call_runtime, done, int_exponent;
- if (exponent_type() == TAGGED) {
- // Base is already in double_base.
- __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
- __ Ldc1(double_exponent,
- FieldMemOperand(exponent, HeapNumber::kValueOffset));
- }
-
- if (exponent_type() != INTEGER) {
- Label int_exponent_convert;
- // Detect integer exponents stored as double.
- __ EmitFPUTruncate(kRoundToMinusInf,
- scratch,
- double_exponent,
- at,
- double_scratch,
- scratch2,
- kCheckForInexactConversion);
- // scratch2 == 0 means there was no conversion error.
- __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
+ Label int_exponent_convert;
+ // Detect integer exponents stored as double.
+ __ EmitFPUTruncate(kRoundToMinusInf, scratch, double_exponent, at,
+ double_scratch, scratch2, kCheckForInexactConversion);
+ // scratch2 == 0 means there was no conversion error.
+ __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
- __ push(ra);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(0, 2, scratch2);
- __ MovToFloatParameters(double_base, double_exponent);
- __ CallCFunction(
- ExternalReference::power_double_double_function(isolate()),
- 0, 2);
- }
- __ pop(ra);
- __ MovFromFloatResult(double_result);
- __ jmp(&done);
-
- __ bind(&int_exponent_convert);
+ __ push(ra);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(0, 2, scratch2);
+ __ MovToFloatParameters(double_base, double_exponent);
+ __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
+ 0, 2);
}
+ __ pop(ra);
+ __ MovFromFloatResult(double_result);
+ __ jmp(&done);
+
+ __ bind(&int_exponent_convert);
// Calculate power with integer exponent.
__ bind(&int_exponent);
// Get two copies of exponent in the registers scratch and exponent.
- if (exponent_type() == INTEGER) {
- __ mov(scratch, exponent);
- } else {
- // Exponent has previously been stored into scratch as untagged integer.
- __ mov(exponent, scratch);
- }
+ // Exponent has previously been stored into scratch as untagged integer.
+ __ mov(exponent, scratch);
__ mov_d(double_scratch, double_base); // Back up base.
__ Move(double_result, 1.0);
@@ -264,7 +245,8 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ div_d(double_result, double_scratch, double_result);
// Test whether result is zero. Bail out to check for subnormal result.
// Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
- __ BranchF(&done, nullptr, ne, double_result, kDoubleRegZero);
+ __ CompareF64(EQ, double_result, kDoubleRegZero);
+ __ BranchFalseShortF(&done);
// double_exponent may not contain the exponent value if the input was a
// smi. We set it with exponent value before bailing out.
@@ -466,6 +448,12 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&zero);
+ // Reset the masking register. This is done independent of the underlying
+ // feature flag {FLAG_branch_load_poisoning} to make the snapshot work with
+ // both configurations. It is safe to always do this, because the underlying
+ // register is caller-saved and can be arbitrarily clobbered.
+ __ ResetSpeculationPoisonRegister();
+
// Compute the handler entry address and jump to it.
__ li(t9, Operand(pending_handler_entrypoint_address));
__ lw(t9, MemOperand(t9));
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index b2e52745ed..4641090cbc 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -7,6 +7,7 @@
#include <memory>
#include "src/codegen.h"
+#include "src/isolate.h"
#include "src/macro-assembler.h"
#include "src/mips/simulator-mips.h"
diff --git a/deps/v8/src/mips/frame-constants-mips.h b/deps/v8/src/mips/frame-constants-mips.h
index 6d7e471b09..e90c7d957f 100644
--- a/deps/v8/src/mips/frame-constants-mips.h
+++ b/deps/v8/src/mips/frame-constants-mips.h
@@ -10,40 +10,42 @@ namespace internal {
class EntryFrameConstants : public AllStatic {
public:
- static const int kCallerFPOffset =
+ static constexpr int kCallerFPOffset =
-(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
};
class ExitFrameConstants : public TypedFrameConstants {
public:
- static const int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
- static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+ static constexpr int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+ static constexpr int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
DEFINE_TYPED_FRAME_SIZES(2);
// The caller fields are below the frame pointer on the stack.
- static const int kCallerFPOffset = +0 * kPointerSize;
+ static constexpr int kCallerFPOffset = +0 * kPointerSize;
// The calling JS function is between FP and PC.
- static const int kCallerPCOffset = +1 * kPointerSize;
+ static constexpr int kCallerPCOffset = +1 * kPointerSize;
// MIPS-specific: a pointer to the old sp to avoid unnecessary calculations.
- static const int kCallerSPOffset = +2 * kPointerSize;
+ static constexpr int kCallerSPOffset = +2 * kPointerSize;
// FP-relative displacement of the caller's SP.
- static const int kCallerSPDisplacement = +2 * kPointerSize;
+ static constexpr int kCallerSPDisplacement = +2 * kPointerSize;
- static const int kConstantPoolOffset = 0; // Not used.
+ static constexpr int kConstantPoolOffset = 0; // Not used.
};
class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
- static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
- static const int kLastParameterOffset = +2 * kPointerSize;
- static const int kFunctionOffset = StandardFrameConstants::kFunctionOffset;
+ static constexpr int kLocal0Offset =
+ StandardFrameConstants::kExpressionsOffset;
+ static constexpr int kLastParameterOffset = +2 * kPointerSize;
+ static constexpr int kFunctionOffset =
+ StandardFrameConstants::kFunctionOffset;
// Caller SP-relative.
- static const int kParam0Offset = -2 * kPointerSize;
- static const int kReceiverOffset = -1 * kPointerSize;
+ static constexpr int kParam0Offset = -2 * kPointerSize;
+ static constexpr int kReceiverOffset = -1 * kPointerSize;
};
} // namespace internal
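
The frame-constants change above is a mechanical `static const int` to `static constexpr int` migration. For integral members the observable behaviour is largely the same; constexpr states the compile-time intent explicitly, enforces constant initialization, and (since C++17) such members are implicitly inline, so odr-using them needs no out-of-line definition. A two-member illustration, not part of the V8 sources:

struct DemoFrameConstants {
  static const int kOldStyle = 2 * 4;      // already a constant expression for integers
  static constexpr int kNewStyle = 2 * 4;  // intent made explicit, constant initialization required
};

static_assert(DemoFrameConstants::kNewStyle == 8, "usable in constant expressions");

int main() {
  return DemoFrameConstants::kNewStyle - DemoFrameConstants::kOldStyle;  // 0
}
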
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 84cf23c832..35953e509a 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -38,6 +38,14 @@ TurboAssembler::TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
}
}
+static inline bool IsZero(const Operand& rt) {
+ if (rt.is_reg()) {
+ return rt.rm() == zero_reg;
+ } else {
+ return rt.immediate() == 0;
+ }
+}
+
int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1,
Register exclusion2,
@@ -821,6 +829,70 @@ void TurboAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
}
}
+void TurboAssembler::Sle(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ slt(rd, rt.rm(), rs);
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
+ DCHECK(rs != scratch);
+ li(scratch, rt);
+ slt(rd, scratch, rs);
+ }
+ xori(rd, rd, 1);
+}
+
+void TurboAssembler::Sleu(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ sltu(rd, rt.rm(), rs);
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
+ DCHECK(rs != scratch);
+ li(scratch, rt);
+ sltu(rd, scratch, rs);
+ }
+ xori(rd, rd, 1);
+}
+
+void TurboAssembler::Sge(Register rd, Register rs, const Operand& rt) {
+ Slt(rd, rs, rt);
+ xori(rd, rd, 1);
+}
+
+void TurboAssembler::Sgeu(Register rd, Register rs, const Operand& rt) {
+ Sltu(rd, rs, rt);
+ xori(rd, rd, 1);
+}
+
+void TurboAssembler::Sgt(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ slt(rd, rt.rm(), rs);
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
+ DCHECK(rs != scratch);
+ li(scratch, rt);
+ slt(rd, scratch, rs);
+ }
+}
+
+void TurboAssembler::Sgtu(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ sltu(rd, rt.rm(), rs);
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
+ DCHECK(rs != scratch);
+ li(scratch, rt);
+ sltu(rd, scratch, rs);
+ }
+}
+
void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) {
if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
if (rt.is_reg()) {
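
The new Sle/Sleu/Sge/Sgeu/Sgt/Sgtu helpers above derive every comparison from MIPS's set-on-less-than primitive: swapping the operands gives greater-than, and xor-ing the 0/1 result with 1 negates it for the or-equal forms. The same identities written out in plain C++ for reference:

#include <cassert>
#include <cstdint>

// MIPS slt primitive: 1 if a < b, else 0.
int Slt(int32_t a, int32_t b) { return a < b ? 1 : 0; }

int Sgt(int32_t a, int32_t b) { return Slt(b, a); }      // swap operands
int Sle(int32_t a, int32_t b) { return Slt(b, a) ^ 1; }  // !(b < a)
int Sge(int32_t a, int32_t b) { return Slt(a, b) ^ 1; }  // !(a < b)

int main() {
  assert(Sgt(5, 3) == 1 && Sgt(3, 5) == 0);
  assert(Sle(3, 3) == 1 && Sle(5, 3) == 0);
  assert(Sge(3, 3) == 1 && Sge(2, 3) == 0);
  return 0;
}
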
@@ -901,37 +973,38 @@ void TurboAssembler::ByteSwapSigned(Register dest, Register src,
int operand_size) {
DCHECK(operand_size == 1 || operand_size == 2 || operand_size == 4);
+ Register input = src;
if (operand_size == 2) {
- Seh(src, src);
+ input = dest;
+ Seh(dest, src);
} else if (operand_size == 1) {
- Seb(src, src);
+ input = dest;
+ Seb(dest, src);
}
// No need to do any preparation if operand_size is 4
if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
- wsbh(dest, src);
+ wsbh(dest, input);
rotr(dest, dest, 16);
} else if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) {
- Register tmp = t0;
- Register tmp2 = t1;
+ Register tmp = at;
+ Register tmp2 = t8;
+ DCHECK(dest != tmp && dest != tmp2);
+ DCHECK(src != tmp && src != tmp2);
- andi(tmp2, src, 0xFF);
- sll(tmp2, tmp2, 24);
- or_(tmp, zero_reg, tmp2);
+ andi(tmp2, input, 0xFF);
+ sll(tmp, tmp2, 24);
- andi(tmp2, src, 0xFF00);
+ andi(tmp2, input, 0xFF00);
sll(tmp2, tmp2, 8);
or_(tmp, tmp, tmp2);
- srl(src, src, 8);
- andi(tmp2, src, 0xFF00);
+ srl(tmp2, input, 8);
+ andi(tmp2, tmp2, 0xFF00);
or_(tmp, tmp, tmp2);
- srl(src, src, 16);
- andi(tmp2, src, 0xFF);
- or_(tmp, tmp, tmp2);
-
- or_(dest, tmp, zero_reg);
+ srl(tmp2, input, 24);
+ or_(dest, tmp, tmp2);
}
}
@@ -940,25 +1013,28 @@ void TurboAssembler::ByteSwapUnsigned(Register dest, Register src,
DCHECK(operand_size == 1 || operand_size == 2);
if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ Register input = src;
if (operand_size == 1) {
- andi(src, src, 0xFF);
+ input = dest;
+ andi(dest, src, 0xFF);
} else {
- andi(src, src, 0xFFFF);
+ input = dest;
+ andi(dest, src, 0xFFFF);
}
// No need to do any preparation if operand_size is 4
- wsbh(dest, src);
+ wsbh(dest, input);
rotr(dest, dest, 16);
} else if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) {
if (operand_size == 1) {
- sll(src, src, 24);
+ sll(dest, src, 24);
} else {
- Register tmp = t0;
+ Register tmp = at;
andi(tmp, src, 0xFF00);
- sll(src, src, 24);
+ sll(dest, src, 24);
sll(tmp, tmp, 8);
- or_(dest, tmp, src);
+ or_(dest, tmp, dest);
}
}
}
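
On MIPS32r1/Loongson, where wsbh and rotr are not available, the rewritten ByteSwapSigned fallback above assembles the swapped word from masked and shifted pieces of the input. The equivalent computation in portable C++, useful for checking what the instruction sequence produces (illustrative, not V8 code):

#include <cassert>
#include <cstdint>

uint32_t ByteSwap32(uint32_t x) {
  uint32_t r = (x & 0xFFu) << 24;  // byte 0 -> byte 3
  r |= (x & 0xFF00u) << 8;         // byte 1 -> byte 2
  r |= (x >> 8) & 0xFF00u;         // byte 2 -> byte 1
  r |= x >> 24;                    // byte 3 -> byte 0
  return r;
}

int main() {
  assert(ByteSwap32(0x12345678u) == 0x78563412u);
  return 0;
}
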
@@ -1163,47 +1239,54 @@ void TurboAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
void TurboAssembler::Ldc1(FPURegister fd, const MemOperand& src) {
// Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
// load to two 32-bit loads.
- BlockTrampolinePoolScope block_trampoline_pool(this);
- DCHECK(Register::kMantissaOffset <= 4 && Register::kExponentOffset <= 4);
- MemOperand tmp = src;
- AdjustBaseAndOffset(tmp, OffsetAccessType::TWO_ACCESSES);
- lwc1(fd, MemOperand(tmp.rm(), tmp.offset() + Register::kMantissaOffset));
- if (IsFp32Mode()) { // fp32 mode.
- FPURegister nextfpreg = FPURegister::from_code(fd.code() + 1);
- lwc1(nextfpreg,
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK(Register::kMantissaOffset <= 4 && Register::kExponentOffset <= 4);
+ MemOperand tmp = src;
+ AdjustBaseAndOffset(tmp, OffsetAccessType::TWO_ACCESSES);
+ lwc1(fd, MemOperand(tmp.rm(), tmp.offset() + Register::kMantissaOffset));
+ if (IsFp32Mode()) { // fp32 mode.
+ FPURegister nextfpreg = FPURegister::from_code(fd.code() + 1);
+ lwc1(nextfpreg,
+ MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset));
+ } else {
+ DCHECK(IsFp64Mode() || IsFpxxMode());
+ // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(src.rm() != scratch);
+ lw(scratch,
MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset));
- } else {
- DCHECK(IsFp64Mode() || IsFpxxMode());
- // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6
- DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- DCHECK(src.rm() != scratch);
- lw(scratch, MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset));
- Mthc1(scratch, fd);
+ Mthc1(scratch, fd);
+ }
}
+ CheckTrampolinePoolQuick(1);
}
void TurboAssembler::Sdc1(FPURegister fd, const MemOperand& src) {
// Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
// store to two 32-bit stores.
- BlockTrampolinePoolScope block_trampoline_pool(this);
- DCHECK(Register::kMantissaOffset <= 4 && Register::kExponentOffset <= 4);
- MemOperand tmp = src;
- AdjustBaseAndOffset(tmp, OffsetAccessType::TWO_ACCESSES);
- swc1(fd, MemOperand(tmp.rm(), tmp.offset() + Register::kMantissaOffset));
- if (IsFp32Mode()) { // fp32 mode.
- FPURegister nextfpreg = FPURegister::from_code(fd.code() + 1);
- swc1(nextfpreg,
- MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset));
- } else {
- DCHECK(IsFp64Mode() || IsFpxxMode());
- // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6
- DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
- DCHECK(src.rm() != t8);
- Mfhc1(t8, fd);
- sw(t8, MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset));
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK(Register::kMantissaOffset <= 4 && Register::kExponentOffset <= 4);
+ MemOperand tmp = src;
+ AdjustBaseAndOffset(tmp, OffsetAccessType::TWO_ACCESSES);
+ swc1(fd, MemOperand(tmp.rm(), tmp.offset() + Register::kMantissaOffset));
+ if (IsFp32Mode()) { // fp32 mode.
+ FPURegister nextfpreg = FPURegister::from_code(fd.code() + 1);
+ swc1(nextfpreg,
+ MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset));
+ } else {
+ DCHECK(IsFp64Mode() || IsFpxxMode());
+ // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ DCHECK(src.rm() != t8);
+ Mfhc1(t8, fd);
+ sw(t8, MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset));
+ }
}
+ CheckTrampolinePoolQuick(1);
}
void TurboAssembler::Ll(Register rd, const MemOperand& rs) {
@@ -1360,33 +1443,33 @@ void TurboAssembler::SubPair(Register dst_low, Register dst_high,
void TurboAssembler::ShlPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
- Register shift) {
+ Register shift, Register scratch1,
+ Register scratch2) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
Label done;
- Register kScratchReg = s3;
- Register kScratchReg2 = s4;
- And(shift, shift, 0x3F);
- sllv(dst_low, src_low, shift);
- Nor(kScratchReg2, zero_reg, shift);
- srl(kScratchReg, src_low, 1);
- srlv(kScratchReg, kScratchReg, kScratchReg2);
- sllv(dst_high, src_high, shift);
- Or(dst_high, dst_high, kScratchReg);
- And(kScratchReg, shift, 32);
+ Register scratch3 = t8;
+ And(scratch3, shift, 0x3F);
+ sllv(dst_low, src_low, scratch3);
+ Nor(scratch2, zero_reg, scratch3);
+ srl(scratch1, src_low, 1);
+ srlv(scratch1, scratch1, scratch2);
+ sllv(dst_high, src_high, scratch3);
+ Or(dst_high, dst_high, scratch1);
+ And(scratch1, scratch3, 32);
if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
- Branch(&done, eq, kScratchReg, Operand(zero_reg));
+ Branch(&done, eq, scratch1, Operand(zero_reg));
mov(dst_high, dst_low);
mov(dst_low, zero_reg);
} else {
- movn(dst_high, dst_low, kScratchReg);
- movn(dst_low, zero_reg, kScratchReg);
+ movn(dst_high, dst_low, scratch1);
+ movn(dst_low, zero_reg, scratch1);
}
bind(&done);
}
void TurboAssembler::ShlPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
- uint32_t shift) {
- Register kScratchReg = s3;
+ uint32_t shift, Register scratch) {
shift = shift & 0x3F;
if (shift == 0) {
mov(dst_low, src_low);
@@ -1399,8 +1482,8 @@ void TurboAssembler::ShlPair(Register dst_low, Register dst_high,
} else {
sll(dst_high, src_high, shift);
sll(dst_low, src_low, shift);
- srl(kScratchReg, src_low, 32 - shift);
- Or(dst_high, dst_high, kScratchReg);
+ srl(scratch, src_low, 32 - shift);
+ Or(dst_high, dst_high, scratch);
}
} else if (shift == 32) {
mov(dst_low, zero_reg);
@@ -1414,33 +1497,33 @@ void TurboAssembler::ShlPair(Register dst_low, Register dst_high,
void TurboAssembler::ShrPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
- Register shift) {
+ Register shift, Register scratch1,
+ Register scratch2) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
Label done;
- Register kScratchReg = s3;
- Register kScratchReg2 = s4;
- And(shift, shift, 0x3F);
- srlv(dst_high, src_high, shift);
- Nor(kScratchReg2, zero_reg, shift);
- sll(kScratchReg, src_high, 1);
- sllv(kScratchReg, kScratchReg, kScratchReg2);
- srlv(dst_low, src_low, shift);
- Or(dst_low, dst_low, kScratchReg);
- And(kScratchReg, shift, 32);
+ Register scratch3 = t8;
+ And(scratch3, shift, 0x3F);
+ srlv(dst_high, src_high, scratch3);
+ Nor(scratch2, zero_reg, scratch3);
+ sll(scratch1, src_high, 1);
+ sllv(scratch1, scratch1, scratch2);
+ srlv(dst_low, src_low, scratch3);
+ Or(dst_low, dst_low, scratch1);
+ And(scratch1, scratch3, 32);
if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
- Branch(&done, eq, kScratchReg, Operand(zero_reg));
+ Branch(&done, eq, scratch1, Operand(zero_reg));
mov(dst_low, dst_high);
mov(dst_high, zero_reg);
} else {
- movn(dst_low, dst_high, kScratchReg);
- movn(dst_high, zero_reg, kScratchReg);
+ movn(dst_low, dst_high, scratch1);
+ movn(dst_high, zero_reg, scratch1);
}
bind(&done);
}
void TurboAssembler::ShrPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
- uint32_t shift) {
- Register kScratchReg = s3;
+ uint32_t shift, Register scratch) {
shift = shift & 0x3F;
if (shift == 0) {
mov(dst_low, src_low);
@@ -1454,8 +1537,8 @@ void TurboAssembler::ShrPair(Register dst_low, Register dst_high,
srl(dst_high, src_high, shift);
srl(dst_low, src_low, shift);
shift = 32 - shift;
- sll(kScratchReg, src_high, shift);
- Or(dst_low, dst_low, kScratchReg);
+ sll(scratch, src_high, shift);
+ Or(dst_low, dst_low, scratch);
}
} else if (shift == 32) {
mov(dst_high, zero_reg);
@@ -1469,19 +1552,20 @@ void TurboAssembler::ShrPair(Register dst_low, Register dst_high,
void TurboAssembler::SarPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
- Register shift) {
+ Register shift, Register scratch1,
+ Register scratch2) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
Label done;
- Register kScratchReg = s3;
- Register kScratchReg2 = s4;
- And(shift, shift, 0x3F);
- srav(dst_high, src_high, shift);
- Nor(kScratchReg2, zero_reg, shift);
- sll(kScratchReg, src_high, 1);
- sllv(kScratchReg, kScratchReg, kScratchReg2);
- srlv(dst_low, src_low, shift);
- Or(dst_low, dst_low, kScratchReg);
- And(kScratchReg, shift, 32);
- Branch(&done, eq, kScratchReg, Operand(zero_reg));
+ Register scratch3 = t8;
+ And(scratch3, shift, 0x3F);
+ srav(dst_high, src_high, scratch3);
+ Nor(scratch2, zero_reg, scratch3);
+ sll(scratch1, src_high, 1);
+ sllv(scratch1, scratch1, scratch2);
+ srlv(dst_low, src_low, scratch3);
+ Or(dst_low, dst_low, scratch1);
+ And(scratch1, scratch3, 32);
+ Branch(&done, eq, scratch1, Operand(zero_reg));
mov(dst_low, dst_high);
sra(dst_high, dst_high, 31);
bind(&done);
@@ -1489,8 +1573,7 @@ void TurboAssembler::SarPair(Register dst_low, Register dst_high,
void TurboAssembler::SarPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
- uint32_t shift) {
- Register kScratchReg = s3;
+ uint32_t shift, Register scratch) {
shift = shift & 0x3F;
if (shift == 0) {
mov(dst_low, src_low);
@@ -1504,8 +1587,8 @@ void TurboAssembler::SarPair(Register dst_low, Register dst_high,
sra(dst_high, src_high, shift);
srl(dst_low, src_low, shift);
shift = 32 - shift;
- sll(kScratchReg, src_high, shift);
- Or(dst_low, dst_low, kScratchReg);
+ sll(scratch, src_high, shift);
+ Or(dst_low, dst_low, scratch);
}
} else if (shift == 32) {
sra(dst_high, src_high, 31);
@@ -1620,7 +1703,8 @@ void TurboAssembler::Neg_s(FPURegister fd, FPURegister fs) {
Label is_nan, done;
Register scratch1 = t8;
Register scratch2 = t9;
- BranchF32(nullptr, &is_nan, eq, fs, fs);
+ CompareIsNanF32(fs, fs);
+ BranchTrueShortF(&is_nan);
Branch(USE_DELAY_SLOT, &done);
// For NaN input, neg_s will return the same NaN value,
// while the sign has to be changed separately.
@@ -1644,7 +1728,8 @@ void TurboAssembler::Neg_d(FPURegister fd, FPURegister fs) {
Label is_nan, done;
Register scratch1 = t8;
Register scratch2 = t9;
- BranchF64(nullptr, &is_nan, eq, fs, fs);
+ CompareIsNanF64(fs, fs);
+ BranchTrueShortF(&is_nan);
Branch(USE_DELAY_SLOT, &done);
// For NaN input, neg_d will return the same NaN value,
// while the sign has to be changed separately.
@@ -1765,7 +1850,8 @@ void TurboAssembler::Trunc_uw_d(FPURegister fd, Register rs,
// Test if scratch > fd.
// If fd < 2^31 we can convert it normally.
Label simple_convert;
- BranchF(&simple_convert, nullptr, lt, fd, scratch);
+ CompareF64(OLT, fd, scratch);
+ BranchTrueShortF(&simple_convert);
// First we subtract 2^31 from fd, then trunc it to rs
// and add 2^31 to rs.
@@ -1799,7 +1885,8 @@ void TurboAssembler::Trunc_uw_s(FPURegister fd, Register rs,
// Test if scratch > fd.
// If fd < 2^31 we can convert it normally.
Label simple_convert;
- BranchF32(&simple_convert, nullptr, lt, fd, scratch);
+ CompareF32(OLT, fd, scratch);
+ BranchTrueShortF(&simple_convert);
// First we subtract 2^31 from fd, then trunc it to rs
// and add 2^31 to rs.
@@ -1818,6 +1905,125 @@ void TurboAssembler::Trunc_uw_s(FPURegister fd, Register rs,
bind(&done);
}
+template <typename RoundFunc>
+void TurboAssembler::RoundDouble(FPURegister dst, FPURegister src,
+ FPURoundingMode mode, RoundFunc round) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register scratch = t8;
+ Register scratch2 = t9;
+ if (IsMipsArchVariant(kMips32r6)) {
+ cfc1(scratch, FCSR);
+ li(at, Operand(mode));
+ ctc1(at, FCSR);
+ rint_d(dst, src);
+ ctc1(scratch, FCSR);
+ } else {
+ Label done;
+ Mfhc1(scratch, src);
+ Ext(at, scratch, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+ Branch(USE_DELAY_SLOT, &done, hs, at,
+ Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits));
+ mov_d(dst, src);
+ round(this, dst, src);
+ Move(at, scratch2, dst);
+ or_(at, at, scratch2);
+ Branch(USE_DELAY_SLOT, &done, ne, at, Operand(zero_reg));
+ cvt_d_l(dst, dst);
+ srl(at, scratch, 31);
+ sll(at, at, 31);
+ Mthc1(at, dst);
+ bind(&done);
+ }
+}
+
+void TurboAssembler::Floor_d_d(FPURegister dst, FPURegister src) {
+ RoundDouble(dst, src, mode_floor,
+ [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
+ tasm->floor_l_d(dst, src);
+ });
+}
+
+void TurboAssembler::Ceil_d_d(FPURegister dst, FPURegister src) {
+ RoundDouble(dst, src, mode_ceil,
+ [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
+ tasm->ceil_l_d(dst, src);
+ });
+}
+
+void TurboAssembler::Trunc_d_d(FPURegister dst, FPURegister src) {
+ RoundDouble(dst, src, mode_trunc,
+ [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
+ tasm->trunc_l_d(dst, src);
+ });
+}
+
+void TurboAssembler::Round_d_d(FPURegister dst, FPURegister src) {
+ RoundDouble(dst, src, mode_round,
+ [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
+ tasm->round_l_d(dst, src);
+ });
+}
+
+template <typename RoundFunc>
+void TurboAssembler::RoundFloat(FPURegister dst, FPURegister src,
+ FPURoundingMode mode, RoundFunc round) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register scratch = t8;
+ if (IsMipsArchVariant(kMips32r6)) {
+ cfc1(scratch, FCSR);
+ li(at, Operand(mode));
+ ctc1(at, FCSR);
+ rint_s(dst, src);
+ ctc1(scratch, FCSR);
+ } else {
+ int32_t kFloat32ExponentBias = 127;
+ int32_t kFloat32MantissaBits = 23;
+ int32_t kFloat32ExponentBits = 8;
+ Label done;
+ mfc1(scratch, src);
+ Ext(at, scratch, kFloat32MantissaBits, kFloat32ExponentBits);
+ Branch(USE_DELAY_SLOT, &done, hs, at,
+ Operand(kFloat32ExponentBias + kFloat32MantissaBits));
+ mov_s(dst, src);
+ round(this, dst, src);
+ mfc1(at, dst);
+ Branch(USE_DELAY_SLOT, &done, ne, at, Operand(zero_reg));
+ cvt_s_w(dst, dst);
+ srl(at, scratch, 31);
+ sll(at, at, 31);
+ mtc1(at, dst);
+ bind(&done);
+ }
+}
+
+void TurboAssembler::Floor_s_s(FPURegister dst, FPURegister src) {
+ RoundFloat(dst, src, mode_floor,
+ [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
+ tasm->floor_w_s(dst, src);
+ });
+}
+
+void TurboAssembler::Ceil_s_s(FPURegister dst, FPURegister src) {
+ RoundFloat(dst, src, mode_ceil,
+ [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
+ tasm->ceil_w_s(dst, src);
+ });
+}
+
+void TurboAssembler::Trunc_s_s(FPURegister dst, FPURegister src) {
+ RoundFloat(dst, src, mode_trunc,
+ [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
+ tasm->trunc_w_s(dst, src);
+ });
+}
+
+void TurboAssembler::Round_s_s(FPURegister dst, FPURegister src) {
+ RoundFloat(dst, src, mode_round,
+ [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
+ tasm->round_w_s(dst, src);
+ });
+}
+
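Taken together, the pre-r6 fallback in RoundDouble amounts to the host-side sketch below for the floor case (on r6 the same result is obtained by temporarily switching the FCSR rounding mode and using rint_d). With kExponentBias = 1023 and kMantissaBits = 52, the early exit covers NaN, infinities and values already too large to carry a fractional part:

#include <cmath>

// Host-side model of Floor_d_d on pre-r6 cores: values whose magnitude is at
// least 2^52 (plus NaN/Inf) are returned unchanged; otherwise the value is
// rounded through the 64-bit integer path, preserving the sign of the input
// when the rounded result is zero.
static double FloorDoubleModel(double x) {
  if (!(std::fabs(x) < 4503599627370496.0)) return x;  // 2^52; also catches NaN
  double rounded = std::floor(x);
  return rounded == 0.0 ? std::copysign(0.0, x) : rounded;
}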
void TurboAssembler::Mthc1(Register rt, FPURegister fs) {
if (IsFp32Mode()) {
mtc1(rt, fs.high());
@@ -1882,205 +2088,68 @@ void TurboAssembler::Msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
}
}
-void TurboAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
- Label* nan, Condition cond, FPURegister cmp1,
- FPURegister cmp2, BranchDelaySlot bd) {
- {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- if (cond == al) {
- Branch(bd, target);
- return;
- }
+void TurboAssembler::CompareF(SecondaryField sizeField, FPUCondition cc,
+ FPURegister cmp1, FPURegister cmp2) {
+ if (IsMipsArchVariant(kMips32r6)) {
+ sizeField = sizeField == D ? L : W;
+ DCHECK(cmp1 != kDoubleCompareReg && cmp2 != kDoubleCompareReg);
+ cmp(cc, sizeField, kDoubleCompareReg, cmp1, cmp2);
+ } else {
+ c(cc, sizeField, cmp1, cmp2);
+ }
+}
- if (IsMipsArchVariant(kMips32r6)) {
- sizeField = sizeField == D ? L : W;
- }
- DCHECK(nan || target);
- // Check for unordered (NaN) cases.
- if (nan) {
- bool long_branch =
- nan->is_bound() ? !is_near(nan) : is_trampoline_emitted();
- if (!IsMipsArchVariant(kMips32r6)) {
- if (long_branch) {
- Label skip;
- c(UN, sizeField, cmp1, cmp2);
- bc1f(&skip);
- nop();
- BranchLong(nan, bd);
- bind(&skip);
- } else {
- c(UN, sizeField, cmp1, cmp2);
- bc1t(nan);
- if (bd == PROTECT) {
- nop();
- }
- }
- } else {
- // Use kDoubleCompareReg for comparison result. It has to be unavailable
- // to lithium register allocator.
- DCHECK(cmp1 != kDoubleCompareReg && cmp2 != kDoubleCompareReg);
- if (long_branch) {
- Label skip;
- cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1eqz(&skip, kDoubleCompareReg);
- nop();
- BranchLong(nan, bd);
- bind(&skip);
- } else {
- cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1nez(nan, kDoubleCompareReg);
- if (bd == PROTECT) {
- nop();
- }
- }
- }
- }
+void TurboAssembler::CompareIsNanF(SecondaryField sizeField, FPURegister cmp1,
+ FPURegister cmp2) {
+ CompareF(sizeField, UN, cmp1, cmp2);
+}
- if (target) {
- bool long_branch =
- target->is_bound() ? !is_near(target) : is_trampoline_emitted();
- if (long_branch) {
- Label skip;
- Condition neg_cond = NegateFpuCondition(cond);
- BranchShortF(sizeField, &skip, neg_cond, cmp1, cmp2, bd);
- BranchLong(target, bd);
- bind(&skip);
- } else {
- BranchShortF(sizeField, target, cond, cmp1, cmp2, bd);
- }
- }
+void TurboAssembler::BranchTrueShortF(Label* target) {
+ if (IsMipsArchVariant(kMips32r6)) {
+ bc1nez(target, kDoubleCompareReg);
+ nop();
+ } else {
+ bc1t(target);
+ nop();
}
}
-void TurboAssembler::BranchShortF(SecondaryField sizeField, Label* target,
- Condition cc, FPURegister cmp1,
- FPURegister cmp2, BranchDelaySlot bd) {
- if (!IsMipsArchVariant(kMips32r6)) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- if (target) {
- // Here NaN cases were either handled by this function or are assumed to
- // have been handled by the caller.
- switch (cc) {
- case lt:
- c(OLT, sizeField, cmp1, cmp2);
- bc1t(target);
- break;
- case ult:
- c(ULT, sizeField, cmp1, cmp2);
- bc1t(target);
- break;
- case gt:
- c(ULE, sizeField, cmp1, cmp2);
- bc1f(target);
- break;
- case ugt:
- c(OLE, sizeField, cmp1, cmp2);
- bc1f(target);
- break;
- case ge:
- c(ULT, sizeField, cmp1, cmp2);
- bc1f(target);
- break;
- case uge:
- c(OLT, sizeField, cmp1, cmp2);
- bc1f(target);
- break;
- case le:
- c(OLE, sizeField, cmp1, cmp2);
- bc1t(target);
- break;
- case ule:
- c(ULE, sizeField, cmp1, cmp2);
- bc1t(target);
- break;
- case eq:
- c(EQ, sizeField, cmp1, cmp2);
- bc1t(target);
- break;
- case ueq:
- c(UEQ, sizeField, cmp1, cmp2);
- bc1t(target);
- break;
- case ne: // Unordered or not equal.
- c(EQ, sizeField, cmp1, cmp2);
- bc1f(target);
- break;
- case ogl:
- c(UEQ, sizeField, cmp1, cmp2);
- bc1f(target);
- break;
- default:
- CHECK(0);
- }
- }
+void TurboAssembler::BranchFalseShortF(Label* target) {
+ if (IsMipsArchVariant(kMips32r6)) {
+ bc1eqz(target, kDoubleCompareReg);
+ nop();
} else {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- if (target) {
- // Here NaN cases were either handled by this function or are assumed to
- // have been handled by the caller.
- // Unsigned conditions are treated as their signed counterpart.
- // Use kDoubleCompareReg for comparison result, it is
- // valid in fp64 (FR = 1) mode which is implied for mips32r6.
- DCHECK(cmp1 != kDoubleCompareReg && cmp2 != kDoubleCompareReg);
- switch (cc) {
- case lt:
- cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1nez(target, kDoubleCompareReg);
- break;
- case ult:
- cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1nez(target, kDoubleCompareReg);
- break;
- case gt:
- cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1eqz(target, kDoubleCompareReg);
- break;
- case ugt:
- cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1eqz(target, kDoubleCompareReg);
- break;
- case ge:
- cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1eqz(target, kDoubleCompareReg);
- break;
- case uge:
- cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1eqz(target, kDoubleCompareReg);
- break;
- case le:
- cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1nez(target, kDoubleCompareReg);
- break;
- case ule:
- cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1nez(target, kDoubleCompareReg);
- break;
- case eq:
- cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1nez(target, kDoubleCompareReg);
- break;
- case ueq:
- cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1nez(target, kDoubleCompareReg);
- break;
- case ne:
- cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1eqz(target, kDoubleCompareReg);
- break;
- case ogl:
- cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1eqz(target, kDoubleCompareReg);
- break;
- default:
- CHECK(0);
- }
- }
- }
- if (bd == PROTECT) {
+ bc1f(target);
nop();
}
}
+void TurboAssembler::BranchTrueF(Label* target) {
+ bool long_branch =
+ target->is_bound() ? !is_near(target) : is_trampoline_emitted();
+ if (long_branch) {
+ Label skip;
+ BranchFalseShortF(&skip);
+ BranchLong(target, PROTECT);
+ bind(&skip);
+ } else {
+ BranchTrueShortF(target);
+ }
+}
+
+void TurboAssembler::BranchFalseF(Label* target) {
+ bool long_branch =
+ target->is_bound() ? !is_near(target) : is_trampoline_emitted();
+ if (long_branch) {
+ Label skip;
+ BranchTrueShortF(&skip);
+ BranchLong(target, PROTECT);
+ bind(&skip);
+ } else {
+ BranchFalseShortF(target);
+ }
+}
+
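Call sites that previously used the fused BranchF32/BranchF64 helpers are rewritten throughout this patch into an explicit compare followed by a branch on the FPU condition, as in this illustrative sketch (register and label names are placeholders):

// Old form: BranchF64(&below, &is_nan, lt, lhs, rhs);
CompareIsNanF64(lhs, rhs);     // unordered check first
BranchTrueF(&is_nan);          // long-range-safe variant
CompareF64(OLT, lhs, rhs);
BranchTrueShortF(&below);      // short variant when the target is near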
void TurboAssembler::BranchMSA(Label* target, MSABranchDF df,
MSABranchCondition cond, MSARegister wt,
BranchDelaySlot bd) {
@@ -2213,6 +2282,115 @@ void TurboAssembler::Move(FPURegister dst, uint64_t src) {
}
}
+void TurboAssembler::LoadZeroOnCondition(Register rd, Register rs,
+ const Operand& rt, Condition cond) {
+ switch (cond) {
+ case cc_always:
+ mov(rd, zero_reg);
+ break;
+ case eq:
+ if (rs == zero_reg) {
+ if (rt.is_reg()) {
+ LoadZeroIfConditionZero(rd, rt.rm());
+ } else {
+ if (rt.immediate() == 0) {
+ mov(rd, zero_reg);
+ } else {
+ nop();
+ }
+ }
+ } else if (IsZero(rt)) {
+ LoadZeroIfConditionZero(rd, rs);
+ } else {
+ Subu(t9, rs, rt);
+ LoadZeroIfConditionZero(rd, t9);
+ }
+ break;
+ case ne:
+ if (rs == zero_reg) {
+ if (rt.is_reg()) {
+ LoadZeroIfConditionNotZero(rd, rt.rm());
+ } else {
+ if (rt.immediate() != 0) {
+ mov(rd, zero_reg);
+ } else {
+ nop();
+ }
+ }
+ } else if (IsZero(rt)) {
+ LoadZeroIfConditionNotZero(rd, rs);
+ } else {
+ Subu(t9, rs, rt);
+ LoadZeroIfConditionNotZero(rd, t9);
+ }
+ break;
+
+ // Signed comparison.
+ case greater:
+ Sgt(t9, rs, rt);
+ LoadZeroIfConditionNotZero(rd, t9);
+ break;
+ case greater_equal:
+ Sge(t9, rs, rt);
+ LoadZeroIfConditionNotZero(rd, t9);
+ // rs >= rt
+ break;
+ case less:
+ Slt(t9, rs, rt);
+ LoadZeroIfConditionNotZero(rd, t9);
+ // rs < rt
+ break;
+ case less_equal:
+ Sle(t9, rs, rt);
+ LoadZeroIfConditionNotZero(rd, t9);
+ // rs <= rt
+ break;
+
+ // Unsigned comparison.
+ case Ugreater:
+ Sgtu(t9, rs, rt);
+ LoadZeroIfConditionNotZero(rd, t9);
+ // rs > rt
+ break;
+
+ case Ugreater_equal:
+ Sgeu(t9, rs, rt);
+ LoadZeroIfConditionNotZero(rd, t9);
+ // rs >= rt
+ break;
+ case Uless:
+ Sltu(t9, rs, rt);
+ LoadZeroIfConditionNotZero(rd, t9);
+ // rs < rt
+ break;
+ case Uless_equal:
+ Sleu(t9, rs, rt);
+ LoadZeroIfConditionNotZero(rd, t9);
+ // rs <= rt
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void TurboAssembler::LoadZeroIfConditionNotZero(Register dest,
+ Register condition) {
+ if (IsMipsArchVariant(kMips32r6)) {
+ seleqz(dest, dest, condition);
+ } else {
+ Movn(dest, zero_reg, condition);
+ }
+}
+
+void TurboAssembler::LoadZeroIfConditionZero(Register dest,
+ Register condition) {
+ if (IsMipsArchVariant(kMips32r6)) {
+ selnez(dest, dest, condition);
+ } else {
+ Movz(dest, zero_reg, condition);
+ }
+}
+
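LoadZeroOnCondition builds on the new Sgt/Sge/Sle/... pseudo-instructions: the predicate is materialized into t9 and the destination is then cleared via Movn/Movz (or seleqz/selnez on r6). An illustrative call, with placeholder registers:

// Clears v0 when a0 <= a1 and leaves it unchanged otherwise; internally this
// expands to Sle(t9, a0, Operand(a1)) followed by LoadZeroIfConditionNotZero.
LoadZeroOnCondition(v0, a0, Operand(a1), less_equal);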
void TurboAssembler::Movz(Register rd, Register rs, Register rt) {
if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
Label done;
@@ -2404,7 +2582,8 @@ void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
cvt_w_d(double_scratch, double_input);
mfc1(result, double_scratch);
cvt_d_w(double_scratch, double_scratch);
- BranchF(&done, nullptr, eq, double_input, double_scratch);
+ CompareF64(EQ, double_input, double_scratch);
+ BranchTrueShortF(&done);
int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
@@ -2450,7 +2629,7 @@ void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
DoubleRegister double_input,
Label* done) {
- DoubleRegister single_scratch = kLithiumScratchDouble.low();
+ DoubleRegister single_scratch = kScratchDoubleReg.low();
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
Register scratch2 = t9;
@@ -2601,14 +2780,6 @@ void TurboAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
}
-static inline bool IsZero(const Operand& rt) {
- if (rt.is_reg()) {
- return rt.rm() == zero_reg;
- } else {
- return rt.immediate() == 0;
- }
-}
-
int32_t TurboAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
if (L) {
offset = branch_offset_helper(L, bits) >> 2;
@@ -4066,27 +4237,13 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
- Label skip_hook, call_hook;
- ExternalReference debug_is_active =
- ExternalReference::debug_is_active_address(isolate());
- li(t0, Operand(debug_is_active));
- lb(t0, MemOperand(t0));
- Branch(&skip_hook, eq, t0, Operand(zero_reg));
-
+ Label skip_hook;
ExternalReference debug_hook_active =
ExternalReference::debug_hook_on_function_call_address(isolate());
li(t0, Operand(debug_hook_active));
lb(t0, MemOperand(t0));
- Branch(&call_hook, ne, t0, Operand(zero_reg));
-
- lw(t0, FieldMemOperand(fun, JSFunction::kSharedFunctionInfoOffset));
- lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kDebugInfoOffset));
- JumpIfSmi(t0, &skip_hook);
- lw(t0, FieldMemOperand(t0, DebugInfo::kFlagsOffset));
- And(t0, t0, Operand(Smi::FromInt(DebugInfo::kBreakAtEntry)));
Branch(&skip_hook, eq, t0, Operand(zero_reg));
- bind(&call_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -4245,254 +4402,90 @@ bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
return has_frame() || !stub->SometimesSetsUpAFrame();
}
-static inline void BranchOvfHelper(TurboAssembler* tasm, Register overflow_dst,
- Label* overflow_label,
- Label* no_overflow_label) {
- DCHECK(overflow_label || no_overflow_label);
- if (!overflow_label) {
- DCHECK(no_overflow_label);
- tasm->Branch(no_overflow_label, ge, overflow_dst, Operand(zero_reg));
+void TurboAssembler::AddOverflow(Register dst, Register left,
+ const Operand& right, Register overflow) {
+ Register right_reg = no_reg;
+ Register scratch = t8;
+ if (!right.is_reg()) {
+ li(at, Operand(right));
+ right_reg = at;
} else {
- tasm->Branch(overflow_label, lt, overflow_dst, Operand(zero_reg));
- if (no_overflow_label) tasm->Branch(no_overflow_label);
+ right_reg = right.rm();
}
-}
-void TurboAssembler::AddBranchOvf(Register dst, Register left,
- const Operand& right, Label* overflow_label,
- Label* no_overflow_label, Register scratch) {
- if (right.is_reg()) {
- AddBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
- scratch);
+ DCHECK(left != scratch && right_reg != scratch && dst != scratch &&
+ overflow != scratch);
+ DCHECK(overflow != left && overflow != right_reg);
+
+ if (dst == left || dst == right_reg) {
+ addu(scratch, left, right_reg);
+ xor_(overflow, scratch, left);
+ xor_(at, scratch, right_reg);
+ and_(overflow, overflow, at);
+ mov(dst, scratch);
} else {
- if (IsMipsArchVariant(kMips32r6)) {
- Register right_reg = t9;
- DCHECK(left != right_reg);
- li(right_reg, Operand(right));
- AddBranchOvf(dst, left, right_reg, overflow_label, no_overflow_label);
- } else {
- Register overflow_dst = t9;
- DCHECK(dst != scratch);
- DCHECK(dst != overflow_dst);
- DCHECK(scratch != overflow_dst);
- DCHECK(left != overflow_dst);
- if (dst == left) {
- mov(scratch, left); // Preserve left.
- Addu(dst, left, right.immediate()); // Left is overwritten.
- xor_(scratch, dst, scratch); // Original left.
- // Load right since xori takes uint16 as immediate.
- Addu(overflow_dst, zero_reg, right);
- xor_(overflow_dst, dst, overflow_dst);
- and_(overflow_dst, overflow_dst, scratch);
- } else {
- Addu(dst, left, right.immediate());
- xor_(overflow_dst, dst, left);
- // Load right since xori takes uint16 as immediate.
- Addu(scratch, zero_reg, right);
- xor_(scratch, dst, scratch);
- and_(overflow_dst, scratch, overflow_dst);
- }
- BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
- }
+ addu(dst, left, right_reg);
+ xor_(overflow, dst, left);
+ xor_(at, dst, right_reg);
+ and_(overflow, overflow, at);
}
}
-void TurboAssembler::AddBranchOvf(Register dst, Register left, Register right,
- Label* overflow_label,
- Label* no_overflow_label, Register scratch) {
- if (IsMipsArchVariant(kMips32r6)) {
- if (!overflow_label) {
- DCHECK(no_overflow_label);
- DCHECK(dst != scratch);
- Register left_reg = left == dst ? scratch : left;
- Register right_reg = right == dst ? t9 : right;
- DCHECK(dst != left_reg);
- DCHECK(dst != right_reg);
- Move(left_reg, left);
- Move(right_reg, right);
- addu(dst, left, right);
- Bnvc(left_reg, right_reg, no_overflow_label);
- } else {
- Bovc(left, right, overflow_label);
- addu(dst, left, right);
- if (no_overflow_label) bc(no_overflow_label);
- }
+void TurboAssembler::SubOverflow(Register dst, Register left,
+ const Operand& right, Register overflow) {
+ Register right_reg = no_reg;
+ Register scratch = t8;
+ if (!right.is_reg()) {
+ li(at, Operand(right));
+ right_reg = at;
} else {
- Register overflow_dst = t9;
- DCHECK(dst != scratch);
- DCHECK(dst != overflow_dst);
- DCHECK(scratch != overflow_dst);
- DCHECK(left != overflow_dst);
- DCHECK(right != overflow_dst);
- DCHECK(left != scratch);
- DCHECK(right != scratch);
-
- if (left == right && dst == left) {
- mov(overflow_dst, right);
- right = overflow_dst;
- }
-
- if (dst == left) {
- mov(scratch, left); // Preserve left.
- addu(dst, left, right); // Left is overwritten.
- xor_(scratch, dst, scratch); // Original left.
- xor_(overflow_dst, dst, right);
- and_(overflow_dst, overflow_dst, scratch);
- } else if (dst == right) {
- mov(scratch, right); // Preserve right.
- addu(dst, left, right); // Right is overwritten.
- xor_(scratch, dst, scratch); // Original right.
- xor_(overflow_dst, dst, left);
- and_(overflow_dst, overflow_dst, scratch);
- } else {
- addu(dst, left, right);
- xor_(overflow_dst, dst, left);
- xor_(scratch, dst, right);
- and_(overflow_dst, scratch, overflow_dst);
- }
- BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
- }
-}
-
-void TurboAssembler::SubBranchOvf(Register dst, Register left,
- const Operand& right, Label* overflow_label,
- Label* no_overflow_label, Register scratch) {
- DCHECK(overflow_label || no_overflow_label);
- if (right.is_reg()) {
- SubBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
- scratch);
- } else {
- Register overflow_dst = t9;
- DCHECK(dst != scratch);
- DCHECK(dst != overflow_dst);
- DCHECK(scratch != overflow_dst);
- DCHECK(left != overflow_dst);
- DCHECK(left != scratch);
- if (dst == left) {
- mov(scratch, left); // Preserve left.
- Subu(dst, left, right.immediate()); // Left is overwritten.
- // Load right since xori takes uint16 as immediate.
- Addu(overflow_dst, zero_reg, right);
- xor_(overflow_dst, scratch, overflow_dst); // scratch is original left.
- xor_(scratch, dst, scratch); // scratch is original left.
- and_(overflow_dst, scratch, overflow_dst);
- } else {
- Subu(dst, left, right);
- xor_(overflow_dst, dst, left);
- // Load right since xori takes uint16 as immediate.
- Addu(scratch, zero_reg, right);
- xor_(scratch, left, scratch);
- and_(overflow_dst, scratch, overflow_dst);
- }
- BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
- }
-}
-
-void TurboAssembler::SubBranchOvf(Register dst, Register left, Register right,
- Label* overflow_label,
- Label* no_overflow_label, Register scratch) {
- DCHECK(overflow_label || no_overflow_label);
- Register overflow_dst = t9;
- DCHECK(dst != scratch);
- DCHECK(dst != overflow_dst);
- DCHECK(scratch != overflow_dst);
- DCHECK(overflow_dst != left);
- DCHECK(overflow_dst != right);
- DCHECK(scratch != left);
- DCHECK(scratch != right);
-
- // This happens with some crankshaft code. Since Subu works fine if
- // left == right, let's not make that restriction here.
- if (left == right) {
- mov(dst, zero_reg);
- if (no_overflow_label) {
- Branch(no_overflow_label);
- }
+ right_reg = right.rm();
}
- if (dst == left) {
- mov(scratch, left); // Preserve left.
- subu(dst, left, right); // Left is overwritten.
- xor_(overflow_dst, dst, scratch); // scratch is original left.
- xor_(scratch, scratch, right); // scratch is original left.
- and_(overflow_dst, scratch, overflow_dst);
- } else if (dst == right) {
- mov(scratch, right); // Preserve right.
- subu(dst, left, right); // Right is overwritten.
- xor_(overflow_dst, dst, left);
- xor_(scratch, left, scratch); // Original right.
- and_(overflow_dst, scratch, overflow_dst);
- } else {
- subu(dst, left, right);
- xor_(overflow_dst, dst, left);
- xor_(scratch, left, right);
- and_(overflow_dst, scratch, overflow_dst);
- }
- BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
-}
-
-static inline void BranchOvfHelperMult(TurboAssembler* tasm,
- Register overflow_dst,
- Label* overflow_label,
- Label* no_overflow_label) {
- DCHECK(overflow_label || no_overflow_label);
- if (!overflow_label) {
- DCHECK(no_overflow_label);
- tasm->Branch(no_overflow_label, eq, overflow_dst, Operand(zero_reg));
- } else {
- tasm->Branch(overflow_label, ne, overflow_dst, Operand(zero_reg));
- if (no_overflow_label) tasm->Branch(no_overflow_label);
- }
-}
-
-void TurboAssembler::MulBranchOvf(Register dst, Register left,
- const Operand& right, Label* overflow_label,
- Label* no_overflow_label, Register scratch) {
- DCHECK(overflow_label || no_overflow_label);
- if (right.is_reg()) {
- MulBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
- scratch);
- } else {
- Register overflow_dst = t9;
- DCHECK(dst != scratch);
- DCHECK(dst != overflow_dst);
- DCHECK(scratch != overflow_dst);
- DCHECK(left != overflow_dst);
- DCHECK(left != scratch);
-
- Mul(overflow_dst, dst, left, right.immediate());
- sra(scratch, dst, 31);
- xor_(overflow_dst, overflow_dst, scratch);
+ DCHECK(left != scratch && right_reg != scratch && dst != scratch &&
+ overflow != scratch);
+ DCHECK(overflow != left && overflow != right_reg);
- BranchOvfHelperMult(this, overflow_dst, overflow_label, no_overflow_label);
+ if (dst == left || dst == right_reg) {
+ subu(scratch, left, right_reg);
+ xor_(overflow, left, scratch);
+ xor_(at, left, right_reg);
+ and_(overflow, overflow, at);
+ mov(dst, scratch);
+ } else {
+ subu(dst, left, right_reg);
+ xor_(overflow, left, dst);
+ xor_(at, left, right_reg);
+ and_(overflow, overflow, at);
}
}
-void TurboAssembler::MulBranchOvf(Register dst, Register left, Register right,
- Label* overflow_label,
- Label* no_overflow_label, Register scratch) {
- DCHECK(overflow_label || no_overflow_label);
- Register overflow_dst = t9;
- DCHECK(dst != scratch);
- DCHECK(dst != overflow_dst);
- DCHECK(scratch != overflow_dst);
- DCHECK(overflow_dst != left);
- DCHECK(overflow_dst != right);
- DCHECK(scratch != left);
- DCHECK(scratch != right);
+void TurboAssembler::MulOverflow(Register dst, Register left,
+ const Operand& right, Register overflow) {
+ Register right_reg = no_reg;
+ Register scratch = t8;
+ Register scratch2 = t9;
+ if (!right.is_reg()) {
+ li(at, Operand(right));
+ right_reg = at;
+ } else {
+ right_reg = right.rm();
+ }
- if (IsMipsArchVariant(kMips32r6) && dst == right) {
- mov(scratch, right);
- Mul(overflow_dst, dst, left, scratch);
- sra(scratch, dst, 31);
- xor_(overflow_dst, overflow_dst, scratch);
+ DCHECK(left != scratch && right_reg != scratch && dst != scratch &&
+ overflow != scratch);
+ DCHECK(overflow != left && overflow != right_reg);
+
+ if (dst == left || dst == right_reg) {
+ Mul(overflow, scratch2, left, right_reg);
+ sra(scratch, scratch2, 31);
+ xor_(overflow, overflow, scratch);
+ mov(dst, scratch2);
} else {
- Mul(overflow_dst, dst, left, right);
+ Mul(overflow, dst, left, right_reg);
sra(scratch, dst, 31);
- xor_(overflow_dst, overflow_dst, scratch);
+ xor_(overflow, overflow, scratch);
}
-
- BranchOvfHelperMult(this, overflow_dst, overflow_label, no_overflow_label);
}
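The label-based AddBranchOvf/SubBranchOvf/MulBranchOvf helpers are replaced by AddOverflow/SubOverflow/MulOverflow, which only materialize an overflow indicator and leave the branching to the caller. A sketch of the resulting call-site pattern (registers and labels are placeholders):

AddOverflow(v0, a0, Operand(a1), t2);
Branch(&overflowed, lt, t2, Operand(zero_reg));  // negative t2 => overflow

MulOverflow(v0, a0, Operand(a1), t2);
Branch(&overflowed, ne, t2, Operand(zero_reg));  // non-zero t2 => overflow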
void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
@@ -4551,12 +4544,19 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bd);
}
-void MacroAssembler::JumpToInstructionStream(const InstructionStream* stream) {
- int32_t bytes_address = reinterpret_cast<int32_t>(stream->bytes());
- li(kOffHeapTrampolineRegister, Operand(bytes_address, RelocInfo::NONE));
+void MacroAssembler::JumpToInstructionStream(Address entry) {
+ li(kOffHeapTrampolineRegister,
+ Operand(reinterpret_cast<int32_t>(entry), RelocInfo::OFF_HEAP_TARGET));
Jump(kOffHeapTrampolineRegister);
}
+void MacroAssembler::LoadWeakValue(Register out, Register in,
+ Label* target_if_cleared) {
+ Branch(target_if_cleared, eq, in, Operand(kClearedWeakHeapObject));
+
+ And(out, in, Operand(~kWeakHeapObjectMask));
+}
+
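LoadWeakValue unwraps an in-place weak reference: it bails out to the given label when the slot contains the cleared-weak sentinel and otherwise strips the weak tag bit. A hedged usage sketch (register and label names are placeholders):

LoadWeakValue(a2, a2, &cleared);   // a2 now holds the strong pointer
// ... use a2; bind(&cleared) handles the cleared-reference path ...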
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
DCHECK_GT(value, 0);
@@ -4943,6 +4943,20 @@ void MacroAssembler::AssertFixedArray(Register object) {
}
}
+void MacroAssembler::AssertConstructor(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, t8);
+ Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor, t8,
+ Operand(zero_reg));
+
+ lw(t8, FieldMemOperand(object, HeapObject::kMapOffset));
+ lbu(t8, FieldMemOperand(t8, Map::kBitFieldOffset));
+ And(t8, t8, Operand(Map::IsConstructorBit::kMask));
+ Check(ne, AbortReason::kOperandIsNotAConstructor, t8, Operand(zero_reg));
+ }
+}
+
void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@@ -5013,15 +5027,18 @@ void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1,
}
// Check if one of operands is NaN.
- BranchF32(nullptr, out_of_line, eq, src1, src2);
+ CompareIsNanF32(src1, src2);
+ BranchTrueF(out_of_line);
if (IsMipsArchVariant(kMips32r6)) {
max_s(dst, src1, src2);
} else {
Label return_left, return_right, done;
- BranchF32(&return_right, nullptr, lt, src1, src2);
- BranchF32(&return_left, nullptr, lt, src2, src1);
+ CompareF32(OLT, src1, src2);
+ BranchTrueShortF(&return_right);
+ CompareF32(OLT, src2, src1);
+ BranchTrueShortF(&return_left);
// Operands are equal, but check for +/-0.
mfc1(t8, src1);
@@ -5056,15 +5073,18 @@ void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1,
}
// Check if one of operands is NaN.
- BranchF32(nullptr, out_of_line, eq, src1, src2);
+ CompareIsNanF32(src1, src2);
+ BranchTrueF(out_of_line);
if (IsMipsArchVariant(kMips32r6)) {
min_s(dst, src1, src2);
} else {
Label return_left, return_right, done;
- BranchF32(&return_left, nullptr, lt, src1, src2);
- BranchF32(&return_right, nullptr, lt, src2, src1);
+ CompareF32(OLT, src1, src2);
+ BranchTrueShortF(&return_left);
+ CompareF32(OLT, src2, src1);
+ BranchTrueShortF(&return_right);
// Left equals right => check for -0.
mfc1(t8, src1);
@@ -5099,15 +5119,18 @@ void TurboAssembler::Float64Max(DoubleRegister dst, DoubleRegister src1,
}
// Check if one of operands is NaN.
- BranchF64(nullptr, out_of_line, eq, src1, src2);
+ CompareIsNanF64(src1, src2);
+ BranchTrueF(out_of_line);
if (IsMipsArchVariant(kMips32r6)) {
max_d(dst, src1, src2);
} else {
Label return_left, return_right, done;
- BranchF64(&return_right, nullptr, lt, src1, src2);
- BranchF64(&return_left, nullptr, lt, src2, src1);
+ CompareF64(OLT, src1, src2);
+ BranchTrueShortF(&return_right);
+ CompareF64(OLT, src2, src1);
+ BranchTrueShortF(&return_left);
// Left equals right => check for -0.
Mfhc1(t8, src1);
@@ -5143,15 +5166,18 @@ void TurboAssembler::Float64Min(DoubleRegister dst, DoubleRegister src1,
}
// Check if one of operands is NaN.
- BranchF64(nullptr, out_of_line, eq, src1, src2);
+ CompareIsNanF64(src1, src2);
+ BranchTrueF(out_of_line);
if (IsMipsArchVariant(kMips32r6)) {
min_d(dst, src1, src2);
} else {
Label return_left, return_right, done;
- BranchF64(&return_left, nullptr, lt, src1, src2);
- BranchF64(&return_right, nullptr, lt, src2, src1);
+ CompareF64(OLT, src1, src2);
+ BranchTrueShortF(&return_left);
+ CompareF64(OLT, src2, src1);
+ BranchTrueShortF(&return_right);
// Left equals right => check for -0.
Mfhc1(t8, src1);
@@ -5382,7 +5408,9 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
pop(ra); // Restore ra
}
-void TurboAssembler::ResetSpeculationPoisonRegister() { UNREACHABLE(); }
+void TurboAssembler::ResetSpeculationPoisonRegister() {
+ li(kSpeculationPoisonRegister, -1);
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 37d2c59270..3278932e19 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -210,30 +210,30 @@ class TurboAssembler : public Assembler {
#undef COND_TYPED_ARGS
#undef COND_ARGS
- // Wrapper functions for the different cmp/branch types.
- inline void BranchF32(Label* target, Label* nan, Condition cc,
- FPURegister cmp1, FPURegister cmp2,
- BranchDelaySlot bd = PROTECT) {
- BranchFCommon(S, target, nan, cc, cmp1, cmp2, bd);
+ // Floating point branches
+ void CompareF32(FPUCondition cc, FPURegister cmp1, FPURegister cmp2) {
+ CompareF(S, cc, cmp1, cmp2);
}
- inline void BranchF64(Label* target, Label* nan, Condition cc,
- FPURegister cmp1, FPURegister cmp2,
- BranchDelaySlot bd = PROTECT) {
- BranchFCommon(D, target, nan, cc, cmp1, cmp2, bd);
+ void CompareIsNanF32(FPURegister cmp1, FPURegister cmp2) {
+ CompareIsNanF(S, cmp1, cmp2);
}
- // Alternate (inline) version for better readability with USE_DELAY_SLOT.
- inline void BranchF64(BranchDelaySlot bd, Label* target, Label* nan,
- Condition cc, FPURegister cmp1, FPURegister cmp2) {
- BranchF64(target, nan, cc, cmp1, cmp2, bd);
+ void CompareF64(FPUCondition cc, FPURegister cmp1, FPURegister cmp2) {
+ CompareF(D, cc, cmp1, cmp2);
}
- inline void BranchF32(BranchDelaySlot bd, Label* target, Label* nan,
- Condition cc, FPURegister cmp1, FPURegister cmp2) {
- BranchF32(target, nan, cc, cmp1, cmp2, bd);
+ void CompareIsNanF64(FPURegister cmp1, FPURegister cmp2) {
+ CompareIsNanF(D, cmp1, cmp2);
}
+ void BranchTrueShortF(Label* target);
+ void BranchFalseShortF(Label* target);
+
+ void BranchTrueF(Label* target);
+ void BranchFalseF(Label* target);
+
+ // MSA Branches
void BranchMSA(Label* target, MSABranchDF df, MSABranchCondition cond,
MSARegister wt, BranchDelaySlot bd = PROTECT);
@@ -461,6 +461,12 @@ class TurboAssembler : public Assembler {
DEFINE_INSTRUCTION(Slt);
DEFINE_INSTRUCTION(Sltu);
+ DEFINE_INSTRUCTION(Sle);
+ DEFINE_INSTRUCTION(Sleu);
+ DEFINE_INSTRUCTION(Sgt);
+ DEFINE_INSTRUCTION(Sgtu);
+ DEFINE_INSTRUCTION(Sge);
+ DEFINE_INSTRUCTION(Sgeu);
// MIPS32 R2 instruction macro.
DEFINE_INSTRUCTION(Ror);
@@ -561,6 +567,11 @@ class TurboAssembler : public Assembler {
void Movt(Register rd, Register rs, uint16_t cc = 0);
void Movf(Register rd, Register rs, uint16_t cc = 0);
+ void LoadZeroIfConditionNotZero(Register dest, Register condition);
+ void LoadZeroIfConditionZero(Register dest, Register condition);
+ void LoadZeroOnCondition(Register rd, Register rs, const Operand& rt,
+ Condition cond);
+
void Clz(Register rd, Register rs);
void Ctz(Register rd, Register rs);
void Popcnt(Register rd, Register rs);
@@ -573,22 +584,25 @@ class TurboAssembler : public Assembler {
Register left_high, Register right_low, Register right_high);
void ShlPair(Register dst_low, Register dst_high, Register src_low,
- Register src_high, Register shift);
+ Register src_high, Register shift, Register scratch1,
+ Register scratch2);
void ShlPair(Register dst_low, Register dst_high, Register src_low,
- Register src_high, uint32_t shift);
+ Register src_high, uint32_t shift, Register scratch);
void ShrPair(Register dst_low, Register dst_high, Register src_low,
- Register src_high, Register shift);
+ Register src_high, Register shift, Register scratch1,
+ Register scratch2);
void ShrPair(Register dst_low, Register dst_high, Register src_low,
- Register src_high, uint32_t shift);
+ Register src_high, uint32_t shift, Register scratch);
void SarPair(Register dst_low, Register dst_high, Register src_low,
- Register src_high, Register shift);
+ Register src_high, Register shift, Register scratch1,
+ Register scratch2);
void SarPair(Register dst_low, Register dst_high, Register src_low,
- Register src_high, uint32_t shift);
+ Register src_high, uint32_t shift, Register scratch);
// MIPS32 R2 instruction macro.
void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
@@ -615,6 +629,18 @@ class TurboAssembler : public Assembler {
void Floor_w_d(FPURegister fd, FPURegister fs);
void Ceil_w_d(FPURegister fd, FPURegister fs);
+ // Round double functions
+ void Trunc_d_d(FPURegister fd, FPURegister fs);
+ void Round_d_d(FPURegister fd, FPURegister fs);
+ void Floor_d_d(FPURegister fd, FPURegister fs);
+ void Ceil_d_d(FPURegister fd, FPURegister fs);
+
+ // Round float functions
+ void Trunc_s_s(FPURegister fd, FPURegister fs);
+ void Round_s_s(FPURegister fd, FPURegister fs);
+ void Floor_s_s(FPURegister fd, FPURegister fs);
+ void Ceil_s_s(FPURegister fd, FPURegister fs);
+
// FP32 mode: Move the general purpose register into
// the high part of the double-register pair.
// FP64 mode: Move the general-purpose register into
@@ -742,63 +768,19 @@ class TurboAssembler : public Assembler {
void Move(FPURegister dst, uint64_t src);
// -------------------------------------------------------------------------
- // Overflow handling functions.
- // Usage: first call the appropriate arithmetic function, then call one of the
- // jump functions with the overflow_dst register as the second parameter.
-
- inline void AddBranchOvf(Register dst, Register left, const Operand& right,
- Label* overflow_label, Register scratch = at) {
- AddBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
- }
-
- inline void AddBranchNoOvf(Register dst, Register left, const Operand& right,
- Label* no_overflow_label, Register scratch = at) {
- AddBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
- }
-
- void AddBranchOvf(Register dst, Register left, const Operand& right,
- Label* overflow_label, Label* no_overflow_label,
- Register scratch = at);
-
- void AddBranchOvf(Register dst, Register left, Register right,
- Label* overflow_label, Label* no_overflow_label,
- Register scratch = at);
-
- inline void SubBranchOvf(Register dst, Register left, const Operand& right,
- Label* overflow_label, Register scratch = at) {
- SubBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
- }
-
- inline void SubBranchNoOvf(Register dst, Register left, const Operand& right,
- Label* no_overflow_label, Register scratch = at) {
- SubBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
- }
-
- void SubBranchOvf(Register dst, Register left, const Operand& right,
- Label* overflow_label, Label* no_overflow_label,
- Register scratch = at);
-
- void SubBranchOvf(Register dst, Register left, Register right,
- Label* overflow_label, Label* no_overflow_label,
- Register scratch = at);
-
- inline void MulBranchOvf(Register dst, Register left, const Operand& right,
- Label* overflow_label, Register scratch = at) {
- MulBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
- }
-
- inline void MulBranchNoOvf(Register dst, Register left, const Operand& right,
- Label* no_overflow_label, Register scratch = at) {
- MulBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
- }
-
- void MulBranchOvf(Register dst, Register left, const Operand& right,
- Label* overflow_label, Label* no_overflow_label,
- Register scratch = at);
-
- void MulBranchOvf(Register dst, Register left, Register right,
- Label* overflow_label, Label* no_overflow_label,
- Register scratch = at);
+ // Overflow operations.
+
+ // AddOverflow sets overflow register to a negative value if
+ // overflow occurred, otherwise it is zero or positive
+ void AddOverflow(Register dst, Register left, const Operand& right,
+ Register overflow);
+ // SubOverflow sets overflow register to a negative value if
+ // overflow occurred, otherwise it is zero or positive
+ void SubOverflow(Register dst, Register left, const Operand& right,
+ Register overflow);
+ // MulOverflow sets overflow register to zero if no overflow occurred
+ void MulOverflow(Register dst, Register left, const Operand& right,
+ Register overflow);
// Number of instructions needed for calculation of switch table entry address
#ifdef _MIPS_ARCH_MIPS32R6
@@ -840,17 +822,6 @@ class TurboAssembler : public Assembler {
// Get the actual activation frame alignment for target environment.
static int ActivationFrameAlignment();
- // Alias functions for backward compatibility.
- inline void BranchF(Label* target, Label* nan, Condition cc, FPURegister cmp1,
- FPURegister cmp2, BranchDelaySlot bd = PROTECT) {
- BranchF64(target, nan, cc, cmp1, cmp2, bd);
- }
-
- inline void BranchF(BranchDelaySlot bd, Label* target, Label* nan,
- Condition cc, FPURegister cmp1, FPURegister cmp2) {
- BranchF64(bd, target, nan, cc, cmp1, cmp2);
- }
-
// Compute the start of the generated instruction stream from the current PC.
// This is an alternative to embedding the {CodeObject} handle as a reference.
void ComputeCodeStartAddress(Register dst);
@@ -874,14 +845,11 @@ class TurboAssembler : public Assembler {
void CallCFunctionHelper(Register function_base, int16_t function_offset,
int num_reg_arguments, int num_double_arguments);
- // Common implementation of BranchF functions for the different formats.
- void BranchFCommon(SecondaryField sizeField, Label* target, Label* nan,
- Condition cc, FPURegister cmp1, FPURegister cmp2,
- BranchDelaySlot bd = PROTECT);
+ void CompareF(SecondaryField sizeField, FPUCondition cc, FPURegister cmp1,
+ FPURegister cmp2);
- void BranchShortF(SecondaryField sizeField, Label* target, Condition cc,
- FPURegister cmp1, FPURegister cmp2,
- BranchDelaySlot bd = PROTECT);
+ void CompareIsNanF(SecondaryField sizeField, FPURegister cmp1,
+ FPURegister cmp2);
void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond,
MSARegister wt, BranchDelaySlot bd = PROTECT);
@@ -914,6 +882,14 @@ class TurboAssembler : public Assembler {
BranchDelaySlot bdslot);
void BranchAndLinkLong(Label* L, BranchDelaySlot bdslot);
+ template <typename RoundFunc>
+ void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode,
+ RoundFunc round);
+
+ template <typename RoundFunc>
+ void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode,
+ RoundFunc round);
+
// Push a fixed frame, consisting of ra, fp.
void PushCommonFrame(Register marker_reg = no_reg);
};
@@ -1099,7 +1075,11 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
bool builtin_exit_frame = false);
// Generates a trampoline to jump to the off-heap instruction stream.
- void JumpToInstructionStream(const InstructionStream* stream);
+ void JumpToInstructionStream(Address entry);
+
+ // ---------------------------------------------------------------------------
+ // In-place weak references.
+ void LoadWeakValue(Register out, Register in, Label* target_if_cleared);
// -------------------------------------------------------------------------
// StatsCounter support.
@@ -1143,6 +1123,9 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// Abort execution if argument is not a FixedArray, enabled via --debug-code.
void AssertFixedArray(Register object);
+ // Abort execution if argument is not a Constructor, enabled via --debug-code.
+ void AssertConstructor(Register object);
+
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index b55273eba5..a8b887fec8 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -6386,10 +6386,10 @@ void Simulator::DecodeTypeImmediate() {
break;
}
case BC1EQZ:
- BranchHelper(!(get_fpu_register(ft_reg) & 0x1));
+ BranchCompactHelper(!(get_fpu_register(ft_reg) & 0x1), 16);
break;
case BC1NEZ:
- BranchHelper(get_fpu_register(ft_reg) & 0x1);
+ BranchCompactHelper(get_fpu_register(ft_reg) & 0x1, 16);
break;
case BZ_V: {
msa_reg_t wt;
diff --git a/deps/v8/src/mips64/assembler-mips64-inl.h b/deps/v8/src/mips64/assembler-mips64-inl.h
index e05082ee40..4ba2d1e7db 100644
--- a/deps/v8/src/mips64/assembler-mips64-inl.h
+++ b/deps/v8/src/mips64/assembler-mips64-inl.h
@@ -80,10 +80,9 @@ Address RelocInfo::target_address() {
}
Address RelocInfo::target_address_address() {
- DCHECK(IsCodeTarget(rmode_) ||
- IsRuntimeEntry(rmode_) ||
- rmode_ == EMBEDDED_OBJECT ||
- rmode_ == EXTERNAL_REFERENCE);
+ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_) ||
+ IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
+ IsOffHeapTarget(rmode_));
// Read the address of the word containing the target_address in an
// instruction stream.
// The only architecture-independent user of this function is the serializer.
@@ -186,6 +185,12 @@ Address RelocInfo::target_external_reference() {
return Assembler::target_address_at(pc_, constant_pool_);
}
+void RelocInfo::set_target_external_reference(
+ Address target, ICacheFlushMode icache_flush_mode) {
+ DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
+ Assembler::set_target_address_at(pc_, constant_pool_, target,
+ icache_flush_mode);
+}
Address RelocInfo::target_internal_reference() {
if (rmode_ == INTERNAL_REFERENCE) {
@@ -208,6 +213,12 @@ Address RelocInfo::target_internal_reference_address() {
return reinterpret_cast<Address>(pc_);
}
+void RelocInfo::set_wasm_code_table_entry(Address target,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(rmode_ == RelocInfo::WASM_CODE_TABLE_ENTRY);
+ Assembler::set_target_address_at(pc_, constant_pool_, target,
+ icache_flush_mode);
+}
Address RelocInfo::target_runtime_entry(Assembler* origin) {
DCHECK(IsRuntimeEntry(rmode_));
@@ -222,6 +233,11 @@ void RelocInfo::set_target_runtime_entry(Address target,
set_target_address(target, write_barrier_mode, icache_flush_mode);
}
+Address RelocInfo::target_off_heap_target() {
+ DCHECK(IsOffHeapTarget(rmode_));
+ return Assembler::target_address_at(pc_, constant_pool_);
+}
+
void RelocInfo::WipeOut() {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
@@ -249,6 +265,8 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
visitor->VisitInternalReference(host(), this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(host(), this);
+ } else if (RelocInfo::IsOffHeapTarget(mode)) {
+ visitor->VisitOffHeapTarget(host(), this);
}
}
diff --git a/deps/v8/src/mips64/assembler-mips64.cc b/deps/v8/src/mips64/assembler-mips64.cc
index 6c0bebebce..8f6f28678b 100644
--- a/deps/v8/src/mips64/assembler-mips64.cc
+++ b/deps/v8/src/mips64/assembler-mips64.cc
@@ -161,11 +161,9 @@ Register ToRegister(int num) {
// -----------------------------------------------------------------------------
// Implementation of RelocInfo.
-const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
- 1 << RelocInfo::INTERNAL_REFERENCE |
+const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE |
1 << RelocInfo::INTERNAL_REFERENCE_ENCODED;
-
bool RelocInfo::IsCodedSpecially() {
// The deserializer needs to know whether a pointer is specially coded. Being
// specially coded on MIPS means that it is a lui/ori instruction, and that is
@@ -3358,14 +3356,14 @@ void Assembler::cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs,
void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
DCHECK_EQ(kArchVariant, kMips64r6);
Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
- emit(instr);
+ emit(instr, CompactBranchType::COMPACT_BRANCH);
}
void Assembler::bc1nez(int16_t offset, FPURegister ft) {
DCHECK_EQ(kArchVariant, kMips64r6);
Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask);
- emit(instr);
+ emit(instr, CompactBranchType::COMPACT_BRANCH);
}
@@ -3404,16 +3402,20 @@ void Assembler::fcmp(FPURegister src1, const double src2,
void Assembler::bc1f(int16_t offset, uint16_t cc) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(is_uint3(cc));
Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
emit(instr);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
void Assembler::bc1t(int16_t offset, uint16_t cc) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(is_uint3(cc));
Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
emit(instr);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
// ---------- MSA instructions ------------
@@ -4016,7 +4018,7 @@ void Assembler::GrowBuffer() {
// Some internal data structures overflow for very large buffers,
// they must ensure that kMaximalBufferSize is not too large.
if (desc.buffer_size > kMaximalBufferSize) {
- V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
+ V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
}
// Set up new buffer.
diff --git a/deps/v8/src/mips64/assembler-mips64.h b/deps/v8/src/mips64/assembler-mips64.h
index 9f1fe59de8..0971c43b00 100644
--- a/deps/v8/src/mips64/assembler-mips64.h
+++ b/deps/v8/src/mips64/assembler-mips64.h
@@ -325,9 +325,9 @@ const Simd128Register no_msareg = Simd128Register::no_reg();
// cp is assumed to be a callee saved register.
constexpr Register kRootRegister = s6;
constexpr Register cp = s7;
-constexpr Register kLithiumScratchReg = s3;
-constexpr Register kLithiumScratchReg2 = s4;
-constexpr DoubleRegister kLithiumScratchDouble = f30;
+constexpr Register kScratchReg = s3;
+constexpr Register kScratchReg2 = s4;
+constexpr DoubleRegister kScratchDoubleReg = f30;
constexpr DoubleRegister kDoubleRegZero = f28;
// Used on mips64r6 for compare operations.
// We use the last non-callee saved odd register for N64 ABI
@@ -681,9 +681,6 @@ class Assembler : public AssemblerBase {
// Helper values.
LAST_CODE_MARKER,
FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED,
- // Code aging
- CODE_AGE_MARKER_NOP = 6,
- CODE_AGE_SEQUENCE_NOP
};
// Type == 0 is the default non-marking nop. For mips this is a
diff --git a/deps/v8/src/mips64/code-stubs-mips64.cc b/deps/v8/src/mips64/code-stubs-mips64.cc
index f8075885a9..5d0639f1a9 100644
--- a/deps/v8/src/mips64/code-stubs-mips64.cc
+++ b/deps/v8/src/mips64/code-stubs-mips64.cc
@@ -42,7 +42,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
Register scratch = GetRegisterThatIsNotOneOf(result_reg);
Register scratch2 = GetRegisterThatIsNotOneOf(result_reg, scratch);
Register scratch3 = GetRegisterThatIsNotOneOf(result_reg, scratch, scratch2);
- DoubleRegister double_scratch = kLithiumScratchDouble;
+ DoubleRegister double_scratch = kScratchDoubleReg;
// Account for saved regs.
const int kArgumentOffset = 3 * kPointerSize;
@@ -178,53 +178,34 @@ void MathPowStub::Generate(MacroAssembler* masm) {
const Register scratch2 = a7;
Label call_runtime, done, int_exponent;
- if (exponent_type() == TAGGED) {
- // Base is already in double_base.
- __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
- __ Ldc1(double_exponent,
- FieldMemOperand(exponent, HeapNumber::kValueOffset));
- }
-
- if (exponent_type() != INTEGER) {
- Label int_exponent_convert;
- // Detect integer exponents stored as double.
- __ EmitFPUTruncate(kRoundToMinusInf,
- scratch,
- double_exponent,
- at,
- double_scratch,
- scratch2,
- kCheckForInexactConversion);
- // scratch2 == 0 means there was no conversion error.
- __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
+ Label int_exponent_convert;
+ // Detect integer exponents stored as double.
+ __ EmitFPUTruncate(kRoundToMinusInf, scratch, double_exponent, at,
+ double_scratch, scratch2, kCheckForInexactConversion);
+ // scratch2 == 0 means there was no conversion error.
+ __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
- __ push(ra);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(0, 2, scratch2);
- __ MovToFloatParameters(double_base, double_exponent);
- __ CallCFunction(
- ExternalReference::power_double_double_function(isolate()),
- 0, 2);
- }
- __ pop(ra);
- __ MovFromFloatResult(double_result);
- __ jmp(&done);
-
- __ bind(&int_exponent_convert);
+ __ push(ra);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(0, 2, scratch2);
+ __ MovToFloatParameters(double_base, double_exponent);
+ __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
+ 0, 2);
}
+ __ pop(ra);
+ __ MovFromFloatResult(double_result);
+ __ jmp(&done);
+
+ __ bind(&int_exponent_convert);
// Calculate power with integer exponent.
__ bind(&int_exponent);
// Get two copies of exponent in the registers scratch and exponent.
- if (exponent_type() == INTEGER) {
- __ mov(scratch, exponent);
- } else {
- // Exponent has previously been stored into scratch as untagged integer.
- __ mov(exponent, scratch);
- }
+ // Exponent has previously been stored into scratch as untagged integer.
+ __ mov(exponent, scratch);
__ mov_d(double_scratch, double_base); // Back up base.
__ Move(double_result, 1.0);
@@ -263,7 +244,8 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ div_d(double_result, double_scratch, double_result);
// Test whether result is zero. Bail out to check for subnormal result.
// Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
- __ BranchF(&done, nullptr, ne, double_result, kDoubleRegZero);
+ __ CompareF64(EQ, double_result, kDoubleRegZero);
+ __ BranchFalseShortF(&done);
// double_exponent may not contain the exponent value if the input was a
// smi. We set it with exponent value before bailing out.
@@ -465,6 +447,12 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ Sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&zero);
+ // Reset the masking register. This is done independently of the underlying
+ // feature flag {FLAG_branch_load_poisoning} to make the snapshot work with
+ // both configurations. It is safe to always do this, because the underlying
+ // register is caller-saved and can be arbitrarily clobbered.
+ __ ResetSpeculationPoisonRegister();
+
// Compute the handler entry address and jump to it.
__ li(t9, Operand(pending_handler_entrypoint_address));
__ Ld(t9, MemOperand(t9));
diff --git a/deps/v8/src/mips64/codegen-mips64.cc b/deps/v8/src/mips64/codegen-mips64.cc
index 7b2fbd78a5..d51ead5a6c 100644
--- a/deps/v8/src/mips64/codegen-mips64.cc
+++ b/deps/v8/src/mips64/codegen-mips64.cc
@@ -7,6 +7,7 @@
#include <memory>
#include "src/codegen.h"
+#include "src/isolate.h"
#include "src/macro-assembler.h"
#include "src/mips64/simulator-mips64.h"
diff --git a/deps/v8/src/mips64/frame-constants-mips64.h b/deps/v8/src/mips64/frame-constants-mips64.h
index 9c7455bcc5..22f01002c7 100644
--- a/deps/v8/src/mips64/frame-constants-mips64.h
+++ b/deps/v8/src/mips64/frame-constants-mips64.h
@@ -10,40 +10,42 @@ namespace internal {
class EntryFrameConstants : public AllStatic {
public:
- static const int kCallerFPOffset =
+ static constexpr int kCallerFPOffset =
-(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
};
class ExitFrameConstants : public TypedFrameConstants {
public:
- static const int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
- static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+ static constexpr int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+ static constexpr int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
DEFINE_TYPED_FRAME_SIZES(2);
// The caller fields are below the frame pointer on the stack.
- static const int kCallerFPOffset = +0 * kPointerSize;
+ static constexpr int kCallerFPOffset = +0 * kPointerSize;
// The calling JS function is between FP and PC.
- static const int kCallerPCOffset = +1 * kPointerSize;
+ static constexpr int kCallerPCOffset = +1 * kPointerSize;
// MIPS-specific: a pointer to the old sp to avoid unnecessary calculations.
- static const int kCallerSPOffset = +2 * kPointerSize;
+ static constexpr int kCallerSPOffset = +2 * kPointerSize;
// FP-relative displacement of the caller's SP.
- static const int kCallerSPDisplacement = +2 * kPointerSize;
+ static constexpr int kCallerSPDisplacement = +2 * kPointerSize;
- static const int kConstantPoolOffset = 0; // Not used.
+ static constexpr int kConstantPoolOffset = 0; // Not used.
};
class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
- static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
- static const int kLastParameterOffset = +2 * kPointerSize;
- static const int kFunctionOffset = StandardFrameConstants::kFunctionOffset;
+ static constexpr int kLocal0Offset =
+ StandardFrameConstants::kExpressionsOffset;
+ static constexpr int kLastParameterOffset = +2 * kPointerSize;
+ static constexpr int kFunctionOffset =
+ StandardFrameConstants::kFunctionOffset;
// Caller SP-relative.
- static const int kParam0Offset = -2 * kPointerSize;
- static const int kReceiverOffset = -1 * kPointerSize;
+ static constexpr int kParam0Offset = -2 * kPointerSize;
+ static constexpr int kReceiverOffset = -1 * kPointerSize;
};
} // namespace internal
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.cc b/deps/v8/src/mips64/macro-assembler-mips64.cc
index 79f486b4bb..69e9b06e54 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/mips64/macro-assembler-mips64.cc
@@ -38,6 +38,14 @@ TurboAssembler::TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
}
}
+static inline bool IsZero(const Operand& rt) {
+ if (rt.is_reg()) {
+ return rt.rm() == zero_reg;
+ } else {
+ return rt.immediate() == 0;
+ }
+}
+
int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1,
Register exclusion2,
@@ -959,6 +967,70 @@ void TurboAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
}
}
+void TurboAssembler::Sle(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ slt(rd, rt.rm(), rs);
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
+ DCHECK(rs != scratch);
+ li(scratch, rt);
+ slt(rd, scratch, rs);
+ }
+ xori(rd, rd, 1);
+}
+
+void TurboAssembler::Sleu(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ sltu(rd, rt.rm(), rs);
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
+ DCHECK(rs != scratch);
+ li(scratch, rt);
+ sltu(rd, scratch, rs);
+ }
+ xori(rd, rd, 1);
+}
+
+void TurboAssembler::Sge(Register rd, Register rs, const Operand& rt) {
+ Slt(rd, rs, rt);
+ xori(rd, rd, 1);
+}
+
+void TurboAssembler::Sgeu(Register rd, Register rs, const Operand& rt) {
+ Sltu(rd, rs, rt);
+ xori(rd, rd, 1);
+}
+
+void TurboAssembler::Sgt(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ slt(rd, rt.rm(), rs);
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
+ DCHECK(rs != scratch);
+ li(scratch, rt);
+ slt(rd, scratch, rs);
+ }
+}
+
+void TurboAssembler::Sgtu(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ sltu(rd, rt.rm(), rs);
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
+ DCHECK(rs != scratch);
+ li(scratch, rt);
+ sltu(rd, scratch, rs);
+ }
+}
+
void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
rotrv(rd, rs, rt.rm());
@@ -1997,7 +2069,8 @@ void TurboAssembler::Neg_s(FPURegister fd, FPURegister fs) {
Label is_nan, done;
Register scratch1 = t8;
Register scratch2 = t9;
- BranchF32(nullptr, &is_nan, eq, fs, fs);
+ CompareIsNanF32(fs, fs);
+ BranchTrueShortF(&is_nan);
Branch(USE_DELAY_SLOT, &done);
// For NaN input, neg_s will return the same NaN value,
// while the sign has to be changed separately.
@@ -2020,7 +2093,8 @@ void TurboAssembler::Neg_d(FPURegister fd, FPURegister fs) {
Label is_nan, done;
Register scratch1 = t8;
Register scratch2 = t9;
- BranchF64(nullptr, &is_nan, eq, fs, fs);
+ CompareIsNanF64(fs, fs);
+ BranchTrueShortF(&is_nan);
Branch(USE_DELAY_SLOT, &done);
// For NaN input, neg_d will return the same NaN value,
// while the sign has to be changed separately.
@@ -2231,7 +2305,8 @@ void TurboAssembler::Trunc_uw_d(FPURegister fd, Register rs,
// Test if scratch > fd.
// If fd < 2^31 we can convert it normally.
Label simple_convert;
- BranchF(&simple_convert, nullptr, lt, fd, scratch);
+ CompareF64(OLT, fd, scratch);
+ BranchTrueShortF(&simple_convert);
// First we subtract 2^31 from fd, then trunc it to rs
// and add 2^31 to rs.
@@ -2265,7 +2340,8 @@ void TurboAssembler::Trunc_uw_s(FPURegister fd, Register rs,
// Test if scratch > fd.
// If fd < 2^31 we can convert it normally.
Label simple_convert;
- BranchF32(&simple_convert, nullptr, lt, fd, scratch);
+ CompareF32(OLT, fd, scratch);
+ BranchTrueShortF(&simple_convert);
// First we subtract 2^31 from fd, then trunc it to rs
// and add 2^31 to rs.
@@ -2294,7 +2370,10 @@ void TurboAssembler::Trunc_ul_d(FPURegister fd, Register rs,
mov(result, zero_reg);
Move(scratch, -1.0);
    // If fd <= -1 or unordered, then the conversion fails.
- BranchF(&fail, &fail, le, fd, scratch);
+ CompareF64(OLE, fd, scratch);
+ BranchTrueShortF(&fail);
+ CompareIsNanF64(fd, scratch);
+ BranchTrueShortF(&fail);
}
// Load 2^63 into scratch as its double representation.
@@ -2303,7 +2382,8 @@ void TurboAssembler::Trunc_ul_d(FPURegister fd, Register rs,
// Test if scratch > fd.
// If fd < 2^63 we can convert it normally.
- BranchF(&simple_convert, nullptr, lt, fd, scratch);
+ CompareF64(OLT, fd, scratch);
+ BranchTrueShortF(&simple_convert);
// First we subtract 2^63 from fd, then trunc it to rs
// and add 2^63 to rs.
@@ -2345,7 +2425,10 @@ void TurboAssembler::Trunc_ul_s(FPURegister fd, Register rs,
mov(result, zero_reg);
Move(scratch, -1.0f);
    // If fd <= -1 or unordered, then the conversion fails.
- BranchF32(&fail, &fail, le, fd, scratch);
+ CompareF32(OLE, fd, scratch);
+ BranchTrueShortF(&fail);
+ CompareIsNanF32(fd, scratch);
+ BranchTrueShortF(&fail);
}
{
@@ -2358,7 +2441,8 @@ void TurboAssembler::Trunc_ul_s(FPURegister fd, Register rs,
// Test if scratch > fd.
// If fd < 2^63 we can convert it normally.
- BranchF32(&simple_convert, nullptr, lt, fd, scratch);
+ CompareF32(OLT, fd, scratch);
+ BranchTrueShortF(&simple_convert);
// First we subtract 2^63 from fd, then trunc it to rs
// and add 2^63 to rs.
@@ -2390,6 +2474,123 @@ void TurboAssembler::Trunc_ul_s(FPURegister fd, Register rs,
bind(&fail);
}
+template <typename RoundFunc>
+void TurboAssembler::RoundDouble(FPURegister dst, FPURegister src,
+ FPURoundingMode mode, RoundFunc round) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register scratch = t8;
+ if (kArchVariant == kMips64r6) {
+ cfc1(scratch, FCSR);
+ li(at, Operand(mode));
+ ctc1(at, FCSR);
+ rint_d(dst, src);
+ ctc1(scratch, FCSR);
+ } else {
+ Label done;
+ mfhc1(scratch, src);
+ Ext(at, scratch, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+ Branch(USE_DELAY_SLOT, &done, hs, at,
+ Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits));
+ mov_d(dst, src);
+ round(this, dst, src);
+ dmfc1(at, dst);
+ Branch(USE_DELAY_SLOT, &done, ne, at, Operand(zero_reg));
+ cvt_d_l(dst, dst);
+ srl(at, scratch, 31);
+ sll(at, at, 31);
+ mthc1(at, dst);
+ bind(&done);
+ }
+}
+
+void TurboAssembler::Floor_d_d(FPURegister dst, FPURegister src) {
+ RoundDouble(dst, src, mode_floor,
+ [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
+ tasm->floor_l_d(dst, src);
+ });
+}
+
+void TurboAssembler::Ceil_d_d(FPURegister dst, FPURegister src) {
+ RoundDouble(dst, src, mode_ceil,
+ [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
+ tasm->ceil_l_d(dst, src);
+ });
+}
+
+void TurboAssembler::Trunc_d_d(FPURegister dst, FPURegister src) {
+ RoundDouble(dst, src, mode_trunc,
+ [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
+ tasm->trunc_l_d(dst, src);
+ });
+}
+
+void TurboAssembler::Round_d_d(FPURegister dst, FPURegister src) {
+ RoundDouble(dst, src, mode_round,
+ [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
+ tasm->round_l_d(dst, src);
+ });
+}
+
+template <typename RoundFunc>
+void TurboAssembler::RoundFloat(FPURegister dst, FPURegister src,
+ FPURoundingMode mode, RoundFunc round) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register scratch = t8;
+ if (kArchVariant == kMips64r6) {
+ cfc1(scratch, FCSR);
+ li(at, Operand(mode));
+ ctc1(at, FCSR);
+ rint_s(dst, src);
+ ctc1(scratch, FCSR);
+ } else {
+ int32_t kFloat32ExponentBias = 127;
+ int32_t kFloat32MantissaBits = 23;
+ int32_t kFloat32ExponentBits = 8;
+ Label done;
+ mfc1(scratch, src);
+ Ext(at, scratch, kFloat32MantissaBits, kFloat32ExponentBits);
+ Branch(USE_DELAY_SLOT, &done, hs, at,
+ Operand(kFloat32ExponentBias + kFloat32MantissaBits));
+ mov_s(dst, src);
+ round(this, dst, src);
+ mfc1(at, dst);
+ Branch(USE_DELAY_SLOT, &done, ne, at, Operand(zero_reg));
+ cvt_s_w(dst, dst);
+ srl(at, scratch, 31);
+ sll(at, at, 31);
+ mtc1(at, dst);
+ bind(&done);
+ }
+}
+
+void TurboAssembler::Floor_s_s(FPURegister dst, FPURegister src) {
+ RoundFloat(dst, src, mode_floor,
+ [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
+ tasm->floor_w_s(dst, src);
+ });
+}
+
+void TurboAssembler::Ceil_s_s(FPURegister dst, FPURegister src) {
+ RoundFloat(dst, src, mode_ceil,
+ [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
+ tasm->ceil_w_s(dst, src);
+ });
+}
+
+void TurboAssembler::Trunc_s_s(FPURegister dst, FPURegister src) {
+ RoundFloat(dst, src, mode_trunc,
+ [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
+ tasm->trunc_w_s(dst, src);
+ });
+}
+
+void TurboAssembler::Round_s_s(FPURegister dst, FPURegister src) {
+ RoundFloat(dst, src, mode_round,
+ [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
+ tasm->round_w_s(dst, src);
+ });
+}
+
void MacroAssembler::Madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft, FPURegister scratch) {
DCHECK(fr != scratch && fs != scratch && ft != scratch);
@@ -2418,203 +2619,65 @@ void MacroAssembler::Msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
sub_d(fd, scratch, fr);
}
-void TurboAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
- Label* nan, Condition cond, FPURegister cmp1,
- FPURegister cmp2, BranchDelaySlot bd) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- if (cond == al) {
- Branch(bd, target);
- return;
- }
-
+void TurboAssembler::CompareF(SecondaryField sizeField, FPUCondition cc,
+ FPURegister cmp1, FPURegister cmp2) {
if (kArchVariant == kMips64r6) {
sizeField = sizeField == D ? L : W;
+ DCHECK(cmp1 != kDoubleCompareReg && cmp2 != kDoubleCompareReg);
+ cmp(cc, sizeField, kDoubleCompareReg, cmp1, cmp2);
+ } else {
+ c(cc, sizeField, cmp1, cmp2);
}
+}
- DCHECK(nan || target);
- // Check for unordered (NaN) cases.
- if (nan) {
- bool long_branch =
- nan->is_bound() ? !is_near(nan) : is_trampoline_emitted();
- if (kArchVariant != kMips64r6) {
- if (long_branch) {
- Label skip;
- c(UN, sizeField, cmp1, cmp2);
- bc1f(&skip);
- nop();
- BranchLong(nan, bd);
- bind(&skip);
- } else {
- c(UN, sizeField, cmp1, cmp2);
- bc1t(nan);
- if (bd == PROTECT) {
- nop();
- }
- }
- } else {
- // Use kDoubleCompareReg for comparison result. It has to be unavailable
- // to lithium
- // register allocator.
- DCHECK(cmp1 != kDoubleCompareReg && cmp2 != kDoubleCompareReg);
- if (long_branch) {
- Label skip;
- cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1eqz(&skip, kDoubleCompareReg);
- nop();
- BranchLong(nan, bd);
- bind(&skip);
- } else {
- cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1nez(nan, kDoubleCompareReg);
- if (bd == PROTECT) {
- nop();
- }
- }
- }
+void TurboAssembler::CompareIsNanF(SecondaryField sizeField, FPURegister cmp1,
+ FPURegister cmp2) {
+ CompareF(sizeField, UN, cmp1, cmp2);
+}
+
+void TurboAssembler::BranchTrueShortF(Label* target) {
+ if (kArchVariant == kMips64r6) {
+ bc1nez(target, kDoubleCompareReg);
+ nop();
+ } else {
+ bc1t(target);
+ nop();
}
+}
- if (target) {
- bool long_branch =
- target->is_bound() ? !is_near(target) : is_trampoline_emitted();
- if (long_branch) {
- Label skip;
- Condition neg_cond = NegateFpuCondition(cond);
- BranchShortF(sizeField, &skip, neg_cond, cmp1, cmp2, bd);
- BranchLong(target, bd);
- bind(&skip);
- } else {
- BranchShortF(sizeField, target, cond, cmp1, cmp2, bd);
- }
+void TurboAssembler::BranchFalseShortF(Label* target) {
+ if (kArchVariant == kMips64r6) {
+ bc1eqz(target, kDoubleCompareReg);
+ nop();
+ } else {
+ bc1f(target);
+ nop();
}
}
-void TurboAssembler::BranchShortF(SecondaryField sizeField, Label* target,
- Condition cc, FPURegister cmp1,
- FPURegister cmp2, BranchDelaySlot bd) {
- if (kArchVariant != kMips64r6) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- if (target) {
- // Here NaN cases were either handled by this function or are assumed to
- // have been handled by the caller.
- switch (cc) {
- case lt:
- c(OLT, sizeField, cmp1, cmp2);
- bc1t(target);
- break;
- case ult:
- c(ULT, sizeField, cmp1, cmp2);
- bc1t(target);
- break;
- case gt:
- c(ULE, sizeField, cmp1, cmp2);
- bc1f(target);
- break;
- case ugt:
- c(OLE, sizeField, cmp1, cmp2);
- bc1f(target);
- break;
- case ge:
- c(ULT, sizeField, cmp1, cmp2);
- bc1f(target);
- break;
- case uge:
- c(OLT, sizeField, cmp1, cmp2);
- bc1f(target);
- break;
- case le:
- c(OLE, sizeField, cmp1, cmp2);
- bc1t(target);
- break;
- case ule:
- c(ULE, sizeField, cmp1, cmp2);
- bc1t(target);
- break;
- case eq:
- c(EQ, sizeField, cmp1, cmp2);
- bc1t(target);
- break;
- case ueq:
- c(UEQ, sizeField, cmp1, cmp2);
- bc1t(target);
- break;
- case ne: // Unordered or not equal.
- c(EQ, sizeField, cmp1, cmp2);
- bc1f(target);
- break;
- case ogl:
- c(UEQ, sizeField, cmp1, cmp2);
- bc1f(target);
- break;
- default:
- CHECK(0);
- }
- }
+void TurboAssembler::BranchTrueF(Label* target) {
+ bool long_branch =
+ target->is_bound() ? !is_near(target) : is_trampoline_emitted();
+ if (long_branch) {
+ Label skip;
+ BranchFalseShortF(&skip);
+ BranchLong(target, PROTECT);
+ bind(&skip);
} else {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- if (target) {
- // Here NaN cases were either handled by this function or are assumed to
- // have been handled by the caller.
- // Unsigned conditions are treated as their signed counterpart.
- // Use kDoubleCompareReg for comparison result, it is valid in fp64 (FR =
- // 1) mode.
- DCHECK(cmp1 != kDoubleCompareReg && cmp2 != kDoubleCompareReg);
- switch (cc) {
- case lt:
- cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1nez(target, kDoubleCompareReg);
- break;
- case ult:
- cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1nez(target, kDoubleCompareReg);
- break;
- case gt:
- cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1eqz(target, kDoubleCompareReg);
- break;
- case ugt:
- cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1eqz(target, kDoubleCompareReg);
- break;
- case ge:
- cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1eqz(target, kDoubleCompareReg);
- break;
- case uge:
- cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1eqz(target, kDoubleCompareReg);
- break;
- case le:
- cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1nez(target, kDoubleCompareReg);
- break;
- case ule:
- cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1nez(target, kDoubleCompareReg);
- break;
- case eq:
- cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1nez(target, kDoubleCompareReg);
- break;
- case ueq:
- cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1nez(target, kDoubleCompareReg);
- break;
- case ne:
- cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1eqz(target, kDoubleCompareReg);
- break;
- case ogl:
- cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
- bc1eqz(target, kDoubleCompareReg);
- break;
- default:
- CHECK(0);
- }
- }
+ BranchTrueShortF(target);
}
+}
- if (bd == PROTECT) {
- nop();
+void TurboAssembler::BranchFalseF(Label* target) {
+ bool long_branch =
+ target->is_bound() ? !is_near(target) : is_trampoline_emitted();
+ if (long_branch) {
+ Label skip;
+ BranchTrueShortF(&skip);
+ BranchLong(target, PROTECT);
+ bind(&skip);
+ } else {
+ BranchFalseShortF(target);
}
}
@@ -2766,6 +2829,115 @@ void TurboAssembler::Movn(Register rd, Register rs, Register rt) {
}
}
+void TurboAssembler::LoadZeroOnCondition(Register rd, Register rs,
+ const Operand& rt, Condition cond) {
+ switch (cond) {
+ case cc_always:
+ mov(rd, zero_reg);
+ break;
+ case eq:
+ if (rs == zero_reg) {
+ if (rt.is_reg()) {
+ LoadZeroIfConditionZero(rd, rt.rm());
+ } else {
+ if (rt.immediate() == 0) {
+ mov(rd, zero_reg);
+ } else {
+ nop();
+ }
+ }
+ } else if (IsZero(rt)) {
+ LoadZeroIfConditionZero(rd, rs);
+ } else {
+ Dsubu(t9, rs, rt);
+ LoadZeroIfConditionZero(rd, t9);
+ }
+ break;
+ case ne:
+ if (rs == zero_reg) {
+ if (rt.is_reg()) {
+ LoadZeroIfConditionNotZero(rd, rt.rm());
+ } else {
+ if (rt.immediate() != 0) {
+ mov(rd, zero_reg);
+ } else {
+ nop();
+ }
+ }
+ } else if (IsZero(rt)) {
+ LoadZeroIfConditionNotZero(rd, rs);
+ } else {
+ Dsubu(t9, rs, rt);
+ LoadZeroIfConditionNotZero(rd, t9);
+ }
+ break;
+
+ // Signed comparison.
+ case greater:
+ Sgt(t9, rs, rt);
+ LoadZeroIfConditionNotZero(rd, t9);
+ break;
+ case greater_equal:
+ Sge(t9, rs, rt);
+ LoadZeroIfConditionNotZero(rd, t9);
+ // rs >= rt
+ break;
+ case less:
+ Slt(t9, rs, rt);
+ LoadZeroIfConditionNotZero(rd, t9);
+ // rs < rt
+ break;
+ case less_equal:
+ Sle(t9, rs, rt);
+ LoadZeroIfConditionNotZero(rd, t9);
+ // rs <= rt
+ break;
+
+ // Unsigned comparison.
+ case Ugreater:
+ Sgtu(t9, rs, rt);
+ LoadZeroIfConditionNotZero(rd, t9);
+ // rs > rt
+ break;
+
+ case Ugreater_equal:
+ Sgeu(t9, rs, rt);
+ LoadZeroIfConditionNotZero(rd, t9);
+ // rs >= rt
+ break;
+ case Uless:
+ Sltu(t9, rs, rt);
+ LoadZeroIfConditionNotZero(rd, t9);
+ // rs < rt
+ break;
+ case Uless_equal:
+ Sleu(t9, rs, rt);
+ LoadZeroIfConditionNotZero(rd, t9);
+ // rs <= rt
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void TurboAssembler::LoadZeroIfConditionNotZero(Register dest,
+ Register condition) {
+ if (kArchVariant == kMips64r6) {
+ seleqz(dest, dest, condition);
+ } else {
+ Movn(dest, zero_reg, condition);
+ }
+}
+
+void TurboAssembler::LoadZeroIfConditionZero(Register dest,
+ Register condition) {
+ if (kArchVariant == kMips64r6) {
+ selnez(dest, dest, condition);
+ } else {
+ Movz(dest, zero_reg, condition);
+ }
+}
+
void TurboAssembler::Movt(Register rd, Register rs, uint16_t cc) {
movt(rd, rs, cc);
}
@@ -2926,7 +3098,8 @@ void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
cvt_w_d(double_scratch, double_input);
mfc1(result, double_scratch);
cvt_d_w(double_scratch, double_scratch);
- BranchF(&done, nullptr, eq, double_input, double_scratch);
+ CompareF64(EQ, double_input, double_scratch);
+ BranchTrueShortF(&done);
int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
@@ -2972,7 +3145,7 @@ void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
DoubleRegister double_input,
Label* done) {
- DoubleRegister single_scratch = kLithiumScratchDouble.low();
+ DoubleRegister single_scratch = kScratchDoubleReg.low();
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
Register scratch2 = t9;
@@ -3123,14 +3296,6 @@ void TurboAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
}
-static inline bool IsZero(const Operand& rt) {
- if (rt.is_reg()) {
- return rt.rm() == zero_reg;
- } else {
- return rt.immediate() == 0;
- }
-}
-
int32_t TurboAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
if (L) {
offset = branch_offset_helper(L, bits) >> 2;
@@ -4397,28 +4562,14 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
- Label skip_hook, call_hook;
-
- ExternalReference debug_is_active =
- ExternalReference::debug_is_active_address(isolate());
- li(t0, Operand(debug_is_active));
- Lb(t0, MemOperand(t0));
- Branch(&skip_hook, eq, t0, Operand(zero_reg));
+ Label skip_hook;
ExternalReference debug_hook_active =
ExternalReference::debug_hook_on_function_call_address(isolate());
li(t0, Operand(debug_hook_active));
Lb(t0, MemOperand(t0));
- Branch(&call_hook, ne, t0, Operand(zero_reg));
-
- Ld(t0, FieldMemOperand(fun, JSFunction::kSharedFunctionInfoOffset));
- Ld(t0, FieldMemOperand(t0, SharedFunctionInfo::kDebugInfoOffset));
- JumpIfSmi(t0, &skip_hook);
- Ld(t0, FieldMemOperand(t0, DebugInfo::kFlagsOffset));
- And(t0, t0, Operand(Smi::FromInt(DebugInfo::kBreakAtEntry)));
Branch(&skip_hook, eq, t0, Operand(zero_reg));
- bind(&call_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -4578,231 +4729,90 @@ bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
return has_frame() || !stub->SometimesSetsUpAFrame();
}
-static inline void BranchOvfHelper(TurboAssembler* tasm, Register overflow_dst,
- Label* overflow_label,
- Label* no_overflow_label) {
- DCHECK(overflow_label || no_overflow_label);
- if (!overflow_label) {
- DCHECK(no_overflow_label);
- tasm->Branch(no_overflow_label, ge, overflow_dst, Operand(zero_reg));
+void TurboAssembler::DaddOverflow(Register dst, Register left,
+ const Operand& right, Register overflow) {
+ Register right_reg = no_reg;
+ Register scratch = t8;
+ if (!right.is_reg()) {
+ li(at, Operand(right));
+ right_reg = at;
} else {
- tasm->Branch(overflow_label, lt, overflow_dst, Operand(zero_reg));
- if (no_overflow_label) tasm->Branch(no_overflow_label);
+ right_reg = right.rm();
}
-}
-void TurboAssembler::DaddBranchOvf(Register dst, Register left,
- const Operand& right, Label* overflow_label,
- Label* no_overflow_label, Register scratch) {
- if (right.is_reg()) {
- DaddBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
- scratch);
- } else {
- Register overflow_dst = t9;
- DCHECK(dst != scratch);
- DCHECK(dst != overflow_dst);
- DCHECK(scratch != overflow_dst);
- DCHECK(left != overflow_dst);
- li(overflow_dst, right); // Load right.
- if (dst == left) {
- mov(scratch, left); // Preserve left.
- Daddu(dst, left, overflow_dst); // Left is overwritten.
- xor_(scratch, dst, scratch); // Original left.
- xor_(overflow_dst, dst, overflow_dst);
- and_(overflow_dst, overflow_dst, scratch);
- } else {
- Daddu(dst, left, overflow_dst);
- xor_(scratch, dst, overflow_dst);
- xor_(overflow_dst, dst, left);
- and_(overflow_dst, scratch, overflow_dst);
- }
- BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
- }
-}
-
-void TurboAssembler::DaddBranchOvf(Register dst, Register left, Register right,
- Label* overflow_label,
- Label* no_overflow_label, Register scratch) {
- Register overflow_dst = t9;
- DCHECK(dst != scratch);
- DCHECK(dst != overflow_dst);
- DCHECK(scratch != overflow_dst);
- DCHECK(left != overflow_dst);
- DCHECK(right != overflow_dst);
- DCHECK(left != scratch);
- DCHECK(right != scratch);
-
- if (left == right && dst == left) {
- mov(overflow_dst, right);
- right = overflow_dst;
- }
-
- if (dst == left) {
- mov(scratch, left); // Preserve left.
- daddu(dst, left, right); // Left is overwritten.
- xor_(scratch, dst, scratch); // Original left.
- xor_(overflow_dst, dst, right);
- and_(overflow_dst, overflow_dst, scratch);
- } else if (dst == right) {
- mov(scratch, right); // Preserve right.
- daddu(dst, left, right); // Right is overwritten.
- xor_(scratch, dst, scratch); // Original right.
- xor_(overflow_dst, dst, left);
- and_(overflow_dst, overflow_dst, scratch);
+ DCHECK(left != scratch && right_reg != scratch && dst != scratch &&
+ overflow != scratch);
+ DCHECK(overflow != left && overflow != right_reg);
+
+ if (dst == left || dst == right_reg) {
+ daddu(scratch, left, right_reg);
+ xor_(overflow, scratch, left);
+ xor_(at, scratch, right_reg);
+ and_(overflow, overflow, at);
+ mov(dst, scratch);
} else {
- daddu(dst, left, right);
- xor_(overflow_dst, dst, left);
- xor_(scratch, dst, right);
- and_(overflow_dst, scratch, overflow_dst);
+ daddu(dst, left, right_reg);
+ xor_(overflow, dst, left);
+ xor_(at, dst, right_reg);
+ and_(overflow, overflow, at);
}
- BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
}
-void TurboAssembler::DsubBranchOvf(Register dst, Register left,
- const Operand& right, Label* overflow_label,
- Label* no_overflow_label, Register scratch) {
- DCHECK(overflow_label || no_overflow_label);
- if (right.is_reg()) {
- DsubBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
- scratch);
+void TurboAssembler::DsubOverflow(Register dst, Register left,
+ const Operand& right, Register overflow) {
+ Register right_reg = no_reg;
+ Register scratch = t8;
+ if (!right.is_reg()) {
+ li(at, Operand(right));
+ right_reg = at;
} else {
- Register overflow_dst = t9;
- DCHECK(dst != scratch);
- DCHECK(dst != overflow_dst);
- DCHECK(scratch != overflow_dst);
- DCHECK(left != overflow_dst);
- DCHECK(left != scratch);
- li(overflow_dst, right); // Load right.
- if (dst == left) {
- mov(scratch, left); // Preserve left.
- Dsubu(dst, left, overflow_dst); // Left is overwritten.
- xor_(overflow_dst, scratch, overflow_dst); // scratch is original left.
- xor_(scratch, dst, scratch); // scratch is original left.
- and_(overflow_dst, scratch, overflow_dst);
- } else {
- Dsubu(dst, left, overflow_dst);
- xor_(scratch, left, overflow_dst);
- xor_(overflow_dst, dst, left);
- and_(overflow_dst, scratch, overflow_dst);
- }
- BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
- }
-}
-
-void TurboAssembler::DsubBranchOvf(Register dst, Register left, Register right,
- Label* overflow_label,
- Label* no_overflow_label, Register scratch) {
- DCHECK(overflow_label || no_overflow_label);
- Register overflow_dst = t9;
- DCHECK(dst != scratch);
- DCHECK(dst != overflow_dst);
- DCHECK(scratch != overflow_dst);
- DCHECK(overflow_dst != left);
- DCHECK(overflow_dst != right);
- DCHECK(scratch != left);
- DCHECK(scratch != right);
-
- // This happens with some crankshaft code. Since Subu works fine if
- // left == right, let's not make that restriction here.
- if (left == right) {
- mov(dst, zero_reg);
- if (no_overflow_label) {
- Branch(no_overflow_label);
- }
+ right_reg = right.rm();
}
- if (dst == left) {
- mov(scratch, left); // Preserve left.
- dsubu(dst, left, right); // Left is overwritten.
- xor_(overflow_dst, dst, scratch); // scratch is original left.
- xor_(scratch, scratch, right); // scratch is original left.
- and_(overflow_dst, scratch, overflow_dst);
- } else if (dst == right) {
- mov(scratch, right); // Preserve right.
- dsubu(dst, left, right); // Right is overwritten.
- xor_(overflow_dst, dst, left);
- xor_(scratch, left, scratch); // Original right.
- and_(overflow_dst, scratch, overflow_dst);
- } else {
- dsubu(dst, left, right);
- xor_(overflow_dst, dst, left);
- xor_(scratch, left, right);
- and_(overflow_dst, scratch, overflow_dst);
- }
- BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
-}
-
-static inline void BranchOvfHelperMult(TurboAssembler* tasm,
- Register overflow_dst,
- Label* overflow_label,
- Label* no_overflow_label) {
- DCHECK(overflow_label || no_overflow_label);
- if (!overflow_label) {
- DCHECK(no_overflow_label);
- tasm->Branch(no_overflow_label, eq, overflow_dst, Operand(zero_reg));
+ DCHECK(left != scratch && right_reg != scratch && dst != scratch &&
+ overflow != scratch);
+ DCHECK(overflow != left && overflow != right_reg);
+
+ if (dst == left || dst == right_reg) {
+ dsubu(scratch, left, right_reg);
+ xor_(overflow, left, scratch);
+ xor_(at, left, right_reg);
+ and_(overflow, overflow, at);
+ mov(dst, scratch);
} else {
- tasm->Branch(overflow_label, ne, overflow_dst, Operand(zero_reg));
- if (no_overflow_label) tasm->Branch(no_overflow_label);
+ dsubu(dst, left, right_reg);
+ xor_(overflow, left, dst);
+ xor_(at, left, right_reg);
+ and_(overflow, overflow, at);
}
}
-void TurboAssembler::MulBranchOvf(Register dst, Register left,
- const Operand& right, Label* overflow_label,
- Label* no_overflow_label, Register scratch) {
- DCHECK(overflow_label || no_overflow_label);
- if (right.is_reg()) {
- MulBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
- scratch);
+void TurboAssembler::MulOverflow(Register dst, Register left,
+ const Operand& right, Register overflow) {
+ Register right_reg = no_reg;
+ Register scratch = t8;
+ if (!right.is_reg()) {
+ li(at, Operand(right));
+ right_reg = at;
} else {
- Register overflow_dst = t9;
- DCHECK(dst != scratch);
- DCHECK(dst != overflow_dst);
- DCHECK(scratch != overflow_dst);
- DCHECK(left != overflow_dst);
- DCHECK(left != scratch);
-
- if (dst == left) {
- Mul(scratch, left, static_cast<int32_t>(right.immediate()));
- Mulh(overflow_dst, left, static_cast<int32_t>(right.immediate()));
- mov(dst, scratch);
- } else {
- Mul(dst, left, static_cast<int32_t>(right.immediate()));
- Mulh(overflow_dst, left, static_cast<int32_t>(right.immediate()));
- }
-
- dsra32(scratch, dst, 0);
- xor_(overflow_dst, overflow_dst, scratch);
-
- BranchOvfHelperMult(this, overflow_dst, overflow_label, no_overflow_label);
+ right_reg = right.rm();
}
-}
-void TurboAssembler::MulBranchOvf(Register dst, Register left, Register right,
- Label* overflow_label,
- Label* no_overflow_label, Register scratch) {
- DCHECK(overflow_label || no_overflow_label);
- Register overflow_dst = t9;
- DCHECK(dst != scratch);
- DCHECK(dst != overflow_dst);
- DCHECK(scratch != overflow_dst);
- DCHECK(overflow_dst != left);
- DCHECK(overflow_dst != right);
- DCHECK(scratch != left);
- DCHECK(scratch != right);
+ DCHECK(left != scratch && right_reg != scratch && dst != scratch &&
+ overflow != scratch);
+ DCHECK(overflow != left && overflow != right_reg);
- if (dst == left || dst == right) {
- Mul(scratch, left, right);
- Mulh(overflow_dst, left, right);
+ if (dst == left || dst == right_reg) {
+ Mul(scratch, left, right_reg);
+ Mulh(overflow, left, right_reg);
mov(dst, scratch);
} else {
- Mul(dst, left, right);
- Mulh(overflow_dst, left, right);
+ Mul(dst, left, right_reg);
+ Mulh(overflow, left, right_reg);
}
dsra32(scratch, dst, 0);
- xor_(overflow_dst, overflow_dst, scratch);
-
- BranchOvfHelperMult(this, overflow_dst, overflow_label, no_overflow_label);
+ xor_(overflow, overflow, scratch);
}
void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
@@ -4861,12 +4871,19 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bd);
}
-void MacroAssembler::JumpToInstructionStream(const InstructionStream* stream) {
- uint64_t bytes_address = reinterpret_cast<uint64_t>(stream->bytes());
- li(kOffHeapTrampolineRegister, Operand(bytes_address, RelocInfo::NONE));
+void MacroAssembler::JumpToInstructionStream(Address entry) {
+ li(kOffHeapTrampolineRegister,
+ Operand(reinterpret_cast<uint64_t>(entry), RelocInfo::OFF_HEAP_TARGET));
Jump(kOffHeapTrampolineRegister);
}
+void MacroAssembler::LoadWeakValue(Register out, Register in,
+ Label* target_if_cleared) {
+ Branch(target_if_cleared, eq, in, Operand(kClearedWeakHeapObject));
+
+ And(out, in, Operand(~kWeakHeapObjectMask));
+}
+
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
DCHECK_GT(value, 0);
@@ -5269,6 +5286,20 @@ void MacroAssembler::AssertFixedArray(Register object) {
}
}
+void MacroAssembler::AssertConstructor(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, t8);
+ Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor, t8,
+ Operand(zero_reg));
+
+ ld(t8, FieldMemOperand(object, HeapObject::kMapOffset));
+ Lbu(t8, FieldMemOperand(t8, Map::kBitFieldOffset));
+ And(t8, t8, Operand(Map::IsConstructorBit::kMask));
+ Check(ne, AbortReason::kOperandIsNotAConstructor, t8, Operand(zero_reg));
+ }
+}
+
void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@@ -5339,15 +5370,18 @@ void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1,
}
  // Check if one of the operands is NaN.
- BranchF32(nullptr, out_of_line, eq, src1, src2);
+ CompareIsNanF32(src1, src2);
+ BranchTrueF(out_of_line);
if (kArchVariant >= kMips64r6) {
max_s(dst, src1, src2);
} else {
Label return_left, return_right, done;
- BranchF32(&return_right, nullptr, lt, src1, src2);
- BranchF32(&return_left, nullptr, lt, src2, src1);
+ CompareF32(OLT, src1, src2);
+ BranchTrueShortF(&return_right);
+ CompareF32(OLT, src2, src1);
+ BranchTrueShortF(&return_left);
// Operands are equal, but check for +/-0.
mfc1(t8, src1);
@@ -5383,15 +5417,18 @@ void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1,
}
  // Check if one of the operands is NaN.
- BranchF32(nullptr, out_of_line, eq, src1, src2);
+ CompareIsNanF32(src1, src2);
+ BranchTrueF(out_of_line);
if (kArchVariant >= kMips64r6) {
min_s(dst, src1, src2);
} else {
Label return_left, return_right, done;
- BranchF32(&return_left, nullptr, lt, src1, src2);
- BranchF32(&return_right, nullptr, lt, src2, src1);
+ CompareF32(OLT, src1, src2);
+ BranchTrueShortF(&return_left);
+ CompareF32(OLT, src2, src1);
+ BranchTrueShortF(&return_right);
// Left equals right => check for -0.
mfc1(t8, src1);
@@ -5427,15 +5464,18 @@ void TurboAssembler::Float64Max(FPURegister dst, FPURegister src1,
}
  // Check if one of the operands is NaN.
- BranchF64(nullptr, out_of_line, eq, src1, src2);
+ CompareIsNanF64(src1, src2);
+ BranchTrueF(out_of_line);
if (kArchVariant >= kMips64r6) {
max_d(dst, src1, src2);
} else {
Label return_left, return_right, done;
- BranchF64(&return_right, nullptr, lt, src1, src2);
- BranchF64(&return_left, nullptr, lt, src2, src1);
+ CompareF64(OLT, src1, src2);
+ BranchTrueShortF(&return_right);
+ CompareF64(OLT, src2, src1);
+ BranchTrueShortF(&return_left);
// Left equals right => check for -0.
dmfc1(t8, src1);
@@ -5470,15 +5510,18 @@ void TurboAssembler::Float64Min(FPURegister dst, FPURegister src1,
}
  // Check if one of the operands is NaN.
- BranchF64(nullptr, out_of_line, eq, src1, src2);
+ CompareIsNanF64(src1, src2);
+ BranchTrueF(out_of_line);
if (kArchVariant >= kMips64r6) {
min_d(dst, src1, src2);
} else {
Label return_left, return_right, done;
- BranchF64(&return_left, nullptr, lt, src1, src2);
- BranchF64(&return_right, nullptr, lt, src2, src1);
+ CompareF64(OLT, src1, src2);
+ BranchTrueShortF(&return_left);
+ CompareF64(OLT, src2, src1);
+ BranchTrueShortF(&return_right);
// Left equals right => check for -0.
dmfc1(t8, src1);
@@ -5705,7 +5748,9 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
pop(ra); // Restore ra
}
-void TurboAssembler::ResetSpeculationPoisonRegister() { UNREACHABLE(); }
+void TurboAssembler::ResetSpeculationPoisonRegister() {
+ li(kSpeculationPoisonRegister, -1);
+}
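// A minimal illustrative sketch (not from this patch; the register and operand
// names below are placeholders) of why -1 is the neutral value here: poisoned
// loads are masked with this register, so an all-ones poison is the identity
// while an all-zero poison (set after a mispredicted branch) nulls the value.
//   Ld(value, MemOperand(object, offset));
//   and_(value, value, kSpeculationPoisonRegister);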
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.h b/deps/v8/src/mips64/macro-assembler-mips64.h
index f623f7f3cb..23ce8d2a96 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/mips64/macro-assembler-mips64.h
@@ -239,41 +239,30 @@ class TurboAssembler : public Assembler {
#undef COND_TYPED_ARGS
#undef COND_ARGS
- // Wrapper functions for the different cmp/branch types.
- inline void BranchF32(Label* target, Label* nan, Condition cc,
- FPURegister cmp1, FPURegister cmp2,
- BranchDelaySlot bd = PROTECT) {
- BranchFCommon(S, target, nan, cc, cmp1, cmp2, bd);
+ // Floating point branches
+ void CompareF32(FPUCondition cc, FPURegister cmp1, FPURegister cmp2) {
+ CompareF(S, cc, cmp1, cmp2);
}
- inline void BranchF64(Label* target, Label* nan, Condition cc,
- FPURegister cmp1, FPURegister cmp2,
- BranchDelaySlot bd = PROTECT) {
- BranchFCommon(D, target, nan, cc, cmp1, cmp2, bd);
+ void CompareIsNanF32(FPURegister cmp1, FPURegister cmp2) {
+ CompareIsNanF(S, cmp1, cmp2);
}
- // Alternate (inline) version for better readability with USE_DELAY_SLOT.
- inline void BranchF64(BranchDelaySlot bd, Label* target, Label* nan,
- Condition cc, FPURegister cmp1, FPURegister cmp2) {
- BranchF64(target, nan, cc, cmp1, cmp2, bd);
+ void CompareF64(FPUCondition cc, FPURegister cmp1, FPURegister cmp2) {
+ CompareF(D, cc, cmp1, cmp2);
}
- inline void BranchF32(BranchDelaySlot bd, Label* target, Label* nan,
- Condition cc, FPURegister cmp1, FPURegister cmp2) {
- BranchF32(target, nan, cc, cmp1, cmp2, bd);
+ void CompareIsNanF64(FPURegister cmp1, FPURegister cmp2) {
+ CompareIsNanF(D, cmp1, cmp2);
}
- // Alias functions for backward compatibility.
- inline void BranchF(Label* target, Label* nan, Condition cc, FPURegister cmp1,
- FPURegister cmp2, BranchDelaySlot bd = PROTECT) {
- BranchF64(target, nan, cc, cmp1, cmp2, bd);
- }
+ void BranchTrueShortF(Label* target);
+ void BranchFalseShortF(Label* target);
- inline void BranchF(BranchDelaySlot bd, Label* target, Label* nan,
- Condition cc, FPURegister cmp1, FPURegister cmp2) {
- BranchF64(bd, target, nan, cc, cmp1, cmp2);
- }
+ void BranchTrueF(Label* target);
+ void BranchFalseF(Label* target);
+ // MSA branches
void BranchMSA(Label* target, MSABranchDF df, MSABranchCondition cond,
MSARegister wt, BranchDelaySlot bd = PROTECT);
@@ -495,6 +484,12 @@ class TurboAssembler : public Assembler {
DEFINE_INSTRUCTION(Slt);
DEFINE_INSTRUCTION(Sltu);
+ DEFINE_INSTRUCTION(Sle);
+ DEFINE_INSTRUCTION(Sleu);
+ DEFINE_INSTRUCTION(Sgt);
+ DEFINE_INSTRUCTION(Sgtu);
+ DEFINE_INSTRUCTION(Sge);
+ DEFINE_INSTRUCTION(Sgeu);
// MIPS32 R2 instruction macro.
DEFINE_INSTRUCTION(Ror);
@@ -603,6 +598,11 @@ class TurboAssembler : public Assembler {
void Movt(Register rd, Register rs, uint16_t cc = 0);
void Movf(Register rd, Register rs, uint16_t cc = 0);
+ void LoadZeroIfConditionNotZero(Register dest, Register condition);
+ void LoadZeroIfConditionZero(Register dest, Register condition);
+ void LoadZeroOnCondition(Register rd, Register rs, const Operand& rt,
+ Condition cond);
+
void Clz(Register rd, Register rs);
void Ctz(Register rd, Register rs);
void Dctz(Register rd, Register rs);
@@ -756,59 +756,17 @@ class TurboAssembler : public Assembler {
void Move(FPURegister dst, uint32_t src);
void Move(FPURegister dst, uint64_t src);
- inline void MulBranchOvf(Register dst, Register left, const Operand& right,
- Label* overflow_label, Register scratch = at) {
- MulBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
- }
-
- inline void MulBranchNoOvf(Register dst, Register left, const Operand& right,
- Label* no_overflow_label, Register scratch = at) {
- MulBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
- }
-
- void MulBranchOvf(Register dst, Register left, const Operand& right,
- Label* overflow_label, Label* no_overflow_label,
- Register scratch = at);
-
- void MulBranchOvf(Register dst, Register left, Register right,
- Label* overflow_label, Label* no_overflow_label,
- Register scratch = at);
-
- inline void DaddBranchOvf(Register dst, Register left, const Operand& right,
- Label* overflow_label, Register scratch = at) {
- DaddBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
- }
-
- inline void DaddBranchNoOvf(Register dst, Register left, const Operand& right,
- Label* no_overflow_label, Register scratch = at) {
- DaddBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
- }
-
- void DaddBranchOvf(Register dst, Register left, const Operand& right,
- Label* overflow_label, Label* no_overflow_label,
- Register scratch = at);
-
- void DaddBranchOvf(Register dst, Register left, Register right,
- Label* overflow_label, Label* no_overflow_label,
- Register scratch = at);
-
- inline void DsubBranchOvf(Register dst, Register left, const Operand& right,
- Label* overflow_label, Register scratch = at) {
- DsubBranchOvf(dst, left, right, overflow_label, nullptr, scratch);
- }
-
- inline void DsubBranchNoOvf(Register dst, Register left, const Operand& right,
- Label* no_overflow_label, Register scratch = at) {
- DsubBranchOvf(dst, left, right, nullptr, no_overflow_label, scratch);
- }
-
- void DsubBranchOvf(Register dst, Register left, const Operand& right,
- Label* overflow_label, Label* no_overflow_label,
- Register scratch = at);
-
- void DsubBranchOvf(Register dst, Register left, Register right,
- Label* overflow_label, Label* no_overflow_label,
- Register scratch = at);
+ // DaddOverflow sets the overflow register to a negative value if
+ // overflow occurred; otherwise it is zero or positive.
+ void DaddOverflow(Register dst, Register left, const Operand& right,
+ Register overflow);
+ // DsubOverflow sets the overflow register to a negative value if
+ // overflow occurred; otherwise it is zero or positive.
+ void DsubOverflow(Register dst, Register left, const Operand& right,
+ Register overflow);
+ // MulOverflow sets the overflow register to zero if no overflow occurred.
+ void MulOverflow(Register dst, Register left, const Operand& right,
+ Register overflow);
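// A minimal caller-side sketch (illustrative; the registers and label below
// are assumptions, not part of this patch) of how the overflow register is
// meant to be consumed with an ordinary Branch:
//   DaddOverflow(v0, a0, Operand(a1), t9);  // t9 < 0 iff the addition overflowed
//   Branch(&on_overflow, lt, t9, Operand(zero_reg));
//   MulOverflow(v0, a0, Operand(a1), t9);   // t9 != 0 iff the multiplication overflowed
//   Branch(&on_overflow, ne, t9, Operand(zero_reg));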
// Number of instructions needed for calculation of switch table entry address
#ifdef _MIPS_ARCH_MIPS64R6
@@ -866,6 +824,18 @@ class TurboAssembler : public Assembler {
void Trunc_ul_s(FPURegister fd, Register rs, FPURegister scratch,
Register result = no_reg);
+ // Round double functions
+ void Trunc_d_d(FPURegister fd, FPURegister fs);
+ void Round_d_d(FPURegister fd, FPURegister fs);
+ void Floor_d_d(FPURegister fd, FPURegister fs);
+ void Ceil_d_d(FPURegister fd, FPURegister fs);
+
+ // Round float functions
+ void Trunc_s_s(FPURegister fd, FPURegister fs);
+ void Round_s_s(FPURegister fd, FPURegister fs);
+ void Floor_s_s(FPURegister fd, FPURegister fs);
+ void Ceil_s_s(FPURegister fd, FPURegister fs);
+
  // Jump if the register contains a smi.
void JumpIfSmi(Register value, Label* smi_label, Register scratch = at,
BranchDelaySlot bd = PROTECT);
@@ -901,6 +871,12 @@ class TurboAssembler : public Assembler {
Handle<HeapObject> code_object_;
bool has_double_zero_reg_set_;
+ void CompareF(SecondaryField sizeField, FPUCondition cc, FPURegister cmp1,
+ FPURegister cmp2);
+
+ void CompareIsNanF(SecondaryField sizeField, FPURegister cmp1,
+ FPURegister cmp2);
+
void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond,
MSARegister wt, BranchDelaySlot bd = PROTECT);
@@ -911,15 +887,6 @@ class TurboAssembler : public Assembler {
bool CalculateOffset(Label* L, int32_t& offset, OffsetSize bits,
Register& scratch, const Operand& rt);
- // Common implementation of BranchF functions for the different formats.
- void BranchFCommon(SecondaryField sizeField, Label* target, Label* nan,
- Condition cc, FPURegister cmp1, FPURegister cmp2,
- BranchDelaySlot bd = PROTECT);
-
- void BranchShortF(SecondaryField sizeField, Label* target, Condition cc,
- FPURegister cmp1, FPURegister cmp2,
- BranchDelaySlot bd = PROTECT);
-
void BranchShortHelperR6(int32_t offset, Label* L);
void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot);
bool BranchShortHelperR6(int32_t offset, Label* L, Condition cond,
@@ -945,6 +912,14 @@ class TurboAssembler : public Assembler {
void BranchLong(Label* L, BranchDelaySlot bdslot);
void BranchAndLinkLong(Label* L, BranchDelaySlot bdslot);
+ template <typename RoundFunc>
+ void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode,
+ RoundFunc round);
+
+ template <typename RoundFunc>
+ void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode,
+ RoundFunc round);
+
// Push a fixed frame, consisting of ra, fp.
void PushCommonFrame(Register marker_reg = no_reg);
};
@@ -1168,7 +1143,11 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
bool builtin_exit_frame = false);
// Generates a trampoline to jump to the off-heap instruction stream.
- void JumpToInstructionStream(const InstructionStream* stream);
+ void JumpToInstructionStream(Address entry);
+
+ // ---------------------------------------------------------------------------
+ // In-place weak references.
+ void LoadWeakValue(Register out, Register in, Label* target_if_cleared);
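// A brief usage sketch (illustrative; kSlotOffset and the cleared label are
// placeholders, not part of this patch): load a tagged weak slot, then let
// LoadWeakValue strip the weak bit or branch away if the reference is cleared.
//   Ld(t0, FieldMemOperand(a0, kSlotOffset));
//   LoadWeakValue(t0, t0, &cleared);  // t0 now holds the strong object pointer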
// -------------------------------------------------------------------------
// StatsCounter support.
@@ -1233,6 +1212,9 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// Abort execution if argument is not a FixedArray, enabled via --debug-code.
void AssertFixedArray(Register object);
+ // Abort execution if argument is not a Constructor, enabled via --debug-code.
+ void AssertConstructor(Register object);
+
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
diff --git a/deps/v8/src/mips64/simulator-mips64.cc b/deps/v8/src/mips64/simulator-mips64.cc
index 9177f8e6aa..53dacdc330 100644
--- a/deps/v8/src/mips64/simulator-mips64.cc
+++ b/deps/v8/src/mips64/simulator-mips64.cc
@@ -6625,10 +6625,10 @@ void Simulator::DecodeTypeImmediate() {
break;
}
case BC1EQZ:
- BranchHelper(!(get_fpu_register(ft_reg) & 0x1));
+ BranchCompactHelper(!(get_fpu_register(ft_reg) & 0x1), 16);
break;
case BC1NEZ:
- BranchHelper(get_fpu_register(ft_reg) & 0x1);
+ BranchCompactHelper((get_fpu_register(ft_reg) & 0x1), 16);
break;
case BZ_V: {
msa_reg_t wt;
diff --git a/deps/v8/src/objects-body-descriptors-inl.h b/deps/v8/src/objects-body-descriptors-inl.h
index a3e51e15e7..b2f3643091 100644
--- a/deps/v8/src/objects-body-descriptors-inl.h
+++ b/deps/v8/src/objects-body-descriptors-inl.h
@@ -10,6 +10,7 @@
#include "src/objects-body-descriptors.h"
#include "src/objects/hash-table.h"
#include "src/transitions.h"
+#include "src/wasm/wasm-objects-inl.h"
namespace v8 {
namespace internal {
@@ -19,30 +20,32 @@ int FlexibleBodyDescriptor<start_offset>::SizeOf(Map* map, HeapObject* object) {
return object->SizeFromMap(map);
}
-bool BodyDescriptorBase::IsValidSlotImpl(HeapObject* obj, int offset) {
- if (!FLAG_unbox_double_fields || obj->map()->HasFastPointerLayout()) {
+bool BodyDescriptorBase::IsValidSlotImpl(Map* map, HeapObject* obj,
+ int offset) {
+ if (!FLAG_unbox_double_fields || map->HasFastPointerLayout()) {
return true;
} else {
DCHECK(FLAG_unbox_double_fields);
DCHECK(IsAligned(offset, kPointerSize));
- LayoutDescriptorHelper helper(obj->map());
+ LayoutDescriptorHelper helper(map);
DCHECK(!helper.all_fields_tagged());
return helper.IsTagged(offset);
}
}
template <typename ObjectVisitor>
-void BodyDescriptorBase::IterateBodyImpl(HeapObject* obj, int start_offset,
- int end_offset, ObjectVisitor* v) {
- if (!FLAG_unbox_double_fields || obj->map()->HasFastPointerLayout()) {
+void BodyDescriptorBase::IterateBodyImpl(Map* map, HeapObject* obj,
+ int start_offset, int end_offset,
+ ObjectVisitor* v) {
+ if (!FLAG_unbox_double_fields || map->HasFastPointerLayout()) {
IteratePointers(obj, start_offset, end_offset, v);
} else {
DCHECK(FLAG_unbox_double_fields);
DCHECK(IsAligned(start_offset, kPointerSize) &&
IsAligned(end_offset, kPointerSize));
- LayoutDescriptorHelper helper(obj->map());
+ LayoutDescriptorHelper helper(map);
DCHECK(!helper.all_fields_tagged());
for (int offset = start_offset; offset < end_offset;) {
int end_of_region_offset;
@@ -69,19 +72,32 @@ void BodyDescriptorBase::IteratePointer(HeapObject* obj, int offset,
v->VisitPointer(obj, HeapObject::RawField(obj, offset));
}
+template <typename ObjectVisitor>
+DISABLE_CFI_PERF void BodyDescriptorBase::IterateMaybeWeakPointers(
+ HeapObject* obj, int start_offset, int end_offset, ObjectVisitor* v) {
+ v->VisitPointers(obj, HeapObject::RawMaybeWeakField(obj, start_offset),
+ HeapObject::RawMaybeWeakField(obj, end_offset));
+}
+
+template <typename ObjectVisitor>
+void BodyDescriptorBase::IterateMaybeWeakPointer(HeapObject* obj, int offset,
+ ObjectVisitor* v) {
+ v->VisitPointer(obj, HeapObject::RawMaybeWeakField(obj, offset));
+}
+
class JSObject::BodyDescriptor final : public BodyDescriptorBase {
public:
static const int kStartOffset = JSReceiver::kPropertiesOrHashOffset;
- static bool IsValidSlot(HeapObject* obj, int offset) {
+ static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
if (offset < kStartOffset) return false;
- return IsValidSlotImpl(obj, offset);
+ return IsValidSlotImpl(map, obj, offset);
}
template <typename ObjectVisitor>
- static inline void IterateBody(HeapObject* obj, int object_size,
+ static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
ObjectVisitor* v) {
- IterateBodyImpl(obj, kStartOffset, object_size, v);
+ IterateBodyImpl(map, obj, kStartOffset, object_size, v);
}
static inline int SizeOf(Map* map, HeapObject* object) {
@@ -93,12 +109,12 @@ class JSObject::FastBodyDescriptor final : public BodyDescriptorBase {
public:
static const int kStartOffset = JSReceiver::kPropertiesOrHashOffset;
- static bool IsValidSlot(HeapObject* obj, int offset) {
+ static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
return offset >= kStartOffset;
}
template <typename ObjectVisitor>
- static inline void IterateBody(HeapObject* obj, int object_size,
+ static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
ObjectVisitor* v) {
IteratePointers(obj, kStartOffset, object_size, v);
}
@@ -110,20 +126,21 @@ class JSObject::FastBodyDescriptor final : public BodyDescriptorBase {
class JSFunction::BodyDescriptor final : public BodyDescriptorBase {
public:
- static bool IsValidSlot(HeapObject* obj, int offset) {
+ static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
if (offset < kSizeWithoutPrototype) return true;
- if (offset < kSizeWithPrototype && obj->map()->has_prototype_slot()) {
+ if (offset < kSizeWithPrototype && map->has_prototype_slot()) {
return true;
}
- return IsValidSlotImpl(obj, offset);
+ return IsValidSlotImpl(map, obj, offset);
}
template <typename ObjectVisitor>
- static inline void IterateBody(HeapObject* obj, int object_size,
+ static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
ObjectVisitor* v) {
- int header_size = JSFunction::cast(obj)->GetHeaderSize();
+ int header_size = JSFunction::GetHeaderSize(map->has_prototype_slot());
+ DCHECK_EQ(header_size, JSObject::GetHeaderSize(map));
IteratePointers(obj, kPropertiesOrHashOffset, header_size, v);
- IterateBodyImpl(obj, header_size, object_size, v);
+ IterateBodyImpl(map, obj, header_size, object_size, v);
}
static inline int SizeOf(Map* map, HeapObject* object) {
@@ -134,23 +151,23 @@ class JSFunction::BodyDescriptor final : public BodyDescriptorBase {
class JSArrayBuffer::BodyDescriptor final : public BodyDescriptorBase {
public:
STATIC_ASSERT(kByteLengthOffset + kPointerSize == kBackingStoreOffset);
- STATIC_ASSERT(kAllocationLengthOffset + kPointerSize == kBitFieldSlot);
+ STATIC_ASSERT(kBackingStoreOffset + kPointerSize == kBitFieldSlot);
STATIC_ASSERT(kBitFieldSlot + kPointerSize == kSize);
- static bool IsValidSlot(HeapObject* obj, int offset) {
- if (offset < kAllocationLengthOffset) return true;
+ static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
+ if (offset < kBitFieldSlot) return true;
if (offset < kSize) return false;
- return IsValidSlotImpl(obj, offset);
+ return IsValidSlotImpl(map, obj, offset);
}
template <typename ObjectVisitor>
- static inline void IterateBody(HeapObject* obj, int object_size,
+ static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
ObjectVisitor* v) {
// Array buffers contain raw pointers that the GC does not know about. These
  // are stored at kBackingStoreOffset and later, so we do not iterate over
// those.
IteratePointers(obj, kPropertiesOrHashOffset, kBackingStoreOffset, v);
- IterateBodyImpl(obj, kSize, object_size, v);
+ IterateBodyImpl(map, obj, kSize, object_size, v);
}
static inline int SizeOf(Map* map, HeapObject* object) {
@@ -162,14 +179,14 @@ template <typename Derived>
class SmallOrderedHashTable<Derived>::BodyDescriptor final
: public BodyDescriptorBase {
public:
- static bool IsValidSlot(HeapObject* obj, int offset) {
+ static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
Derived* table = reinterpret_cast<Derived*>(obj);
if (offset < table->GetDataTableStartOffset()) return false;
- return IsValidSlotImpl(obj, offset);
+ return IsValidSlotImpl(map, obj, offset);
}
template <typename ObjectVisitor>
- static inline void IterateBody(HeapObject* obj, int object_size,
+ static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
ObjectVisitor* v) {
Derived* table = reinterpret_cast<Derived*>(obj);
int start = table->GetDataTableStartOffset();
@@ -186,10 +203,12 @@ class SmallOrderedHashTable<Derived>::BodyDescriptor final
class ByteArray::BodyDescriptor final : public BodyDescriptorBase {
public:
- static bool IsValidSlot(HeapObject* obj, int offset) { return false; }
+ static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
+ return false;
+ }
template <typename ObjectVisitor>
- static inline void IterateBody(HeapObject* obj, int object_size,
+ static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
ObjectVisitor* v) {}
static inline int SizeOf(Map* map, HeapObject* obj) {
@@ -199,13 +218,13 @@ class ByteArray::BodyDescriptor final : public BodyDescriptorBase {
class BytecodeArray::BodyDescriptor final : public BodyDescriptorBase {
public:
- static bool IsValidSlot(HeapObject* obj, int offset) {
+ static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
return offset >= kConstantPoolOffset &&
offset <= kSourcePositionTableOffset;
}
template <typename ObjectVisitor>
- static inline void IterateBody(HeapObject* obj, int object_size,
+ static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
ObjectVisitor* v) {
IteratePointer(obj, kConstantPoolOffset, v);
IteratePointer(obj, kHandlerTableOffset, v);
@@ -220,10 +239,12 @@ class BytecodeArray::BodyDescriptor final : public BodyDescriptorBase {
class BigInt::BodyDescriptor final : public BodyDescriptorBase {
public:
- static bool IsValidSlot(HeapObject* obj, int offset) { return false; }
+ static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
+ return false;
+ }
template <typename ObjectVisitor>
- static inline void IterateBody(HeapObject* obj, int object_size,
+ static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
ObjectVisitor* v) {}
static inline int SizeOf(Map* map, HeapObject* obj) {
@@ -233,10 +254,12 @@ class BigInt::BodyDescriptor final : public BodyDescriptorBase {
class FixedDoubleArray::BodyDescriptor final : public BodyDescriptorBase {
public:
- static bool IsValidSlot(HeapObject* obj, int offset) { return false; }
+ static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
+ return false;
+ }
template <typename ObjectVisitor>
- static inline void IterateBody(HeapObject* obj, int object_size,
+ static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
ObjectVisitor* v) {}
static inline int SizeOf(Map* map, HeapObject* obj) {
@@ -247,12 +270,12 @@ class FixedDoubleArray::BodyDescriptor final : public BodyDescriptorBase {
class FixedTypedArrayBase::BodyDescriptor final : public BodyDescriptorBase {
public:
- static bool IsValidSlot(HeapObject* obj, int offset) {
+ static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
return offset == kBasePointerOffset;
}
template <typename ObjectVisitor>
- static inline void IterateBody(HeapObject* obj, int object_size,
+ static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
ObjectVisitor* v) {
IteratePointer(obj, kBasePointerOffset, v);
}
@@ -262,18 +285,51 @@ class FixedTypedArrayBase::BodyDescriptor final : public BodyDescriptorBase {
}
};
+class WeakFixedArray::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
+ return offset >= kHeaderSize;
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ ObjectVisitor* v) {
+ IterateMaybeWeakPointers(obj, kHeaderSize, object_size, v);
+ }
+
+ static inline int SizeOf(Map* map, HeapObject* object) {
+ return object->SizeFromMap(map);
+ }
+};
+
+class FeedbackMetadata::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
+ return false;
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ ObjectVisitor* v) {}
+
+ static inline int SizeOf(Map* map, HeapObject* obj) {
+ return FeedbackMetadata::SizeFor(
+ FeedbackMetadata::cast(obj)->synchronized_slot_count());
+ }
+};
+
class FeedbackVector::BodyDescriptor final : public BodyDescriptorBase {
public:
- static bool IsValidSlot(HeapObject* obj, int offset) {
+ static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
return offset == kSharedFunctionInfoOffset ||
offset == kOptimizedCodeOffset || offset >= kFeedbackSlotsOffset;
}
template <typename ObjectVisitor>
- static inline void IterateBody(HeapObject* obj, int object_size,
+ static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
ObjectVisitor* v) {
IteratePointer(obj, kSharedFunctionInfoOffset, v);
- IteratePointer(obj, kOptimizedCodeOffset, v);
+ IterateMaybeWeakPointer(obj, kOptimizedCodeOffset, v);
IteratePointers(obj, kFeedbackSlotsOffset, object_size, v);
}
@@ -288,18 +344,18 @@ class JSWeakCollection::BodyDescriptorImpl final : public BodyDescriptorBase {
STATIC_ASSERT(kTableOffset + kPointerSize == kNextOffset);
STATIC_ASSERT(kNextOffset + kPointerSize == kSize);
- static bool IsValidSlot(HeapObject* obj, int offset) {
- return IsValidSlotImpl(obj, offset);
+ static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
+ return IsValidSlotImpl(map, obj, offset);
}
template <typename ObjectVisitor>
- static inline void IterateBody(HeapObject* obj, int object_size,
+ static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
ObjectVisitor* v) {
if (body_visiting_policy == kIgnoreWeakness) {
- IterateBodyImpl(obj, kPropertiesOrHashOffset, object_size, v);
+ IterateBodyImpl(map, obj, kPropertiesOrHashOffset, object_size, v);
} else {
IteratePointers(obj, kPropertiesOrHashOffset, kTableOffset, v);
- IterateBodyImpl(obj, kSize, object_size, v);
+ IterateBodyImpl(map, obj, kSize, object_size, v);
}
}
@@ -310,10 +366,12 @@ class JSWeakCollection::BodyDescriptorImpl final : public BodyDescriptorBase {
class Foreign::BodyDescriptor final : public BodyDescriptorBase {
public:
- static bool IsValidSlot(HeapObject* obj, int offset) { return false; }
+ static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
+ return false;
+ }
template <typename ObjectVisitor>
- static inline void IterateBody(HeapObject* obj, int object_size,
+ static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
ObjectVisitor* v) {
v->VisitExternalReference(Foreign::cast(obj),
reinterpret_cast<Address*>(HeapObject::RawField(
@@ -325,24 +383,26 @@ class Foreign::BodyDescriptor final : public BodyDescriptorBase {
class ExternalOneByteString::BodyDescriptor final : public BodyDescriptorBase {
public:
- static bool IsValidSlot(HeapObject* obj, int offset) { return false; }
+ static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
+ return false;
+ }
template <typename ObjectVisitor>
- static inline void IterateBody(HeapObject* obj, int object_size,
- ObjectVisitor* v) {
- }
+ static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ ObjectVisitor* v) {}
static inline int SizeOf(Map* map, HeapObject* object) { return kSize; }
};
class ExternalTwoByteString::BodyDescriptor final : public BodyDescriptorBase {
public:
- static bool IsValidSlot(HeapObject* obj, int offset) { return false; }
+ static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
+ return false;
+ }
template <typename ObjectVisitor>
- static inline void IterateBody(HeapObject* obj, int object_size,
- ObjectVisitor* v) {
- }
+ static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ ObjectVisitor* v) {}
static inline int SizeOf(Map* map, HeapObject* object) { return kSize; }
};
@@ -354,23 +414,22 @@ class Code::BodyDescriptor final : public BodyDescriptorBase {
STATIC_ASSERT(kDeoptimizationDataOffset + kPointerSize ==
kSourcePositionTableOffset);
STATIC_ASSERT(kSourcePositionTableOffset + kPointerSize ==
- kProtectedInstructionsOffset);
- STATIC_ASSERT(kProtectedInstructionsOffset + kPointerSize ==
kCodeDataContainerOffset);
STATIC_ASSERT(kCodeDataContainerOffset + kPointerSize == kDataStart);
- static bool IsValidSlot(HeapObject* obj, int offset) {
+ static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
// Slots in code can't be invalid because we never trim code objects.
return true;
}
template <typename ObjectVisitor>
- static inline void IterateBody(HeapObject* obj, ObjectVisitor* v) {
+ static inline void IterateBody(Map* map, HeapObject* obj, ObjectVisitor* v) {
int mode_mask = RelocInfo::kCodeTargetMask |
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
+ RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET) |
RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
// GC does not visit data/code in the header and in the body directly.
@@ -383,9 +442,9 @@ class Code::BodyDescriptor final : public BodyDescriptorBase {
}
template <typename ObjectVisitor>
- static inline void IterateBody(HeapObject* obj, int object_size,
+ static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
ObjectVisitor* v) {
- IterateBody(obj, v);
+ IterateBody(map, obj, v);
}
static inline int SizeOf(Map* map, HeapObject* object) {
@@ -395,10 +454,12 @@ class Code::BodyDescriptor final : public BodyDescriptorBase {
class SeqOneByteString::BodyDescriptor final : public BodyDescriptorBase {
public:
- static bool IsValidSlot(HeapObject* obj, int offset) { return false; }
+ static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
+ return false;
+ }
template <typename ObjectVisitor>
- static inline void IterateBody(HeapObject* obj, int object_size,
+ static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
ObjectVisitor* v) {}
static inline int SizeOf(Map* map, HeapObject* obj) {
@@ -409,10 +470,12 @@ class SeqOneByteString::BodyDescriptor final : public BodyDescriptorBase {
class SeqTwoByteString::BodyDescriptor final : public BodyDescriptorBase {
public:
- static bool IsValidSlot(HeapObject* obj, int offset) { return false; }
+ static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
+ return false;
+ }
template <typename ObjectVisitor>
- static inline void IterateBody(HeapObject* obj, int object_size,
+ static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
ObjectVisitor* v) {}
static inline int SizeOf(Map* map, HeapObject* obj) {
@@ -421,48 +484,105 @@ class SeqTwoByteString::BodyDescriptor final : public BodyDescriptorBase {
}
};
+class WasmInstanceObject::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
+ if (offset < kMemoryStartOffset) return true;
+ if (offset < kCompiledModuleOffset) return false;
+ return IsValidSlotImpl(map, obj, offset);
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ ObjectVisitor* v) {
+ IteratePointers(obj, kPropertiesOrHashOffset, kFirstUntaggedOffset, v);
+ IterateBodyImpl(map, obj, kSize, object_size, v);
+ }
+
+ static inline int SizeOf(Map* map, HeapObject* object) {
+ return map->instance_size();
+ }
+};
+
+class Map::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
+ return offset >= Map::kPointerFieldsBeginOffset &&
+ offset < Map::kPointerFieldsEndOffset;
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
+ ObjectVisitor* v) {
+ IteratePointers(obj, Map::kPointerFieldsBeginOffset,
+ Map::kTransitionsOrPrototypeInfoOffset, v);
+ IterateMaybeWeakPointer(obj, kTransitionsOrPrototypeInfoOffset, v);
+ IteratePointers(obj, Map::kTransitionsOrPrototypeInfoOffset + kPointerSize,
+ Map::kPointerFieldsEndOffset, v);
+ }
+
+ static inline int SizeOf(Map* map, HeapObject* obj) { return Map::kSize; }
+};
+
template <typename Op, typename ReturnType, typename T1, typename T2,
- typename T3>
-ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3) {
+ typename T3, typename T4>
+ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
if (type < FIRST_NONSTRING_TYPE) {
switch (type & kStringRepresentationMask) {
case kSeqStringTag:
return ReturnType();
case kConsStringTag:
- return Op::template apply<ConsString::BodyDescriptor>(p1, p2, p3);
+ return Op::template apply<ConsString::BodyDescriptor>(p1, p2, p3, p4);
case kThinStringTag:
- return Op::template apply<ThinString::BodyDescriptor>(p1, p2, p3);
+ return Op::template apply<ThinString::BodyDescriptor>(p1, p2, p3, p4);
case kSlicedStringTag:
- return Op::template apply<SlicedString::BodyDescriptor>(p1, p2, p3);
+ return Op::template apply<SlicedString::BodyDescriptor>(p1, p2, p3, p4);
case kExternalStringTag:
if ((type & kStringEncodingMask) == kOneByteStringTag) {
return Op::template apply<ExternalOneByteString::BodyDescriptor>(
- p1, p2, p3);
+ p1, p2, p3, p4);
} else {
return Op::template apply<ExternalTwoByteString::BodyDescriptor>(
- p1, p2, p3);
+ p1, p2, p3, p4);
}
}
UNREACHABLE();
}
switch (type) {
- case HASH_TABLE_TYPE:
case FIXED_ARRAY_TYPE:
+ case BOILERPLATE_DESCRIPTION_TYPE:
+ case HASH_TABLE_TYPE:
case SCOPE_INFO_TYPE:
- return Op::template apply<FixedArray::BodyDescriptor>(p1, p2, p3);
+ case BLOCK_CONTEXT_TYPE:
+ case CATCH_CONTEXT_TYPE:
+ case DEBUG_EVALUATE_CONTEXT_TYPE:
+ case EVAL_CONTEXT_TYPE:
+ case FUNCTION_CONTEXT_TYPE:
+ case MODULE_CONTEXT_TYPE:
+ case NATIVE_CONTEXT_TYPE:
+ case SCRIPT_CONTEXT_TYPE:
+ case WITH_CONTEXT_TYPE:
+ return Op::template apply<FixedArray::BodyDescriptor>(p1, p2, p3, p4);
+ case WEAK_FIXED_ARRAY_TYPE:
+ return Op::template apply<WeakFixedArray::BodyDescriptor>(p1, p2, p3, p4);
case FIXED_DOUBLE_ARRAY_TYPE:
return ReturnType();
+ case FEEDBACK_METADATA_TYPE:
+ return Op::template apply<FeedbackMetadata::BodyDescriptor>(p1, p2, p3,
+ p4);
case PROPERTY_ARRAY_TYPE:
- return Op::template apply<PropertyArray::BodyDescriptor>(p1, p2, p3);
+ return Op::template apply<PropertyArray::BodyDescriptor>(p1, p2, p3, p4);
case DESCRIPTOR_ARRAY_TYPE:
- return Op::template apply<DescriptorArray::BodyDescriptor>(p1, p2, p3);
+ return Op::template apply<DescriptorArray::BodyDescriptor>(p1, p2, p3,
+ p4);
case TRANSITION_ARRAY_TYPE:
- return Op::template apply<TransitionArray::BodyDescriptor>(p1, p2, p3);
+ return Op::template apply<TransitionArray::BodyDescriptor>(p1, p2, p3,
+ p4);
case FEEDBACK_CELL_TYPE:
- return Op::template apply<FeedbackCell::BodyDescriptor>(p1, p2, p3);
+ return Op::template apply<FeedbackCell::BodyDescriptor>(p1, p2, p3, p4);
case FEEDBACK_VECTOR_TYPE:
- return Op::template apply<FeedbackVector::BodyDescriptor>(p1, p2, p3);
+ return Op::template apply<FeedbackVector::BodyDescriptor>(p1, p2, p3, p4);
case JS_OBJECT_TYPE:
case JS_ERROR_TYPE:
case JS_ARGUMENTS_TYPE:
@@ -474,6 +594,7 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3) {
case JS_VALUE_TYPE:
case JS_DATE_TYPE:
case JS_ARRAY_TYPE:
+ case JS_ARRAY_ITERATOR_TYPE:
case JS_MODULE_NAMESPACE_TYPE:
case JS_TYPED_ARRAY_TYPE:
case JS_DATA_VIEW_TYPE:
@@ -485,11 +606,7 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3) {
case JS_MAP_KEY_VALUE_ITERATOR_TYPE:
case JS_MAP_VALUE_ITERATOR_TYPE:
case JS_STRING_ITERATOR_TYPE:
-
-#define ARRAY_ITERATOR_CASE(type) case type:
- ARRAY_ITERATOR_TYPE_LIST(ARRAY_ITERATOR_CASE)
-#undef ARRAY_ITERATOR_CASE
-
+ case JS_REGEXP_STRING_ITERATOR_TYPE:
case JS_REGEXP_TYPE:
case JS_GLOBAL_PROXY_TYPE:
case JS_GLOBAL_OBJECT_TYPE:
@@ -497,48 +614,53 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3) {
case JS_SPECIAL_API_OBJECT_TYPE:
case JS_MESSAGE_OBJECT_TYPE:
case JS_BOUND_FUNCTION_TYPE:
- case WASM_INSTANCE_TYPE:
+ case WASM_GLOBAL_TYPE:
case WASM_MEMORY_TYPE:
case WASM_MODULE_TYPE:
case WASM_TABLE_TYPE:
- return Op::template apply<JSObject::BodyDescriptor>(p1, p2, p3);
+ return Op::template apply<JSObject::BodyDescriptor>(p1, p2, p3, p4);
+ case WASM_INSTANCE_TYPE:
+ return Op::template apply<WasmInstanceObject::BodyDescriptor>(p1, p2, p3,
+ p4);
case JS_WEAK_MAP_TYPE:
case JS_WEAK_SET_TYPE:
- return Op::template apply<JSWeakCollection::BodyDescriptor>(p1, p2, p3);
+ return Op::template apply<JSWeakCollection::BodyDescriptor>(p1, p2, p3,
+ p4);
case JS_ARRAY_BUFFER_TYPE:
- return Op::template apply<JSArrayBuffer::BodyDescriptor>(p1, p2, p3);
+ return Op::template apply<JSArrayBuffer::BodyDescriptor>(p1, p2, p3, p4);
case JS_FUNCTION_TYPE:
- return Op::template apply<JSFunction::BodyDescriptor>(p1, p2, p3);
+ return Op::template apply<JSFunction::BodyDescriptor>(p1, p2, p3, p4);
case ODDBALL_TYPE:
- return Op::template apply<Oddball::BodyDescriptor>(p1, p2, p3);
+ return Op::template apply<Oddball::BodyDescriptor>(p1, p2, p3, p4);
case JS_PROXY_TYPE:
- return Op::template apply<JSProxy::BodyDescriptor>(p1, p2, p3);
+ return Op::template apply<JSProxy::BodyDescriptor>(p1, p2, p3, p4);
case FOREIGN_TYPE:
- return Op::template apply<Foreign::BodyDescriptor>(p1, p2, p3);
+ return Op::template apply<Foreign::BodyDescriptor>(p1, p2, p3, p4);
case MAP_TYPE:
- return Op::template apply<Map::BodyDescriptor>(p1, p2, p3);
+ return Op::template apply<Map::BodyDescriptor>(p1, p2, p3, p4);
case CODE_TYPE:
- return Op::template apply<Code::BodyDescriptor>(p1, p2, p3);
+ return Op::template apply<Code::BodyDescriptor>(p1, p2, p3, p4);
case CELL_TYPE:
- return Op::template apply<Cell::BodyDescriptor>(p1, p2, p3);
+ return Op::template apply<Cell::BodyDescriptor>(p1, p2, p3, p4);
case PROPERTY_CELL_TYPE:
- return Op::template apply<PropertyCell::BodyDescriptor>(p1, p2, p3);
+ return Op::template apply<PropertyCell::BodyDescriptor>(p1, p2, p3, p4);
case WEAK_CELL_TYPE:
- return Op::template apply<WeakCell::BodyDescriptor>(p1, p2, p3);
+ return Op::template apply<WeakCell::BodyDescriptor>(p1, p2, p3, p4);
case SYMBOL_TYPE:
- return Op::template apply<Symbol::BodyDescriptor>(p1, p2, p3);
+ return Op::template apply<Symbol::BodyDescriptor>(p1, p2, p3, p4);
case BYTECODE_ARRAY_TYPE:
- return Op::template apply<BytecodeArray::BodyDescriptor>(p1, p2, p3);
+ return Op::template apply<BytecodeArray::BodyDescriptor>(p1, p2, p3, p4);
case SMALL_ORDERED_HASH_SET_TYPE:
return Op::template apply<
SmallOrderedHashTable<SmallOrderedHashSet>::BodyDescriptor>(p1, p2,
- p3);
+ p3, p4);
case SMALL_ORDERED_HASH_MAP_TYPE:
return Op::template apply<
SmallOrderedHashTable<SmallOrderedHashMap>::BodyDescriptor>(p1, p2,
- p3);
+ p3, p4);
case CODE_DATA_CONTAINER_TYPE:
- return Op::template apply<CodeDataContainer::BodyDescriptor>(p1, p2, p3);
+ return Op::template apply<CodeDataContainer::BodyDescriptor>(p1, p2, p3,
+ p4);
case HEAP_NUMBER_TYPE:
case MUTABLE_HEAP_NUMBER_TYPE:
case FILLER_TYPE:
@@ -547,27 +669,31 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3) {
case BIGINT_TYPE:
return ReturnType();
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case FIXED_##TYPE##_ARRAY_TYPE: \
- return Op::template apply<FixedTypedArrayBase::BodyDescriptor>(p1, p2, p3);
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case FIXED_##TYPE##_ARRAY_TYPE: \
+ return Op::template apply<FixedTypedArrayBase::BodyDescriptor>(p1, p2, p3, \
+ p4);
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
case SHARED_FUNCTION_INFO_TYPE: {
- return Op::template apply<SharedFunctionInfo::BodyDescriptor>(p1, p2, p3);
+ return Op::template apply<SharedFunctionInfo::BodyDescriptor>(p1, p2, p3,
+ p4);
}
#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE:
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
if (type == ALLOCATION_SITE_TYPE) {
- return Op::template apply<AllocationSite::BodyDescriptor>(p1, p2, p3);
+ return Op::template apply<AllocationSite::BodyDescriptor>(p1, p2, p3,
+ p4);
} else {
- return Op::template apply<StructBodyDescriptor>(p1, p2, p3);
+ return Op::template apply<StructBodyDescriptor>(p1, p2, p3, p4);
}
+ case CALL_HANDLER_INFO_TYPE:
case LOAD_HANDLER_TYPE:
case STORE_HANDLER_TYPE:
- return Op::template apply<StructBodyDescriptor>(p1, p2, p3);
+ return Op::template apply<StructBodyDescriptor>(p1, p2, p3, p4);
default:
PrintF("Unknown type: %d\n", type);
UNREACHABLE();
@@ -585,21 +711,22 @@ void HeapObject::IterateFast(ObjectVisitor* v) {
template <typename ObjectVisitor>
void HeapObject::IterateBodyFast(ObjectVisitor* v) {
Map* m = map();
- IterateBodyFast(m->instance_type(), SizeFromMap(m), v);
+ IterateBodyFast(m, SizeFromMap(m), v);
}
struct CallIterateBody {
template <typename BodyDescriptor, typename ObjectVisitor>
- static void apply(HeapObject* obj, int object_size, ObjectVisitor* v) {
- BodyDescriptor::IterateBody(obj, object_size, v);
+ static void apply(Map* map, HeapObject* obj, int object_size,
+ ObjectVisitor* v) {
+ BodyDescriptor::IterateBody(map, obj, object_size, v);
}
};
template <typename ObjectVisitor>
-void HeapObject::IterateBodyFast(InstanceType type, int object_size,
- ObjectVisitor* v) {
- BodyDescriptorApply<CallIterateBody, void>(type, this, object_size, v);
+void HeapObject::IterateBodyFast(Map* map, int object_size, ObjectVisitor* v) {
+ BodyDescriptorApply<CallIterateBody, void>(map->instance_type(), map, this,
+ object_size, v);
}
} // namespace internal
} // namespace v8
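The hunks above thread the owning Map* through every BodyDescriptor callback (IsValidSlot, IterateBody) and through BodyDescriptorApply, so a descriptor no longer has to re-read the map from the object header and the dispatcher can forward it. As a minimal sketch of the new contract, a hypothetical fixed-layout object with one strong field could be described like this (ExampleObject and kValueOffset are illustrative names, not part of this patch):

class ExampleObject::BodyDescriptor final : public BodyDescriptorBase {
 public:
  static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
    // The only tagged slot is the single strong field.
    return offset == kValueOffset;
  }

  template <typename ObjectVisitor>
  static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
                                 ObjectVisitor* v) {
    // Strong field: IteratePointer. A weak-capable field would use the new
    // IterateMaybeWeakPointer helper instead, as FeedbackVector does above.
    IteratePointer(obj, kValueOffset, v);
  }

  static inline int SizeOf(Map* map, HeapObject* obj) {
    // Fixed-size objects can report the instance size recorded in the map.
    return map->instance_size();
  }
};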
diff --git a/deps/v8/src/objects-body-descriptors.h b/deps/v8/src/objects-body-descriptors.h
index a867fc1527..d6baf86e11 100644
--- a/deps/v8/src/objects-body-descriptors.h
+++ b/deps/v8/src/objects-body-descriptors.h
@@ -18,13 +18,13 @@ namespace internal {
// It is used for invalid slots filtering. If the offset points outside
// of the object or to the map word, the result is UNDEFINED (!!!).
//
-// static bool IsValidSlot(HeapObject* obj, int offset);
+// static bool IsValidSlot(Map* map, HeapObject* obj, int offset);
//
//
// 2) Iterate object's body using stateful object visitor.
//
// template <typename ObjectVisitor>
-// static inline void IterateBody(HeapObject* obj, int object_size,
+// static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
// ObjectVisitor* v);
class BodyDescriptorBase BASE_EMBEDDED {
public:
@@ -36,14 +36,23 @@ class BodyDescriptorBase BASE_EMBEDDED {
static inline void IteratePointer(HeapObject* obj, int offset,
ObjectVisitor* v);
+ template <typename ObjectVisitor>
+ static inline void IterateMaybeWeakPointers(HeapObject* obj, int start_offset,
+ int end_offset, ObjectVisitor* v);
+
+ template <typename ObjectVisitor>
+ static inline void IterateMaybeWeakPointer(HeapObject* obj, int offset,
+ ObjectVisitor* v);
+
protected:
// Returns true for all header and embedder fields.
- static inline bool IsValidSlotImpl(HeapObject* obj, int offset);
+ static inline bool IsValidSlotImpl(Map* map, HeapObject* obj, int offset);
// Treats all header and embedder fields in the range as tagged.
template <typename ObjectVisitor>
- static inline void IterateBodyImpl(HeapObject* obj, int start_offset,
- int end_offset, ObjectVisitor* v);
+ static inline void IterateBodyImpl(Map* map, HeapObject* obj,
+ int start_offset, int end_offset,
+ ObjectVisitor* v);
};
@@ -57,19 +66,19 @@ class FixedBodyDescriptor final : public BodyDescriptorBase {
static const int kEndOffset = end_offset;
static const int kSize = size;
- static bool IsValidSlot(HeapObject* obj, int offset) {
+ static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
return offset >= kStartOffset && offset < kEndOffset;
}
template <typename ObjectVisitor>
- static inline void IterateBody(HeapObject* obj, ObjectVisitor* v) {
+ static inline void IterateBody(Map* map, HeapObject* obj, ObjectVisitor* v) {
IteratePointers(obj, start_offset, end_offset, v);
}
template <typename ObjectVisitor>
- static inline void IterateBody(HeapObject* obj, int object_size,
+ static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
ObjectVisitor* v) {
- IterateBody(obj, v);
+ IterateBody(map, obj, v);
}
static inline int SizeOf(Map* map, HeapObject* object) { return kSize; }
@@ -84,12 +93,12 @@ class FlexibleBodyDescriptor final : public BodyDescriptorBase {
public:
static const int kStartOffset = start_offset;
- static bool IsValidSlot(HeapObject* obj, int offset) {
+ static bool IsValidSlot(Map* map, HeapObject* obj, int offset) {
return (offset >= kStartOffset);
}
template <typename ObjectVisitor>
- static inline void IterateBody(HeapObject* obj, int object_size,
+ static inline void IterateBody(Map* map, HeapObject* obj, int object_size,
ObjectVisitor* v) {
IteratePointers(obj, start_offset, object_size, v);
}
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index 458b807f05..c735cc0813 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -17,12 +17,14 @@
#include "src/objects/data-handler-inl.h"
#include "src/objects/debug-objects-inl.h"
#include "src/objects/literal-objects.h"
+#include "src/objects/maybe-object.h"
#include "src/objects/microtask-inl.h"
#include "src/objects/module.h"
#include "src/objects/promise-inl.h"
#include "src/ostreams.h"
#include "src/regexp/jsregexp.h"
#include "src/transitions.h"
+#include "src/wasm/wasm-objects-inl.h"
namespace v8 {
namespace internal {
@@ -69,6 +71,15 @@ void Object::VerifyPointer(Object* p) {
}
}
+void MaybeObject::VerifyMaybeObjectPointer(MaybeObject* p) {
+ HeapObject* heap_object;
+ if (p->ToStrongOrWeakHeapObject(&heap_object)) {
+ HeapObject::VerifyHeapPointer(heap_object);
+ } else {
+ CHECK(p->IsSmi() || p->IsClearedWeakHeapObject());
+ }
+}
+
namespace {
void VerifyForeignPointer(HeapObject* host, Object* foreign) {
host->VerifyPointer(foreign);
@@ -109,14 +120,33 @@ void HeapObject::HeapObjectVerify() {
case BIGINT_TYPE:
BigInt::cast(this)->BigIntVerify();
break;
+ case CALL_HANDLER_INFO_TYPE:
+ CallHandlerInfo::cast(this)->CallHandlerInfoVerify();
+ break;
case HASH_TABLE_TYPE:
+ case BOILERPLATE_DESCRIPTION_TYPE:
case FIXED_ARRAY_TYPE:
case SCOPE_INFO_TYPE:
+ case BLOCK_CONTEXT_TYPE:
+ case CATCH_CONTEXT_TYPE:
+ case DEBUG_EVALUATE_CONTEXT_TYPE:
+ case EVAL_CONTEXT_TYPE:
+ case FUNCTION_CONTEXT_TYPE:
+ case MODULE_CONTEXT_TYPE:
+ case NATIVE_CONTEXT_TYPE:
+ case SCRIPT_CONTEXT_TYPE:
+ case WITH_CONTEXT_TYPE:
FixedArray::cast(this)->FixedArrayVerify();
break;
+ case WEAK_FIXED_ARRAY_TYPE:
+ WeakFixedArray::cast(this)->WeakFixedArrayVerify();
+ break;
case FIXED_DOUBLE_ARRAY_TYPE:
FixedDoubleArray::cast(this)->FixedDoubleArrayVerify();
break;
+ case FEEDBACK_METADATA_TYPE:
+ FeedbackMetadata::cast(this)->FeedbackMetadataVerify();
+ break;
case BYTE_ARRAY_TYPE:
ByteArray::cast(this)->ByteArrayVerify();
break;
@@ -161,6 +191,7 @@ void HeapObject::HeapObjectVerify() {
case JS_API_OBJECT_TYPE:
case JS_SPECIAL_API_OBJECT_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
+ case WASM_GLOBAL_TYPE:
case WASM_INSTANCE_TYPE:
case WASM_MEMORY_TYPE:
case WASM_MODULE_TYPE:
@@ -224,13 +255,9 @@ void HeapObject::HeapObjectVerify() {
case JS_MAP_VALUE_ITERATOR_TYPE:
JSMapIterator::cast(this)->JSMapIteratorVerify();
break;
-
-#define ARRAY_ITERATOR_CASE(type) case type:
- ARRAY_ITERATOR_TYPE_LIST(ARRAY_ITERATOR_CASE)
-#undef ARRAY_ITERATOR_CASE
+ case JS_ARRAY_ITERATOR_TYPE:
JSArrayIterator::cast(this)->JSArrayIteratorVerify();
break;
-
case JS_STRING_ITERATOR_TYPE:
JSStringIterator::cast(this)->JSStringIteratorVerify();
break;
@@ -249,6 +276,9 @@ void HeapObject::HeapObjectVerify() {
case JS_REGEXP_TYPE:
JSRegExp::cast(this)->JSRegExpVerify();
break;
+ case JS_REGEXP_STRING_ITERATOR_TYPE:
+ JSRegExpStringIterator::cast(this)->JSRegExpStringIteratorVerify();
+ break;
case FILLER_TYPE:
break;
case JS_PROXY_TYPE:
@@ -348,7 +378,13 @@ void FeedbackCell::FeedbackCellVerify() {
CHECK(value()->IsUndefined(isolate) || value()->IsFeedbackVector());
}
-void FeedbackVector::FeedbackVectorVerify() { CHECK(IsFeedbackVector()); }
+void FeedbackVector::FeedbackVectorVerify() {
+ CHECK(IsFeedbackVector());
+ MaybeObject* code = optimized_code_weak_or_smi();
+ MaybeObject::VerifyMaybeObjectPointer(code);
+ CHECK(code->IsSmi() || code->IsClearedWeakHeapObject() ||
+ code->IsWeakHeapObject());
+}
template <class Traits>
void FixedTypedArray<Traits>::FixedTypedArrayVerify() {
@@ -357,7 +393,8 @@ void FixedTypedArray<Traits>::FixedTypedArrayVerify() {
Traits::kInstanceType);
if (base_pointer() == this) {
CHECK(external_pointer() ==
- ExternalReference::fixed_typed_array_base_data_offset().address());
+ ExternalReference::fixed_typed_array_base_data_offset(GetIsolate())
+ .address());
} else {
CHECK_NULL(base_pointer());
}
@@ -485,6 +522,8 @@ void Map::MapVerify() {
DCHECK(prototype_info() == Smi::kZero ||
prototype_info()->IsPrototypeInfo());
}
+ CHECK(prototype_validity_cell()->IsSmi() ||
+ prototype_validity_cell()->IsCell());
}
@@ -509,6 +548,12 @@ void FixedArray::FixedArrayVerify() {
}
}
+void WeakFixedArray::WeakFixedArrayVerify() {
+ for (int i = 0; i < length(); i++) {
+ MaybeObject::VerifyMaybeObjectPointer(Get(i));
+ }
+}
+
void PropertyArray::PropertyArrayVerify() {
if (length() == 0) {
CHECK_EQ(this, this->GetHeap()->empty_property_array());
@@ -537,17 +582,45 @@ void FixedDoubleArray::FixedDoubleArrayVerify() {
}
}
+void FeedbackMetadata::FeedbackMetadataVerify() {
+ if (slot_count() == 0) {
+ CHECK_EQ(GetHeap()->empty_feedback_metadata(), this);
+ } else {
+ FeedbackMetadataIterator iter(this);
+ while (iter.HasNext()) {
+ iter.Next();
+ FeedbackSlotKind kind = iter.kind();
+ CHECK_NE(FeedbackSlotKind::kInvalid, kind);
+ CHECK_GT(FeedbackSlotKind::kKindsNumber, kind);
+ }
+ }
+}
+
void DescriptorArray::DescriptorArrayVerify() {
FixedArrayVerify();
+ int nof_descriptors = number_of_descriptors();
if (number_of_descriptors_storage() == 0) {
Heap* heap = GetHeap();
CHECK_EQ(heap->empty_descriptor_array(), this);
CHECK_EQ(2, length());
- CHECK_EQ(0, number_of_descriptors());
+ CHECK_EQ(0, nof_descriptors);
CHECK_EQ(heap->empty_enum_cache(), GetEnumCache());
} else {
CHECK_LT(2, length());
- CHECK_LE(LengthFor(number_of_descriptors()), length());
+ CHECK_LE(LengthFor(nof_descriptors), length());
+
+ Isolate* isolate = GetIsolate();
+ // Check that properties with private symbols names are non-enumerable.
+ for (int descriptor = 0; descriptor < nof_descriptors; descriptor++) {
+ Object* key = get(ToKeyIndex(descriptor));
+ // number_of_descriptors() may be out of sync with the actual descriptors
+ // written during descriptor array construction.
+ if (key->IsUndefined(isolate)) continue;
+ if (Name::cast(key)->IsPrivate()) {
+ PropertyDetails details = GetDetails(descriptor);
+ CHECK_NE(details.attributes() & DONT_ENUM, 0);
+ }
+ }
}
}
@@ -789,33 +862,60 @@ void JSFunction::JSFunctionVerify() {
void SharedFunctionInfo::SharedFunctionInfoVerify() {
CHECK(IsSharedFunctionInfo());
- VerifyObjectField(kCodeOffset);
- VerifyObjectField(kDebugInfoOffset);
- VerifyObjectField(kFeedbackMetadataOffset);
VerifyObjectField(kFunctionDataOffset);
+ VerifyObjectField(kDebugInfoOffset);
+ VerifyObjectField(kOuterScopeInfoOrFeedbackMetadataOffset);
VerifyObjectField(kFunctionIdentifierOffset);
- VerifyObjectField(kNameOffset);
- VerifyObjectField(kOuterScopeInfoOffset);
- VerifyObjectField(kScopeInfoOffset);
+ VerifyObjectField(kNameOrScopeInfoOffset);
VerifyObjectField(kScriptOffset);
- CHECK(raw_name() == kNoSharedNameSentinel || raw_name()->IsString());
+ Object* value = name_or_scope_info();
+ CHECK(value == kNoSharedNameSentinel || value->IsString() ||
+ value->IsScopeInfo());
+ if (value->IsScopeInfo()) {
+ CHECK_LT(0, ScopeInfo::cast(value)->length());
+ CHECK_NE(value, GetHeap()->empty_scope_info());
+ }
Isolate* isolate = GetIsolate();
- CHECK(function_data()->IsUndefined(isolate) || IsApiFunction() ||
- HasBytecodeArray() || HasAsmWasmData() ||
- HasLazyDeserializationBuiltinId() || HasPreParsedScopeData());
+ CHECK(HasCodeObject() || IsApiFunction() || HasBytecodeArray() ||
+ HasAsmWasmData() || HasBuiltinId() || HasPreParsedScopeData());
CHECK(function_identifier()->IsUndefined(isolate) || HasBuiltinFunctionId() ||
HasInferredName());
+ if (!is_compiled()) {
+ CHECK(!HasFeedbackMetadata());
+ CHECK(outer_scope_info()->IsScopeInfo() ||
+ outer_scope_info()->IsTheHole(isolate));
+ } else if (HasBytecodeArray()) {
+ CHECK(HasFeedbackMetadata());
+ CHECK(feedback_metadata()->IsFeedbackMetadata());
+ }
+
int expected_map_index = Context::FunctionMapIndex(
- language_mode(), kind(), true, has_shared_name(), needs_home_object());
+ language_mode(), kind(), true, HasSharedName(), needs_home_object());
CHECK_EQ(expected_map_index, function_map_index());
if (scope_info()->length() > 0) {
- CHECK(kind() == scope_info()->function_kind());
- CHECK_EQ(kind() == kModule, scope_info()->scope_type() == MODULE_SCOPE);
+ ScopeInfo* info = scope_info();
+ CHECK(kind() == info->function_kind());
+ CHECK_EQ(kind() == kModule, info->scope_type() == MODULE_SCOPE);
+ CHECK_EQ(raw_start_position(), info->StartPosition());
+ CHECK_EQ(raw_end_position(), info->EndPosition());
+ }
+
+ if (IsApiFunction()) {
+ CHECK(construct_as_builtin());
+ } else if (!HasBuiltinId()) {
+ CHECK(!construct_as_builtin());
+ } else {
+ int id = builtin_id();
+ if (id != Builtins::kCompileLazy && id != Builtins::kEmptyFunction) {
+ CHECK(construct_as_builtin());
+ } else {
+ CHECK(!construct_as_builtin());
+ }
}
}
@@ -908,7 +1008,8 @@ void CodeDataContainer::CodeDataContainerVerify() {
}
void Code::CodeVerify() {
- CHECK(IsAligned(reinterpret_cast<intptr_t>(instruction_start()),
+ CHECK_LE(constant_pool_offset(), InstructionSize());
+ CHECK(IsAligned(reinterpret_cast<intptr_t>(InstructionStart()),
kCodeAlignment));
relocation_info()->ObjectVerify();
Address last_gc_pc = nullptr;
@@ -924,46 +1025,6 @@ void Code::CodeVerify() {
}
-void Code::VerifyEmbeddedObjectsDependency() {
- if (!CanContainWeakObjects()) return;
- WeakCell* cell = CachedWeakCell();
- DisallowHeapAllocation no_gc;
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
- Object* obj = it.rinfo()->target_object();
- if (IsWeakObject(obj)) {
- if (obj->IsMap()) {
- Map* map = Map::cast(obj);
- CHECK(map->dependent_code()->Contains(DependentCode::kWeakCodeGroup,
- cell));
- } else if (obj->IsJSObject()) {
- if (isolate->heap()->InNewSpace(obj)) {
- ArrayList* list =
- GetIsolate()->heap()->weak_new_space_object_to_code_list();
- bool found = false;
- for (int i = 0; i < list->Length(); i += 2) {
- WeakCell* obj_cell = WeakCell::cast(list->Get(i));
- if (!obj_cell->cleared() && obj_cell->value() == obj &&
- WeakCell::cast(list->Get(i + 1)) == cell) {
- found = true;
- break;
- }
- }
- CHECK(found);
- } else {
- Handle<HeapObject> key_obj(HeapObject::cast(obj), isolate);
- DependentCode* dep =
- GetIsolate()->heap()->LookupWeakObjectToCodeDependency(key_obj);
- dep->Contains(DependentCode::kWeakCodeGroup, cell);
- }
- }
- }
- }
-}
-
-
void JSArray::JSArrayVerify() {
JSObjectVerify();
Isolate* isolate = GetIsolate();
@@ -1047,11 +1108,20 @@ void JSWeakMap::JSWeakMapVerify() {
void JSArrayIterator::JSArrayIteratorVerify() {
CHECK(IsJSArrayIterator());
JSObjectVerify();
- CHECK(object()->IsJSReceiver() || object()->IsUndefined(GetIsolate()));
+ CHECK(iterated_object()->IsJSReceiver() ||
+ iterated_object()->IsUndefined(GetIsolate()));
+
+ CHECK_GE(next_index()->Number(), 0);
+ CHECK_LE(next_index()->Number(), kMaxSafeInteger);
- CHECK_GE(index()->Number(), 0);
- CHECK_LE(index()->Number(), kMaxSafeInteger);
- CHECK(object_map()->IsMap() || object_map()->IsUndefined(GetIsolate()));
+ if (iterated_object()->IsJSTypedArray()) {
+ // JSTypedArray::length is limited to Smi range.
+ CHECK(next_index()->IsSmi());
+ CHECK_LE(next_index()->Number(), Smi::kMaxValue);
+ } else if (iterated_object()->IsJSArray()) {
+ // JSArray::length is limited to Uint32 range.
+ CHECK_LE(next_index()->Number(), kMaxUInt32);
+ }
}
void JSStringIterator::JSStringIteratorVerify() {
@@ -1256,6 +1326,14 @@ void JSRegExp::JSRegExpVerify() {
}
}
+void JSRegExpStringIterator::JSRegExpStringIteratorVerify() {
+ CHECK(IsJSRegExpStringIterator());
+ JSObjectVerify();
+ CHECK(iterating_string()->IsString());
+ CHECK(iterating_regexp()->IsObject());
+ VerifySmiField(kFlagsOffset);
+}
+
void JSProxy::JSProxyVerify() {
CHECK(IsJSProxy());
VerifyPointer(target());
@@ -1395,12 +1473,11 @@ void Module::ModuleVerify() {
void PrototypeInfo::PrototypeInfoVerify() {
CHECK(IsPrototypeInfo());
CHECK(weak_cell()->IsWeakCell() || weak_cell()->IsUndefined(GetIsolate()));
- if (prototype_users()->IsWeakFixedArray()) {
- WeakFixedArray::cast(prototype_users())->FixedArrayVerify();
+ if (prototype_users()->IsFixedArrayOfWeakCells()) {
+ FixedArrayOfWeakCells::cast(prototype_users())->FixedArrayVerify();
} else {
CHECK(prototype_users()->IsSmi());
}
- CHECK(validity_cell()->IsSmi() || validity_cell()->IsCell());
}
void Tuple2::Tuple2Verify() {
@@ -1422,6 +1499,43 @@ void Tuple3::Tuple3Verify() {
VerifyObjectField(kValue3Offset);
}
+void WasmCompiledModule::WasmCompiledModuleVerify() {
+ CHECK(IsWasmCompiledModule());
+ VerifyObjectField(kSharedOffset);
+ VerifyObjectField(kNativeContextOffset);
+ VerifyObjectField(kExportWrappersOffset);
+ VerifyObjectField(kNextInstanceOffset);
+ VerifyObjectField(kPrevInstanceOffset);
+ VerifyObjectField(kOwningInstanceOffset);
+ VerifyObjectField(kWasmModuleOffset);
+ VerifyObjectField(kNativeModuleOffset);
+ VerifyObjectField(kLazyCompileDataOffset);
+ VerifyObjectField(kUseTrapHandlerOffset);
+}
+
+void WasmDebugInfo::WasmDebugInfoVerify() {
+ CHECK(IsWasmDebugInfo());
+ VerifyObjectField(kInstanceOffset);
+ CHECK(wasm_instance()->IsWasmInstanceObject());
+ VerifyObjectField(kInterpreterHandleOffset);
+ CHECK(interpreter_handle()->IsUndefined(GetIsolate()) ||
+ interpreter_handle()->IsForeign());
+ VerifyObjectField(kInterpretedFunctionsOffset);
+ VerifyObjectField(kLocalsNamesOffset);
+ VerifyObjectField(kCWasmEntriesOffset);
+ VerifyObjectField(kCWasmEntryMapOffset);
+}
+
+void WasmSharedModuleData::WasmSharedModuleDataVerify() {
+ CHECK(IsWasmSharedModuleData());
+ VerifyObjectField(kModuleWrapperOffset);
+ CHECK(module_wrapper()->IsForeign());
+ VerifyObjectField(kModuleBytesOffset);
+ VerifyObjectField(kScriptOffset);
+ VerifyObjectField(kAsmJsOffsetTableOffset);
+ VerifyObjectField(kBreakPointInfosOffset);
+}
+
void DataHandler::DataHandlerVerify() {
CHECK(IsDataHandler());
CHECK_IMPLIES(!smi_handler()->IsSmi(),
@@ -1481,6 +1595,14 @@ void AccessCheckInfo::AccessCheckInfoVerify() {
VerifyPointer(data());
}
+void CallHandlerInfo::CallHandlerInfoVerify() {
+ CHECK(IsCallHandlerInfo());
+ CHECK(map() == GetHeap()->side_effect_call_handler_info_map() ||
+ map() == GetHeap()->side_effect_free_call_handler_info_map());
+ VerifyPointer(callback());
+ VerifyPointer(js_callback());
+ VerifyPointer(data());
+}
void InterceptorInfo::InterceptorInfoVerify() {
CHECK(IsInterceptorInfo());
@@ -1543,6 +1665,14 @@ void Script::ScriptVerify() {
VerifyPointer(name());
VerifyPointer(wrapper());
VerifyPointer(line_ends());
+ for (int i = 0; i < shared_function_infos()->length(); ++i) {
+ MaybeObject* maybe_object = shared_function_infos()->Get(i);
+ HeapObject* heap_object;
+ CHECK(maybe_object->IsWeakHeapObject() ||
+ maybe_object->IsClearedWeakHeapObject() ||
+ (maybe_object->ToStrongHeapObject(&heap_object) &&
+ heap_object->IsUndefined(GetIsolate())));
+ }
}
@@ -1585,6 +1715,12 @@ void PreParsedScopeData::PreParsedScopeDataVerify() {
CHECK(child_data()->IsFixedArray());
}
+void InterpreterData::InterpreterDataVerify() {
+ CHECK(IsInterpreterData());
+ CHECK(bytecode_array()->IsBytecodeArray());
+ CHECK(interpreter_trampoline()->IsCode());
+}
+
#endif // VERIFY_HEAP
#ifdef DEBUG
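Several of the verifiers added above (VerifyMaybeObjectPointer, WeakFixedArrayVerify, FeedbackVectorVerify) revolve around the three states a weak-capable slot can be in. A hedged sketch of the same dispatch, using only the MaybeObject calls that appear in this patch (HandleMaybeObjectSlot is a made-up name):

void HandleMaybeObjectSlot(MaybeObject* slot) {
  HeapObject* heap_object;
  if (slot->IsSmi()) {
    // Immediate payload; nothing on the heap to verify.
  } else if (slot->IsClearedWeakHeapObject()) {
    // The weak referent was collected; the slot holds the cleared marker.
  } else if (slot->ToStrongOrWeakHeapObject(&heap_object)) {
    // Live referent, held either strongly or weakly
    // (IsWeakHeapObject() distinguishes the two).
    HeapObject::VerifyHeapPointer(heap_object);
  }
}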
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 9c3ac94ab5..bda031e063 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -18,15 +18,13 @@
#include "src/builtins/builtins.h"
#include "src/contexts-inl.h"
#include "src/conversions-inl.h"
-#include "src/factory.h"
#include "src/feedback-vector-inl.h"
#include "src/field-index-inl.h"
#include "src/field-type.h"
#include "src/handles-inl.h"
+#include "src/heap/factory.h"
#include "src/heap/heap-inl.h"
-#include "src/heap/heap.h"
#include "src/isolate-inl.h"
-#include "src/isolate.h"
#include "src/keys.h"
#include "src/layout-descriptor-inl.h"
#include "src/lookup-cache-inl.h"
@@ -37,11 +35,11 @@
#include "src/objects/data-handler-inl.h"
#include "src/objects/fixed-array-inl.h"
#include "src/objects/hash-table-inl.h"
-#include "src/objects/hash-table.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-collection-inl.h"
#include "src/objects/js-promise-inl.h"
#include "src/objects/js-regexp-inl.h"
+#include "src/objects/js-regexp-string-iterator-inl.h"
#include "src/objects/literal-objects.h"
#include "src/objects/module-inl.h"
#include "src/objects/regexp-match-info.h"
@@ -79,19 +77,22 @@ int PropertyDetails::field_width_in_words() const {
}
TYPE_CHECKER(BigInt, BIGINT_TYPE)
-TYPE_CHECKER(BreakPointInfo, TUPLE2_TYPE)
+TYPE_CHECKER(BoilerplateDescription, BOILERPLATE_DESCRIPTION_TYPE)
TYPE_CHECKER(BreakPoint, TUPLE2_TYPE)
-TYPE_CHECKER(CallHandlerInfo, TUPLE3_TYPE)
+TYPE_CHECKER(BreakPointInfo, TUPLE2_TYPE)
+TYPE_CHECKER(CallHandlerInfo, CALL_HANDLER_INFO_TYPE)
TYPE_CHECKER(Cell, CELL_TYPE)
TYPE_CHECKER(ConstantElementsPair, TUPLE2_TYPE)
TYPE_CHECKER(CoverageInfo, FIXED_ARRAY_TYPE)
TYPE_CHECKER(DescriptorArray, DESCRIPTOR_ARRAY_TYPE)
TYPE_CHECKER(FeedbackCell, FEEDBACK_CELL_TYPE)
+TYPE_CHECKER(FeedbackMetadata, FEEDBACK_METADATA_TYPE)
TYPE_CHECKER(FeedbackVector, FEEDBACK_VECTOR_TYPE)
TYPE_CHECKER(Foreign, FOREIGN_TYPE)
TYPE_CHECKER(FreeSpace, FREE_SPACE_TYPE)
TYPE_CHECKER(HashTable, HASH_TABLE_TYPE)
TYPE_CHECKER(HeapNumber, HEAP_NUMBER_TYPE)
+TYPE_CHECKER(JSArrayIterator, JS_ARRAY_ITERATOR_TYPE)
TYPE_CHECKER(JSAsyncFromSyncIterator, JS_ASYNC_FROM_SYNC_ITERATOR_TYPE)
TYPE_CHECKER(JSAsyncGeneratorObject, JS_ASYNC_GENERATOR_OBJECT_TYPE)
TYPE_CHECKER(JSBoundFunction, JS_BOUND_FUNCTION_TYPE)
@@ -116,6 +117,7 @@ TYPE_CHECKER(SmallOrderedHashSet, SMALL_ORDERED_HASH_SET_TYPE)
TYPE_CHECKER(SourcePositionTableWithFrameCache, TUPLE2_TYPE)
TYPE_CHECKER(TemplateObjectDescription, TUPLE2_TYPE)
TYPE_CHECKER(TransitionArray, TRANSITION_ARRAY_TYPE)
+TYPE_CHECKER(WasmGlobalObject, WASM_GLOBAL_TYPE)
TYPE_CHECKER(WasmInstanceObject, WASM_INSTANCE_TYPE)
TYPE_CHECKER(WasmMemoryObject, WASM_MEMORY_TYPE)
TYPE_CHECKER(WasmModuleObject, WASM_MODULE_TYPE)
@@ -151,10 +153,6 @@ bool HeapObject::IsJSGeneratorObject() const {
IsJSAsyncGeneratorObject();
}
-bool HeapObject::IsBoilerplateDescription() const {
- return IsFixedArrayExact();
-}
-
bool HeapObject::IsClassBoilerplate() const { return IsFixedArrayExact(); }
bool HeapObject::IsExternal() const {
@@ -311,12 +309,6 @@ bool HeapObject::IsJSSetIterator() const {
instance_type == JS_SET_KEY_VALUE_ITERATOR_TYPE);
}
-bool HeapObject::IsJSArrayIterator() const {
- InstanceType instance_type = map()->instance_type();
- return (instance_type >= FIRST_ARRAY_ITERATOR_TYPE &&
- instance_type <= LAST_ARRAY_ITERATOR_TYPE);
-}
-
bool HeapObject::IsJSWeakCollection() const {
return IsJSWeakMap() || IsJSWeakSet();
}
@@ -346,8 +338,6 @@ bool HeapObject::IsRegExpMatchInfo() const { return IsFixedArrayExact(); }
bool Object::IsLayoutDescriptor() const { return IsSmi() || IsByteArray(); }
-bool HeapObject::IsFeedbackMetadata() const { return IsFixedArrayExact(); }
-
bool HeapObject::IsDeoptimizationData() const {
// Must be a fixed array.
if (!IsFixedArrayExact()) return false;
@@ -429,6 +419,10 @@ bool HeapObject::IsNumberWrapper() const {
return IsJSValue() && JSValue::cast(this)->value()->IsNumber();
}
+bool HeapObject::IsBigIntWrapper() const {
+ return IsJSValue() && JSValue::cast(this)->value()->IsBigInt();
+}
+
bool HeapObject::IsSymbolWrapper() const {
return IsJSValue() && JSValue::cast(this)->value()->IsSymbol();
}
@@ -442,10 +436,6 @@ bool HeapObject::IsJSArrayBufferView() const {
return IsJSDataView() || IsJSTypedArray();
}
-bool HeapObject::IsWeakHashTable() const {
- return map() == GetHeap()->weak_hash_table_map();
-}
-
bool HeapObject::IsDictionary() const {
return IsHashTable() && this != GetHeap()->string_table();
}
@@ -632,7 +622,6 @@ CAST_ACCESSOR(TemplateObjectDescription)
CAST_ACCESSOR(Tuple2)
CAST_ACCESSOR(Tuple3)
CAST_ACCESSOR(WeakCell)
-CAST_ACCESSOR(WeakHashTable)
bool Object::HasValidElements() {
// Dictionary is covered under FixedArray.
@@ -893,7 +882,7 @@ MaybeHandle<Object> JSReceiver::GetProperty(Isolate* isolate,
}
// static
-MUST_USE_RESULT MaybeHandle<FixedArray> JSReceiver::OwnPropertyKeys(
+V8_WARN_UNUSED_RESULT MaybeHandle<FixedArray> JSReceiver::OwnPropertyKeys(
Handle<JSReceiver> object) {
return KeyAccumulator::GetKeys(object, KeyCollectionMode::kOwnOnly,
ALL_PROPERTIES,
@@ -924,6 +913,10 @@ Object** HeapObject::RawField(HeapObject* obj, int byte_offset) {
return reinterpret_cast<Object**>(FIELD_ADDR(obj, byte_offset));
}
+MaybeObject** HeapObject::RawMaybeWeakField(HeapObject* obj, int byte_offset) {
+ return reinterpret_cast<MaybeObject**>(FIELD_ADDR(obj, byte_offset));
+}
+
int Smi::ToInt(const Object* object) { return Smi::cast(object)->value(); }
MapWord MapWord::FromMap(const Map* map) {
@@ -1662,8 +1655,8 @@ void Object::VerifyApiCallResultType() {
DCHECK(IsHeapObject());
Isolate* isolate = HeapObject::cast(this)->GetIsolate();
if (!(IsString() || IsSymbol() || IsJSReceiver() || IsHeapNumber() ||
- IsUndefined(isolate) || IsTrue(isolate) || IsFalse(isolate) ||
- IsNull(isolate))) {
+ IsBigInt() || IsUndefined(isolate) || IsTrue(isolate) ||
+ IsFalse(isolate) || IsNull(isolate))) {
FATAL("API call returned invalid object");
}
#endif // DEBUG
@@ -2230,6 +2223,15 @@ int HeapObject::SizeFromMap(Map* map) const {
return FixedDoubleArray::SizeFor(
reinterpret_cast<const FixedDoubleArray*>(this)->synchronized_length());
}
+ if (instance_type == FEEDBACK_METADATA_TYPE) {
+ return FeedbackMetadata::SizeFor(
+ reinterpret_cast<const FeedbackMetadata*>(this)
+ ->synchronized_slot_count());
+ }
+ if (instance_type == WEAK_FIXED_ARRAY_TYPE) {
+ return WeakFixedArray::SizeFor(
+ reinterpret_cast<const WeakFixedArray*>(this)->synchronized_length());
+ }
if (instance_type >= FIRST_FIXED_TYPED_ARRAY_TYPE &&
instance_type <= LAST_FIXED_TYPED_ARRAY_TYPE) {
return reinterpret_cast<const FixedTypedArrayBase*>(this)->TypedArraySize(
@@ -2292,6 +2294,13 @@ bool AccessorInfo::has_getter() {
return result;
}
+bool AccessorInfo::has_setter() {
+ bool result = setter() != Smi::kZero;
+ DCHECK_EQ(result, setter() != Smi::kZero &&
+ Foreign::cast(setter())->foreign_address() != nullptr);
+ return result;
+}
+
ACCESSORS(AsyncGeneratorRequest, next, Object, kNextOffset)
SMI_ACCESSORS(AsyncGeneratorRequest, resume_mode, kResumeModeOffset)
ACCESSORS(AsyncGeneratorRequest, value, Object, kValueOffset)
@@ -2317,6 +2326,15 @@ bool FunctionTemplateInfo::instantiated() {
return shared_function_info()->IsSharedFunctionInfo();
}
+bool FunctionTemplateInfo::BreakAtEntry() {
+ Object* maybe_shared = shared_function_info();
+ if (maybe_shared->IsSharedFunctionInfo()) {
+ SharedFunctionInfo* shared = SharedFunctionInfo::cast(maybe_shared);
+ return shared->BreakAtEntry();
+ }
+ return false;
+}
+
FunctionTemplateInfo* FunctionTemplateInfo::GetParent(Isolate* isolate) {
Object* parent = parent_template();
return parent->IsUndefined(isolate) ? nullptr
@@ -2342,7 +2360,6 @@ ACCESSORS(PrototypeInfo, weak_cell, Object, kWeakCellOffset)
ACCESSORS(PrototypeInfo, prototype_users, Object, kPrototypeUsersOffset)
ACCESSORS(PrototypeInfo, object_create_map, Object, kObjectCreateMap)
SMI_ACCESSORS(PrototypeInfo, registry_slot, kRegistrySlotOffset)
-ACCESSORS(PrototypeInfo, validity_cell, Object, kValidityCellOffset)
SMI_ACCESSORS(PrototypeInfo, bit_field, kBitFieldOffset)
BOOL_ACCESSORS(PrototypeInfo, bit_field, should_be_fast_map, kShouldBeFastBit)
@@ -2387,11 +2404,18 @@ BOOL_ACCESSORS(InterceptorInfo, flags, can_intercept_symbols,
BOOL_ACCESSORS(InterceptorInfo, flags, all_can_read, kAllCanReadBit)
BOOL_ACCESSORS(InterceptorInfo, flags, non_masking, kNonMasking)
BOOL_ACCESSORS(InterceptorInfo, flags, is_named, kNamed)
+BOOL_ACCESSORS(InterceptorInfo, flags, has_no_side_effect, kHasNoSideEffect)
ACCESSORS(CallHandlerInfo, callback, Object, kCallbackOffset)
ACCESSORS(CallHandlerInfo, js_callback, Object, kJsCallbackOffset)
ACCESSORS(CallHandlerInfo, data, Object, kDataOffset)
+bool CallHandlerInfo::IsSideEffectFreeCallHandlerInfo() const {
+ DCHECK(map() == GetHeap()->side_effect_call_handler_info_map() ||
+ map() == GetHeap()->side_effect_free_call_handler_info_map());
+ return map() == GetHeap()->side_effect_free_call_handler_info_map();
+}
+
ACCESSORS(TemplateInfo, tag, Object, kTagOffset)
ACCESSORS(TemplateInfo, serial_number, Object, kSerialNumberOffset)
SMI_ACCESSORS(TemplateInfo, number_of_properties, kNumberOfProperties)
@@ -2545,8 +2569,12 @@ void JSFunction::ClearOptimizationMarker() {
feedback_vector()->ClearOptimizationMarker();
}
+// Optimized code marked for deoptimization will tier back down to running
+// interpreted on its next activation, and already doesn't count as IsOptimized.
bool JSFunction::IsInterpreted() {
- return code()->is_interpreter_trampoline_builtin();
+ return code()->is_interpreter_trampoline_builtin() ||
+ (code()->kind() == Code::OPTIMIZED_FUNCTION &&
+ code()->marked_for_deoptimization());
}
bool JSFunction::ChecksOptimizationMarker() {
@@ -2581,7 +2609,7 @@ void JSFunction::CompleteInobjectSlackTrackingIfActive() {
AbstractCode* JSFunction::abstract_code() {
if (IsInterpreted()) {
- return AbstractCode::cast(shared()->bytecode_array());
+ return AbstractCode::cast(shared()->GetBytecodeArray());
} else {
return AbstractCode::cast(code());
}
@@ -3151,6 +3179,8 @@ BIT_FIELD_ACCESSORS(AccessorInfo, flags, is_special_data_property,
BIT_FIELD_ACCESSORS(AccessorInfo, flags, replace_on_access,
AccessorInfo::ReplaceOnAccessBit)
BIT_FIELD_ACCESSORS(AccessorInfo, flags, is_sloppy, AccessorInfo::IsSloppyBit)
+BIT_FIELD_ACCESSORS(AccessorInfo, flags, has_no_side_effect,
+ AccessorInfo::HasNoSideEffectBit)
BIT_FIELD_ACCESSORS(AccessorInfo, flags, initial_property_attributes,
AccessorInfo::InitialAttributesBits)
@@ -3376,35 +3406,6 @@ Handle<ObjectHashTable> ObjectHashTable::Shrink(Handle<ObjectHashTable> table) {
return DerivedHashTable::Shrink(table);
}
-bool WeakHashTableShape::IsMatch(Handle<Object> key, Object* other) {
- if (other->IsWeakCell()) other = WeakCell::cast(other)->value();
- return key->IsWeakCell() ? WeakCell::cast(*key)->value() == other
- : *key == other;
-}
-
-uint32_t WeakHashTableShape::Hash(Isolate* isolate, Handle<Object> key) {
- intptr_t hash =
- key->IsWeakCell()
- ? reinterpret_cast<intptr_t>(WeakCell::cast(*key)->value())
- : reinterpret_cast<intptr_t>(*key);
- return (uint32_t)(hash & 0xFFFFFFFF);
-}
-
-uint32_t WeakHashTableShape::HashForObject(Isolate* isolate, Object* other) {
- if (other->IsWeakCell()) other = WeakCell::cast(other)->value();
- intptr_t hash = reinterpret_cast<intptr_t>(other);
- return (uint32_t)(hash & 0xFFFFFFFF);
-}
-
-Handle<Object> WeakHashTableShape::AsHandle(Isolate* isolate,
- Handle<Object> key) {
- return key;
-}
-
-int WeakHashTableShape::GetMapRootIndex() {
- return Heap::kWeakHashTableMapRootIndex;
-}
-
Relocatable::Relocatable(Isolate* isolate) {
isolate_ = isolate;
prev_ = isolate->relocatable_top();
@@ -3469,15 +3470,15 @@ ACCESSORS(JSAsyncFromSyncIterator, next, Object, kNextOffset)
ACCESSORS(JSStringIterator, string, String, kStringOffset)
SMI_ACCESSORS(JSStringIterator, index, kNextIndexOffset)
-bool ScopeInfo::IsAsmModule() { return AsmModuleField::decode(Flags()); }
+bool ScopeInfo::IsAsmModule() const { return AsmModuleField::decode(Flags()); }
-bool ScopeInfo::HasSimpleParameters() {
+bool ScopeInfo::HasSimpleParameters() const {
return HasSimpleParametersField::decode(Flags());
}
#define FIELD_ACCESSORS(name) \
void ScopeInfo::Set##name(int value) { set(k##name, Smi::FromInt(value)); } \
- int ScopeInfo::name() { \
+ int ScopeInfo::name() const { \
if (length() > 0) { \
return Smi::ToInt(get(k##name)); \
} else { \
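One behavioral change in the objects-inl.h hunks above is worth spelling out: JSFunction::IsInterpreted() now also returns true when the function's optimized code has been marked for deoptimization, because the next activation re-enters the interpreter. A small sketch of the consequence (MaybeLogTierDown is an illustrative helper, not from the patch):

void MaybeLogTierDown(JSFunction* function) {
  Code* code = function->code();
  if (code->kind() == Code::OPTIMIZED_FUNCTION &&
      code->marked_for_deoptimization()) {
    // IsInterpreted() already reports true here, so callers that branch on
    // it, such as abstract_code() above, return the bytecode array rather
    // than the code object that is about to be deoptimized.
    DCHECK(function->IsInterpreted());
  }
}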
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index 68f147f7d4..942b9de0ba 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -91,8 +91,20 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
break;
case HASH_TABLE_TYPE:
case FIXED_ARRAY_TYPE:
+ case BLOCK_CONTEXT_TYPE:
+ case CATCH_CONTEXT_TYPE:
+ case DEBUG_EVALUATE_CONTEXT_TYPE:
+ case EVAL_CONTEXT_TYPE:
+ case FUNCTION_CONTEXT_TYPE:
+ case MODULE_CONTEXT_TYPE:
+ case NATIVE_CONTEXT_TYPE:
+ case SCRIPT_CONTEXT_TYPE:
+ case WITH_CONTEXT_TYPE:
FixedArray::cast(this)->FixedArrayPrint(os);
break;
+ case BOILERPLATE_DESCRIPTION_TYPE:
+ BoilerplateDescription::cast(this)->BoilerplateDescriptionPrint(os);
+ break;
case PROPERTY_ARRAY_TYPE:
PropertyArray::cast(this)->PropertyArrayPrint(os);
break;
@@ -126,12 +138,6 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
TYPED_ARRAYS(PRINT_FIXED_TYPED_ARRAY)
#undef PRINT_FIXED_TYPED_ARRAY
-#define ARRAY_ITERATOR_CASE(type) case type:
- ARRAY_ITERATOR_TYPE_LIST(ARRAY_ITERATOR_CASE)
-#undef ARRAY_ITERATOR_CASE
- JSArrayIterator::cast(this)->JSArrayIteratorPrint(os);
- break;
-
case FILLER_TYPE:
os << "filler";
break;
@@ -143,6 +149,7 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case JS_ASYNC_GENERATOR_OBJECT_TYPE:
case JS_ARGUMENTS_TYPE:
case JS_ERROR_TYPE:
+ case WASM_GLOBAL_TYPE:
case WASM_INSTANCE_TYPE: // TODO(titzer): debug printing for wasm objects
case WASM_MEMORY_TYPE:
case WASM_MODULE_TYPE:
@@ -158,6 +165,9 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case JS_REGEXP_TYPE:
JSRegExp::cast(this)->JSRegExpPrint(os);
break;
+ case JS_REGEXP_STRING_ITERATOR_TYPE:
+ JSRegExpStringIterator::cast(this)->JSRegExpStringIteratorPrint(os);
+ break;
case ODDBALL_TYPE:
Oddball::cast(this)->to_string()->Print(os);
break;
@@ -215,6 +225,9 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case FOREIGN_TYPE:
Foreign::cast(this)->ForeignPrint(os);
break;
+ case CALL_HANDLER_INFO_TYPE:
+ CallHandlerInfo::cast(this)->CallHandlerInfoPrint(os);
+ break;
case SHARED_FUNCTION_INFO_TYPE:
SharedFunctionInfo::cast(this)->SharedFunctionInfoPrint(os);
break;
@@ -233,6 +246,9 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case JS_ARRAY_BUFFER_TYPE:
JSArrayBuffer::cast(this)->JSArrayBufferPrint(os);
break;
+ case JS_ARRAY_ITERATOR_TYPE:
+ JSArrayIterator::cast(this)->JSArrayIteratorPrint(os);
+ break;
case JS_TYPED_ARRAY_TYPE:
JSTypedArray::cast(this)->JSTypedArrayPrint(os);
break;
@@ -255,8 +271,39 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case SCOPE_INFO_TYPE:
ScopeInfo::cast(this)->ScopeInfoPrint(os);
break;
-
- default:
+ case FEEDBACK_METADATA_TYPE:
+ FeedbackMetadata::cast(this)->FeedbackMetadataPrint(os);
+ break;
+ case WEAK_FIXED_ARRAY_TYPE:
+ WeakFixedArray::cast(this)->WeakFixedArrayPrint(os);
+ break;
+ case INTERNALIZED_STRING_TYPE:
+ case EXTERNAL_INTERNALIZED_STRING_TYPE:
+ case ONE_BYTE_INTERNALIZED_STRING_TYPE:
+ case EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
+ case EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
+ case SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE:
+ case SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
+ case SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
+ case STRING_TYPE:
+ case CONS_STRING_TYPE:
+ case EXTERNAL_STRING_TYPE:
+ case SLICED_STRING_TYPE:
+ case THIN_STRING_TYPE:
+ case ONE_BYTE_STRING_TYPE:
+ case CONS_ONE_BYTE_STRING_TYPE:
+ case EXTERNAL_ONE_BYTE_STRING_TYPE:
+ case SLICED_ONE_BYTE_STRING_TYPE:
+ case THIN_ONE_BYTE_STRING_TYPE:
+ case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
+ case SHORT_EXTERNAL_STRING_TYPE:
+ case SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE:
+ case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
+ case SMALL_ORDERED_HASH_MAP_TYPE:
+ case SMALL_ORDERED_HASH_SET_TYPE:
+ case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
+ case JS_STRING_ITERATOR_TYPE:
+ // TODO(all): Handle these types too.
os << "UNKNOWN TYPE " << map()->instance_type();
UNREACHABLE();
break;
@@ -575,6 +622,16 @@ void JSRegExp::JSRegExpPrint(std::ostream& os) { // NOLINT
JSObjectPrintBody(os, this);
}
+void JSRegExpStringIterator::JSRegExpStringIteratorPrint(
+ std::ostream& os) { // NOLINT
+ JSObjectPrintHeader(os, this, "JSRegExpStringIterator");
+ os << "\n - regex: " << Brief(iterating_regexp());
+ os << "\n - string: " << Brief(iterating_string());
+ os << "\n - done: " << done();
+ os << "\n - global: " << global();
+ os << "\n - unicode: " << unicode();
+ JSObjectPrintBody(os, this);
+}
void Symbol::SymbolPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "Symbol");
@@ -591,7 +648,12 @@ void Symbol::SymbolPrint(std::ostream& os) { // NOLINT
void Map::MapPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "Map");
os << "\n - type: " << instance_type();
- os << "\n - instance size: " << instance_size();
+ os << "\n - instance size: ";
+ if (instance_size() == kVariableSizeSentinel) {
+ os << "variable";
+ } else {
+ os << instance_size();
+ }
if (IsJSObjectMap()) {
os << "\n - inobject properties: " << GetInObjectProperties();
}
@@ -626,6 +688,7 @@ void Map::MapPrint(std::ostream& os) { // NOLINT
} else {
os << "\n - back pointer: " << Brief(GetBackPointer());
}
+ os << "\n - prototype_validity cell: " << Brief(prototype_validity_cell());
os << "\n - instance descriptors " << (owns_descriptors() ? "(own) " : "")
<< "#" << NumberOfOwnDescriptors() << ": "
<< Brief(instance_descriptors());
@@ -638,8 +701,14 @@ void Map::MapPrint(std::ostream& os) { // NOLINT
TransitionsAccessor transitions(this, &no_gc);
int nof_transitions = transitions.NumberOfTransitions();
if (nof_transitions > 0) {
- os << "\n - transitions #" << nof_transitions << ": "
- << Brief(raw_transitions());
+ os << "\n - transitions #" << nof_transitions << ": ";
+ HeapObject* heap_object;
+ Smi* smi;
+ if (raw_transitions()->ToSmi(&smi)) {
+ os << Brief(smi);
+ } else if (raw_transitions()->ToStrongOrWeakHeapObject(&heap_object)) {
+ os << Brief(heap_object);
+ }
transitions.PrintTransitions(os);
}
}
@@ -657,14 +726,56 @@ void AliasedArgumentsEntry::AliasedArgumentsEntryPrint(
os << "\n - aliased_context_slot: " << aliased_context_slot();
}
+namespace {
+void PrintFixedArrayWithHeader(std::ostream& os, FixedArray* array,
+ const char* type) {
+ array->PrintHeader(os, type);
+ os << "\n - length: " << array->length();
+ PrintFixedArrayElements(os, array);
+ os << "\n";
+}
-void FixedArray::FixedArrayPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, IsHashTable() ? "HashTable" : "FixedArray");
- os << "\n - length: " << length();
- PrintFixedArrayElements(os, this);
+void PrintWeakFixedArrayElements(std::ostream& os, WeakFixedArray* array) {
+ // Print in array notation for non-sparse arrays.
+ MaybeObject* previous_value = array->length() > 0 ? array->Get(0) : nullptr;
+ MaybeObject* value = nullptr;
+ int previous_index = 0;
+ int i;
+ for (i = 1; i <= array->length(); i++) {
+ if (i < array->length()) value = array->Get(i);
+ if (previous_value == value && i != array->length()) {
+ continue;
+ }
+ os << "\n";
+ std::stringstream ss;
+ ss << previous_index;
+ if (previous_index != i - 1) {
+ ss << '-' << (i - 1);
+ }
+ os << std::setw(12) << ss.str() << ": " << MaybeObjectBrief(previous_value);
+ previous_index = i;
+ previous_value = value;
+ }
+}
+
+void PrintWeakFixedArrayWithHeader(std::ostream& os, WeakFixedArray* array) {
+ array->PrintHeader(os, "WeakFixedArray");
+ os << "\n - length: " << array->length() << "\n";
+ PrintWeakFixedArrayElements(os, array);
os << "\n";
}
+} // namespace
+
+void FixedArray::FixedArrayPrint(std::ostream& os) { // NOLINT
+ PrintFixedArrayWithHeader(os, this,
+ IsHashTable() ? "HashTable" : "FixedArray");
+}
+
+void BoilerplateDescription::BoilerplateDescriptionPrint(std::ostream& os) {
+ PrintFixedArrayWithHeader(os, this, "BoilerplateDescription");
+}
+
void PropertyArray::PropertyArrayPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "PropertyArray");
os << "\n - length: " << length();
@@ -680,6 +791,9 @@ void FixedDoubleArray::FixedDoubleArrayPrint(std::ostream& os) { // NOLINT
os << "\n";
}
+void WeakFixedArray::WeakFixedArrayPrint(std::ostream& os) {
+ PrintWeakFixedArrayWithHeader(os, this);
+}
void TransitionArray::TransitionArrayPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "TransitionArray");
@@ -739,13 +853,8 @@ void FeedbackMetadata::Print() {
os << std::flush;
}
-void FeedbackMetadata::FeedbackMetadataPrint(std::ostream& os) { // NOLINT
+void FeedbackMetadata::FeedbackMetadataPrint(std::ostream& os) {
HeapObject::PrintHeader(os, "FeedbackMetadata");
- os << "\n - length: " << length();
- if (length() == 0) {
- os << " (empty)\n";
- return;
- }
os << "\n - slot_count: " << slot_count();
FeedbackMetadataIterator iter(this);
@@ -844,7 +953,8 @@ void FeedbackNexus::Print(std::ostream& os) { // NOLINT
case FeedbackSlotKind::kStoreKeyedSloppy:
case FeedbackSlotKind::kInstanceOf:
case FeedbackSlotKind::kStoreDataPropertyInLiteral:
- case FeedbackSlotKind::kStoreKeyedStrict: {
+ case FeedbackSlotKind::kStoreKeyedStrict:
+ case FeedbackSlotKind::kStoreInArrayLiteral: {
os << ICState2String(StateFromFeedback());
break;
}
@@ -988,7 +1098,6 @@ void JSCollectionIterator::JSCollectionIteratorPrint(
os << "\n";
}
-
void JSSetIterator::JSSetIteratorPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSSetIterator");
JSCollectionIteratorPrint(os);
@@ -1023,7 +1132,7 @@ void JSArrayBuffer::JSArrayBufferPrint(std::ostream& os) { // NOLINT
if (is_neuterable()) os << "\n - neuterable";
if (was_neutered()) os << "\n - neutered";
if (is_shared()) os << "\n - shared";
- if (has_guard_region()) os << "\n - has_guard_region";
+ if (is_wasm_memory()) os << "\n - is_wasm_memory";
if (is_growable()) os << "\n - growable";
JSObjectPrintBody(os, this, !was_neutered());
}
@@ -1041,20 +1150,9 @@ void JSTypedArray::JSTypedArrayPrint(std::ostream& os) { // NOLINT
void JSArrayIterator::JSArrayIteratorPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSArrayIterator");
-
- InstanceType instance_type = map()->instance_type();
- os << "\n - type: ";
- if (instance_type <= LAST_ARRAY_KEY_ITERATOR_TYPE) {
- os << "keys";
- } else if (instance_type <= LAST_ARRAY_KEY_VALUE_ITERATOR_TYPE) {
- os << "entries";
- } else {
- os << "values";
- }
-
- os << "\n - object: " << Brief(object());
- os << "\n - index: " << Brief(index());
-
+ os << "\n - iterated_object: " << Brief(iterated_object());
+ os << "\n - next_index: " << Brief(next_index());
+ os << "\n - kind: " << kind();
JSObjectPrintBody(os, this);
}
@@ -1092,14 +1190,14 @@ void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
os << "<no-prototype-slot>";
}
os << "\n - shared_info: " << Brief(shared());
- os << "\n - name: " << Brief(shared()->name());
+ os << "\n - name: " << Brief(shared()->Name());
// Print Builtin name for builtin functions
int builtin_index = code()->builtin_index();
if (builtin_index != -1 && !IsInterpreted()) {
if (builtin_index == Builtins::kDeserializeLazy) {
- if (shared()->HasLazyDeserializationBuiltinId()) {
- builtin_index = shared()->lazy_deserialization_builtin_id();
+ if (shared()->HasBuiltinId()) {
+ builtin_index = shared()->builtin_id();
os << "\n - builtin: " << GetIsolate()->builtins()->name(builtin_index)
<< "(lazy)";
}
@@ -1116,15 +1214,13 @@ void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
if (IsInterpreted()) {
os << "\n - interpreted";
if (shared()->HasBytecodeArray()) {
- os << "\n - bytecode: " << shared()->bytecode_array();
+ os << "\n - bytecode: " << shared()->GetBytecodeArray();
}
}
if (WasmExportedFunction::IsWasmExportedFunction(this)) {
WasmExportedFunction* function = WasmExportedFunction::cast(this);
os << "\n - WASM instance "
<< reinterpret_cast<void*>(function->instance());
- os << "\n context "
- << reinterpret_cast<void*>(function->instance()->wasm_context()->get());
os << "\n - WASM function index " << function->function_index();
}
shared()->PrintSourceCode(os);
@@ -1141,8 +1237,8 @@ void SharedFunctionInfo::PrintSourceCode(std::ostream& os) {
if (HasSourceCode()) {
os << "\n - source code: ";
String* source = String::cast(Script::cast(script())->source());
- int start = start_position();
- int length = end_position() - start;
+ int start = StartPosition();
+ int length = EndPosition() - start;
std::unique_ptr<char[]> source_string = source->ToCString(
DISALLOW_NULLS, FAST_STRING_TRAVERSAL, start, length, nullptr);
os << source_string.get();
@@ -1152,11 +1248,14 @@ void SharedFunctionInfo::PrintSourceCode(std::ostream& os) {
void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "SharedFunctionInfo");
os << "\n - name: ";
- if (has_shared_name()) {
- os << Brief(raw_name());
+ if (HasSharedName()) {
+ os << Brief(Name());
} else {
os << "<no-shared-name>";
}
+ if (HasInferredName()) {
+ os << "\n - inferred name: " << Brief(inferred_name());
+ }
os << "\n - kind: " << kind();
if (needs_home_object()) {
os << "\n - needs_home_object";
@@ -1165,13 +1264,8 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) { // NOLINT
os << "\n - formal_parameter_count: " << internal_formal_parameter_count();
os << "\n - expected_nof_properties: " << expected_nof_properties();
os << "\n - language_mode: " << language_mode();
- os << " - code: " << Brief(code());
- if (HasBytecodeArray()) {
- os << "\n - bytecode_array: " << bytecode_array();
- }
- if (HasAsmWasmData()) {
- os << "\n - asm_wasm_data: " << Brief(asm_wasm_data());
- }
+ os << "\n - data: " << Brief(function_data());
+ os << "\n - code (from data): " << Brief(GetCode());
PrintSourceCode(os);
// Script files are often large, hard to read.
// os << "\n - script =";
@@ -1184,21 +1278,23 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) { // NOLINT
os << "\n - declaration";
}
os << "\n - function token position: " << function_token_position();
- os << "\n - start position: " << start_position();
- os << "\n - end position: " << end_position();
+ os << "\n - start position: " << StartPosition();
+ os << "\n - end position: " << EndPosition();
if (HasDebugInfo()) {
os << "\n - debug info: " << Brief(debug_info());
} else {
os << "\n - no debug info";
}
os << "\n - scope info: " << Brief(scope_info());
+ if (HasOuterScopeInfo()) {
+ os << "\n - outer scope info: " << Brief(GetOuterScopeInfo());
+ }
os << "\n - length: " << length();
os << "\n - feedback_metadata: ";
- feedback_metadata()->FeedbackMetadataPrint(os);
- if (HasPreParsedScopeData()) {
- os << "\n - preparsed scope data: " << preparsed_scope_data();
+ if (HasFeedbackMetadata()) {
+ feedback_metadata()->FeedbackMetadataPrint(os);
} else {
- os << "\n - no preparsed scope data";
+ os << "<none>";
}
os << "\n";
}
@@ -1443,7 +1539,6 @@ void PrototypeInfo::PrototypeInfoPrint(std::ostream& os) { // NOLINT
os << "\n - weak cell: " << Brief(weak_cell());
os << "\n - prototype users: " << Brief(prototype_users());
os << "\n - registry slot: " << registry_slot();
- os << "\n - validity cell: " << Brief(validity_cell());
os << "\n - object create map: " << Brief(object_create_map());
os << "\n - should_be_fast_map: " << should_be_fast_map();
os << "\n";
@@ -1464,6 +1559,25 @@ void Tuple3::Tuple3Print(std::ostream& os) { // NOLINT
os << "\n";
}
+void WasmCompiledModule::WasmCompiledModulePrint(std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "WasmCompiledModule");
+ os << "\n - shared: " << Brief(shared());
+ os << "\n";
+}
+
+void WasmDebugInfo::WasmDebugInfoPrint(std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "WasmDebugInfo");
+ os << "\n - wasm_instance: " << Brief(wasm_instance());
+ os << "\n";
+}
+
+void WasmSharedModuleData::WasmSharedModuleDataPrint(
+ std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "WasmSharedModuleData");
+ os << "\n - module: " << module();
+ os << "\n";
+}
+
void LoadHandler::LoadHandlerPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "LoadHandler");
// TODO(ishell): implement printing based on handler kind
@@ -1524,6 +1638,15 @@ void AccessCheckInfo::AccessCheckInfoPrint(std::ostream& os) { // NOLINT
os << "\n";
}
+void CallHandlerInfo::CallHandlerInfoPrint(std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "CallHandlerInfo");
+ os << "\n - callback: " << Brief(callback());
+ os << "\n - js_callback: " << Brief(js_callback());
+ os << "\n - data: " << Brief(data());
+ os << "\n - side_effect_free: "
+ << (IsSideEffectFreeCallHandlerInfo() ? "true" : "false");
+ os << "\n";
+}
void InterceptorInfo::InterceptorInfoPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "InterceptorInfo");
@@ -1656,7 +1779,7 @@ void PrintScopeInfoList(ScopeInfo* scope_info, std::ostream& os,
void ScopeInfo::ScopeInfoPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "ScopeInfo");
if (length() == 0) {
- os << "\n - length = 0";
+ os << "\n - length = 0\n";
return;
}
@@ -1670,8 +1793,15 @@ void ScopeInfo::ScopeInfoPrint(std::ostream& os) { // NOLINT
os << "\n - outer scope info: " << Brief(OuterScopeInfo());
}
if (HasFunctionName()) {
- os << "\n - function name: ";
- FunctionName()->ShortPrint(os);
+ os << "\n - function name: " << Brief(FunctionName());
+ }
+ if (HasInferredFunctionName()) {
+ os << "\n - inferred function name: " << Brief(InferredFunctionName());
+ }
+
+ if (HasPositionInfo()) {
+ os << "\n - start position: " << StartPosition();
+ os << "\n - end position: " << EndPosition();
}
os << "\n - length: " << length();
if (length() > 0) {
@@ -1763,6 +1893,13 @@ void PreParsedScopeData::PreParsedScopeDataPrint(std::ostream& os) { // NOLINT
os << "\n";
}
+void InterpreterData::InterpreterDataPrint(std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "InterpreterData");
+ os << "\n - bytecode_array: " << Brief(bytecode_array());
+ os << "\n - interpreter_trampoline: " << Brief(interpreter_trampoline());
+ os << "\n";
+}
+
#endif // OBJECT_PRINT
// TODO(cbruni): remove once the new maptracer is in place.
@@ -1925,24 +2062,25 @@ void TransitionArray::Print(std::ostream& os) {
}
void TransitionsAccessor::PrintTransitions(std::ostream& os) { // NOLINT
- WeakCell* cell = nullptr;
+ Map* target;
switch (encoding()) {
case kPrototypeInfo:
case kUninitialized:
return;
- case kWeakCell:
- cell = GetTargetCell<kWeakCell>();
+ case kWeakRef:
+ target = Map::cast(raw_transitions_->ToWeakHeapObject());
break;
- case kHandler:
- cell = GetTargetCell<kHandler>();
+ case kHandler: {
+ WeakCell* cell = GetTargetCell();
+ DCHECK(!cell->cleared());
+ target = Map::cast(cell->value());
break;
+ }
case kFullTransitionArray:
return transitions()->Print(os);
}
- DCHECK(!cell->cleared());
- Map* target = Map::cast(cell->value());
Name* key = GetSimpleTransitionKey(target);
- PrintOneTransition(os, key, target, raw_transitions_);
+ PrintOneTransition(os, key, target, raw_transitions_->GetHeapObject());
}
void TransitionsAccessor::PrintTransitionTree() {
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 9e80224d93..8057cb837b 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -59,6 +59,7 @@
#include "src/objects/debug-objects-inl.h"
#include "src/objects/frame-array-inl.h"
#include "src/objects/hash-table.h"
+#include "src/objects/js-regexp-string-iterator.h"
#include "src/objects/map.h"
#include "src/objects/microtask-inl.h"
#include "src/objects/promise-inl.h"
@@ -68,11 +69,11 @@
#include "src/regexp/jsregexp.h"
#include "src/safepoint-table.h"
#include "src/snapshot/code-serializer.h"
+#include "src/snapshot/snapshot.h"
#include "src/source-position-table.h"
#include "src/string-builder.h"
#include "src/string-search.h"
#include "src/string-stream.h"
-#include "src/trap-handler/trap-handler.h"
#include "src/unicode-cache-inl.h"
#include "src/unicode-decoder.h"
#include "src/utils-inl.h"
@@ -1209,24 +1210,17 @@ Handle<SharedFunctionInfo> FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(
} else {
name_string = isolate->factory()->empty_string();
}
- Handle<Code> code = BUILTIN_CODE(isolate, HandleApiCall);
- bool is_constructor;
FunctionKind function_kind;
if (info->remove_prototype()) {
- is_constructor = false;
function_kind = kConciseMethod;
} else {
- is_constructor = true;
function_kind = kNormalFunction;
}
- Handle<SharedFunctionInfo> result = isolate->factory()->NewSharedFunctionInfo(
- name_string, code, is_constructor, function_kind);
- if (is_constructor) {
- result->SetConstructStub(*BUILTIN_CODE(isolate, JSConstructStubApi));
- }
+ Handle<SharedFunctionInfo> result =
+ isolate->factory()->NewSharedFunctionInfoForApiFunction(name_string, info,
+ function_kind);
result->set_length(info->length());
- result->set_api_func_data(*info);
result->DontAdaptArguments();
DCHECK(result->IsApiFunction());
@@ -1342,8 +1336,7 @@ int JSObject::GetHeaderSize(InstanceType type,
case JS_BOUND_FUNCTION_TYPE:
return JSBoundFunction::kSize;
case JS_FUNCTION_TYPE:
- return function_has_prototype_slot ? JSFunction::kSizeWithPrototype
- : JSFunction::kSizeWithoutPrototype;
+ return JSFunction::GetHeaderSize(function_has_prototype_slot);
case JS_VALUE_TYPE:
return JSValue::kSize;
case JS_DATE_TYPE:
@@ -1352,6 +1345,8 @@ int JSObject::GetHeaderSize(InstanceType type,
return JSArray::kSize;
case JS_ARRAY_BUFFER_TYPE:
return JSArrayBuffer::kSize;
+ case JS_ARRAY_ITERATOR_TYPE:
+ return JSArrayIterator::kSize;
case JS_TYPED_ARRAY_TYPE:
return JSTypedArray::kSize;
case JS_DATA_VIEW_TYPE:
@@ -1375,6 +1370,8 @@ int JSObject::GetHeaderSize(InstanceType type,
return JSPromise::kSize;
case JS_REGEXP_TYPE:
return JSRegExp::kSize;
+ case JS_REGEXP_STRING_ITERATOR_TYPE:
+ return JSRegExpStringIterator::kSize;
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
return JSObject::kHeaderSize;
case JS_MESSAGE_OBJECT_TYPE:
@@ -1387,6 +1384,8 @@ int JSObject::GetHeaderSize(InstanceType type,
return JSStringIterator::kSize;
case JS_MODULE_NAMESPACE_TYPE:
return JSModuleNamespace::kHeaderSize;
+ case WASM_GLOBAL_TYPE:
+ return WasmGlobalObject::kSize;
case WASM_INSTANCE_TYPE:
return WasmInstanceObject::kSize;
case WASM_MEMORY_TYPE:
@@ -1396,10 +1395,6 @@ int JSObject::GetHeaderSize(InstanceType type,
case WASM_TABLE_TYPE:
return WasmTableObject::kSize;
default:
- if (type >= FIRST_ARRAY_ITERATOR_TYPE &&
- type <= LAST_ARRAY_ITERATOR_TYPE) {
- return JSArrayIterator::kSize;
- }
UNREACHABLE();
}
}
@@ -1505,10 +1500,9 @@ MaybeHandle<Object> Object::GetPropertyWithAccessor(LookupIterator* it) {
if (result.is_null()) return isolate->factory()->undefined_value();
Handle<Object> reboxed_result = handle(*result, isolate);
if (info->replace_on_access() && receiver->IsJSReceiver()) {
- args.CallNamedSetterCallback(
- reinterpret_cast<GenericNamedPropertySetterCallback>(
- &Accessors::ReconfigureToDataProperty),
- name, result);
+ args.CallAccessorSetter(
+ isolate->factory()->reconfigure_to_data_property_accessor(), name,
+ result);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
}
return reboxed_result;
@@ -1594,15 +1588,7 @@ Maybe<bool> Object::SetPropertyWithAccessor(LookupIterator* it,
return Nothing<bool>();
}
- // The actual type of call_fun is either v8::AccessorNameSetterCallback or
- // i::Accesors::AccessorNameBooleanSetterCallback, depending on whether the
- // AccessorInfo was created by the API or internally (see accessors.cc).
- // Here we handle both cases using GenericNamedPropertySetterCallback and
- // its Call method.
- GenericNamedPropertySetterCallback call_fun =
- v8::ToCData<GenericNamedPropertySetterCallback>(info->setter());
-
- if (call_fun == nullptr) {
+ if (!info->has_setter()) {
// TODO(verwaest): We should not get here anymore once all AccessorInfos
// are marked as special_data_property. They cannot both be writable and
// not have a setter.
@@ -1615,9 +1601,15 @@ Maybe<bool> Object::SetPropertyWithAccessor(LookupIterator* it,
Nothing<bool>());
}
+ // The actual type of setter callback is either
+ // v8::AccessorNameSetterCallback or
+ // i::Accessors::AccessorNameBooleanSetterCallback, depending on whether the
+ // AccessorInfo was created by the API or internally (see accessors.cc).
+ // Here we handle both cases using GenericNamedPropertySetterCallback and
+ // its Call method.
PropertyCallbackArguments args(isolate, info->data(), *receiver, *holder,
should_throw);
- Handle<Object> result = args.CallNamedSetterCallback(call_fun, name, value);
+ Handle<Object> result = args.CallAccessorSetter(info, name, value);
// In the case of AccessorNameSetterCallback, we know that the result value
// cannot have been set, so the result of Call will be null. In the case of
// AccessorNameBooleanSetterCallback, the result will either be null
@@ -2069,7 +2061,7 @@ bool HasExcludedProperty(
return false;
}
-MUST_USE_RESULT Maybe<bool> FastAssign(
+V8_WARN_UNUSED_RESULT Maybe<bool> FastAssign(
Handle<JSReceiver> target, Handle<Object> source,
const ScopedVector<Handle<Object>>* excluded_properties, bool use_set) {
// Non-empty strings are the only non-JSReceivers that need to be handled
@@ -2366,7 +2358,7 @@ MaybeHandle<Object> Object::ArraySpeciesConstructor(
Handle<Object> default_species = isolate->array_function();
if (original_array->IsJSArray() &&
Handle<JSArray>::cast(original_array)->HasArrayPrototype(isolate) &&
- isolate->IsSpeciesLookupChainIntact()) {
+ isolate->IsArraySpeciesLookupChainIntact()) {
return default_species;
}
Handle<Object> constructor = isolate->factory()->undefined_value();
@@ -2413,7 +2405,7 @@ MaybeHandle<Object> Object::ArraySpeciesConstructor(
}
// ES6 section 7.3.20 SpeciesConstructor ( O, defaultConstructor )
-MUST_USE_RESULT MaybeHandle<Object> Object::SpeciesConstructor(
+V8_WARN_UNUSED_RESULT MaybeHandle<Object> Object::SpeciesConstructor(
Isolate* isolate, Handle<JSReceiver> recv,
Handle<JSFunction> default_ctor) {
Handle<Object> ctor_obj;
@@ -2467,12 +2459,6 @@ bool Object::IterationHasObservableEffects() {
// affect iteration.
if (!isolate->IsArrayIteratorLookupChainIntact()) return true;
- // Check that the map of the initial array iterator hasn't changed.
- Map* iterator_map = isolate->initial_array_iterator_prototype()->map();
- if (!isolate->is_initial_array_iterator_prototype_map(iterator_map)) {
- return true;
- }
-
// For FastPacked kinds, iteration will have the same effect as simply
// accessing each property in order.
ElementsKind array_kind = array->GetElementsKind();
@@ -2515,6 +2501,26 @@ std::ostream& operator<<(std::ostream& os, const Brief& v) {
return os;
}
+std::ostream& operator<<(std::ostream& os, const MaybeObjectBrief& v) {
+ // TODO(marja): const-correct this the same way as the Object* version.
+ MaybeObject* maybe_object = const_cast<MaybeObject*>(v.value);
+ Smi* smi;
+ HeapObject* heap_object;
+ if (maybe_object->ToSmi(&smi)) {
+ smi->SmiPrint(os);
+ } else if (maybe_object->IsClearedWeakHeapObject()) {
+ os << "[cleared]";
+ } else if (maybe_object->ToWeakHeapObject(&heap_object)) {
+ os << "[weak] ";
+ heap_object->HeapObjectShortPrint(os);
+ } else if (maybe_object->ToStrongHeapObject(&heap_object)) {
+ heap_object->HeapObjectShortPrint(os);
+ } else {
+ UNREACHABLE();
+ }
+ return os;
+}
+
void Smi::SmiPrint(std::ostream& os) const { // NOLINT
os << value();
}
@@ -2863,7 +2869,7 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
accumulator->Add("!!!INVALID SHARED ON CONSTRUCTOR!!!");
} else {
String* constructor_name =
- JSFunction::cast(constructor)->shared()->name();
+ JSFunction::cast(constructor)->shared()->Name();
if (constructor_name->length() > 0) {
accumulator->Add(global_object ? "<GlobalObject " : "<");
accumulator->Put(constructor_name);
@@ -2987,12 +2993,25 @@ VisitorId Map::GetVisitorId(Map* map) {
case FREE_SPACE_TYPE:
return kVisitFreeSpace;
- case HASH_TABLE_TYPE:
case FIXED_ARRAY_TYPE:
+ case BOILERPLATE_DESCRIPTION_TYPE:
+ case HASH_TABLE_TYPE:
case DESCRIPTOR_ARRAY_TYPE:
case SCOPE_INFO_TYPE:
+ case BLOCK_CONTEXT_TYPE:
+ case CATCH_CONTEXT_TYPE:
+ case DEBUG_EVALUATE_CONTEXT_TYPE:
+ case EVAL_CONTEXT_TYPE:
+ case FUNCTION_CONTEXT_TYPE:
+ case MODULE_CONTEXT_TYPE:
+ case NATIVE_CONTEXT_TYPE:
+ case SCRIPT_CONTEXT_TYPE:
+ case WITH_CONTEXT_TYPE:
return kVisitFixedArray;
+ case WEAK_FIXED_ARRAY_TYPE:
+ return kVisitWeakFixedArray;
+
case FIXED_DOUBLE_ARRAY_TYPE:
return kVisitFixedDoubleArray;
@@ -3030,8 +3049,8 @@ VisitorId Map::GetVisitorId(Map* map) {
case JS_WEAK_SET_TYPE:
return kVisitJSWeakCollection;
- case JS_REGEXP_TYPE:
- return kVisitJSRegExp;
+ case CALL_HANDLER_INFO_TYPE:
+ return kVisitStruct;
case SHARED_FUNCTION_INFO_TYPE:
return kVisitSharedFunctionInfo;
@@ -3054,6 +3073,9 @@ VisitorId Map::GetVisitorId(Map* map) {
case CODE_DATA_CONTAINER_TYPE:
return kVisitCodeDataContainer;
+ case WASM_INSTANCE_TYPE:
+ return kVisitWasmInstanceObject;
+
case JS_OBJECT_TYPE:
case JS_ERROR_TYPE:
case JS_ARGUMENTS_TYPE:
@@ -3064,6 +3086,7 @@ VisitorId Map::GetVisitorId(Map* map) {
case JS_MODULE_NAMESPACE_TYPE:
case JS_VALUE_TYPE:
case JS_DATE_TYPE:
+ case JS_ARRAY_ITERATOR_TYPE:
case JS_ARRAY_TYPE:
case JS_GLOBAL_PROXY_TYPE:
case JS_GLOBAL_OBJECT_TYPE:
@@ -3078,13 +3101,10 @@ VisitorId Map::GetVisitorId(Map* map) {
case JS_MAP_KEY_VALUE_ITERATOR_TYPE:
case JS_MAP_VALUE_ITERATOR_TYPE:
case JS_STRING_ITERATOR_TYPE:
-
-#define ARRAY_ITERATOR_CASE(type) case type:
- ARRAY_ITERATOR_TYPE_LIST(ARRAY_ITERATOR_CASE)
-#undef ARRAY_ITERATOR_CASE
-
case JS_PROMISE_TYPE:
- case WASM_INSTANCE_TYPE:
+ case JS_REGEXP_TYPE:
+ case JS_REGEXP_STRING_ITERATOR_TYPE:
+ case WASM_GLOBAL_TYPE:
case WASM_MEMORY_TYPE:
case WASM_MODULE_TYPE:
case WASM_TABLE_TYPE:
@@ -3101,6 +3121,7 @@ VisitorId Map::GetVisitorId(Map* map) {
case FOREIGN_TYPE:
case HEAP_NUMBER_TYPE:
case MUTABLE_HEAP_NUMBER_TYPE:
+ case FEEDBACK_METADATA_TYPE:
return kVisitDataObject;
case BIGINT_TYPE:
@@ -3260,9 +3281,43 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
return;
}
switch (map()->instance_type()) {
- case MAP_TYPE:
- os << "<Map(" << ElementsKindToString(Map::cast(this)->elements_kind())
- << ")>";
+ case MAP_TYPE: {
+ os << "<Map";
+ Map* mapInstance = Map::cast(this);
+ if (mapInstance->IsJSObjectMap()) {
+ os << "(" << ElementsKindToString(mapInstance->elements_kind()) << ")";
+ } else if (mapInstance->instance_size() != kVariableSizeSentinel) {
+ os << "[" << mapInstance->instance_size() << "]";
+ }
+ os << ">";
+ } break;
+ case BLOCK_CONTEXT_TYPE:
+ os << "<BlockContext[" << FixedArray::cast(this)->length() << "]>";
+ break;
+ case CATCH_CONTEXT_TYPE:
+ os << "<CatchContext[" << FixedArray::cast(this)->length() << "]>";
+ break;
+ case DEBUG_EVALUATE_CONTEXT_TYPE:
+ os << "<DebugEvaluateContext[" << FixedArray::cast(this)->length()
+ << "]>";
+ break;
+ case EVAL_CONTEXT_TYPE:
+ os << "<EvalContext[" << FixedArray::cast(this)->length() << "]>";
+ break;
+ case FUNCTION_CONTEXT_TYPE:
+ os << "<FunctionContext[" << FixedArray::cast(this)->length() << "]>";
+ break;
+ case MODULE_CONTEXT_TYPE:
+ os << "<ModuleContext[" << FixedArray::cast(this)->length() << "]>";
+ break;
+ case NATIVE_CONTEXT_TYPE:
+ os << "<NativeContext[" << FixedArray::cast(this)->length() << "]>";
+ break;
+ case SCRIPT_CONTEXT_TYPE:
+ os << "<ScriptContext[" << FixedArray::cast(this)->length() << "]>";
+ break;
+ case WITH_CONTEXT_TYPE:
+ os << "<WithContext[" << FixedArray::cast(this)->length() << "]>";
break;
case HASH_TABLE_TYPE:
os << "<HashTable[" << FixedArray::cast(this)->length() << "]>";
@@ -3270,6 +3325,10 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
case FIXED_ARRAY_TYPE:
os << "<FixedArray[" << FixedArray::cast(this)->length() << "]>";
break;
+ case BOILERPLATE_DESCRIPTION_TYPE:
+ os << "<BoilerplateDescription[" << FixedArray::cast(this)->length()
+ << "]>";
+ break;
case FIXED_DOUBLE_ARRAY_TYPE:
os << "<FixedDoubleArray[" << FixedDoubleArray::cast(this)->length()
<< "]>";
@@ -3341,14 +3400,20 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
break;
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
- case SCOPE_INFO_TYPE:
- os << "<ScopeInfo[" << ScopeInfo::cast(this)->length() << "]>";
+ case SCOPE_INFO_TYPE: {
+ ScopeInfo* scope = ScopeInfo::cast(this);
+ os << "<ScopeInfo";
+ if (scope->length()) os << " " << scope->scope_type() << " ";
+ os << "[" << scope->length() << "]>";
break;
+ }
case CODE_TYPE: {
Code* code = Code::cast(this);
os << "<Code " << Code::Kind2String(code->kind());
if (code->is_stub()) {
os << " " << CodeStub::MajorName(CodeStub::GetMajorKey(code));
+ } else if (code->is_builtin()) {
+ os << " " << Builtins::name(code->builtin_index());
}
os << ">";
break;
@@ -3430,6 +3495,19 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
os << '>';
break;
}
+ case CALL_HANDLER_INFO_TYPE: {
+ CallHandlerInfo* info = CallHandlerInfo::cast(this);
+ os << "<CallHandlerInfo ";
+ os << "callback= " << Brief(info->callback());
+ os << ", js_callback= " << Brief(info->js_callback());
+ os << ", data= " << Brief(info->data());
+ if (info->IsSideEffectFreeCallHandlerInfo()) {
+ os << ", side_effect_free= true>";
+ } else {
+ os << ", side_effect_free= false>";
+ }
+ break;
+ }
default:
os << "<Other heap object (" << map()->instance_type() << ")>";
break;
@@ -3456,27 +3534,24 @@ void HeapObject::Iterate(ObjectVisitor* v) { IterateFast<ObjectVisitor>(v); }
void HeapObject::IterateBody(ObjectVisitor* v) {
Map* m = map();
- IterateBodyFast<ObjectVisitor>(m->instance_type(), SizeFromMap(m), v);
+ IterateBodyFast<ObjectVisitor>(m, SizeFromMap(m), v);
}
-
-void HeapObject::IterateBody(InstanceType type, int object_size,
- ObjectVisitor* v) {
- IterateBodyFast<ObjectVisitor>(type, object_size, v);
+void HeapObject::IterateBody(Map* map, int object_size, ObjectVisitor* v) {
+ IterateBodyFast<ObjectVisitor>(map, object_size, v);
}
struct CallIsValidSlot {
template <typename BodyDescriptor>
- static bool apply(HeapObject* obj, int offset, int) {
- return BodyDescriptor::IsValidSlot(obj, offset);
+ static bool apply(Map* map, HeapObject* obj, int offset, int) {
+ return BodyDescriptor::IsValidSlot(map, obj, offset);
}
};
-
-bool HeapObject::IsValidSlot(int offset) {
+bool HeapObject::IsValidSlot(Map* map, int offset) {
DCHECK_NE(0, offset);
- return BodyDescriptorApply<CallIsValidSlot, bool>(map()->instance_type(),
+ return BodyDescriptorApply<CallIsValidSlot, bool>(map->instance_type(), map,
this, offset, 0);
}
@@ -3567,7 +3642,7 @@ bool HeapObject::CanBeRehashed() const {
// TODO(yangguo): actually support rehashing OrderedHash{Map,Set}.
return IsNameDictionary() || IsGlobalDictionary() ||
IsNumberDictionary() || IsSimpleNumberDictionary() ||
- IsStringTable() || IsWeakHashTable();
+ IsStringTable();
case DESCRIPTOR_ARRAY_TYPE:
return true;
case TRANSITION_ARRAY_TYPE:
@@ -3595,8 +3670,6 @@ void HeapObject::RehashBasedOnMap() {
GlobalDictionary::cast(this)->Rehash();
} else if (IsStringTable()) {
StringTable::cast(this)->Rehash();
- } else if (IsWeakHashTable()) {
- WeakHashTable::cast(this)->Rehash();
} else {
UNREACHABLE();
}
@@ -5805,7 +5878,7 @@ Handle<Object> JSFunction::GetName(Isolate* isolate,
if (function->shared()->name_should_print_as_anonymous()) {
return isolate->factory()->anonymous_string();
}
- return handle(function->shared()->name(), isolate);
+ return handle(function->shared()->Name(), isolate);
}
// static
@@ -6146,6 +6219,11 @@ Maybe<PropertyAttributes> JSReceiver::GetPropertyAttributes(
case LookupIterator::INTEGER_INDEXED_EXOTIC:
return Just(ABSENT);
case LookupIterator::ACCESSOR:
+ if (it->GetHolder<Object>()->IsJSModuleNamespace()) {
+ return JSModuleNamespace::GetPropertyAttributes(it);
+ } else {
+ return Just(it->property_attributes());
+ }
case LookupIterator::DATA:
return Just(it->property_attributes());
}
@@ -6613,6 +6691,11 @@ void JSReceiver::DeleteNormalizedProperty(Handle<JSReceiver> object,
dictionary = NameDictionary::DeleteEntry(dictionary, entry);
object->SetProperties(*dictionary);
}
+ if (object->map()->is_prototype_map()) {
+ // Invalidate prototype validity cell as this may invalidate transitioning
+ // store IC handlers.
+ JSObject::InvalidatePrototypeChains(object->map());
+ }
}
@@ -7980,8 +8063,9 @@ Maybe<bool> JSReceiver::SetIntegrityLevel(Handle<JSReceiver> receiver,
if (receiver->IsJSObject()) {
Handle<JSObject> object = Handle<JSObject>::cast(receiver);
- if (!object->HasSloppyArgumentsElements()) { // Fast path.
- // prevent memory leaks by not adding unnecessary transitions
+ if (!object->HasSloppyArgumentsElements() &&
+ !object->IsJSModuleNamespace()) { // Fast path.
+ // Prevent memory leaks by not adding unnecessary transitions.
Maybe<bool> test = JSObject::TestIntegrityLevel(object, level);
MAYBE_RETURN(test, Nothing<bool>());
if (test.FromJust()) return test;
@@ -8375,8 +8459,10 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
Handle<JSObject> object, ShouldThrow should_throw) {
STATIC_ASSERT(attrs == NONE || attrs == SEALED || attrs == FROZEN);
- // Sealing/freezing sloppy arguments should be handled elsewhere.
+ // Sealing/freezing sloppy arguments or namespace objects should be handled
+ // elsewhere.
DCHECK(!object->HasSloppyArgumentsElements());
+ DCHECK_IMPLIES(object->IsJSModuleNamespace(), attrs == NONE);
Isolate* isolate = object->GetIsolate();
if (object->IsAccessCheckNeeded() &&
@@ -8691,7 +8777,7 @@ bool Map::OnlyHasSimpleProperties() const {
!is_dictionary_map();
}
-MUST_USE_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
+V8_WARN_UNUSED_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
Isolate* isolate, Handle<JSReceiver> receiver, bool get_entries,
Handle<FixedArray>* result) {
Handle<Map> map(JSReceiver::cast(*receiver)->map(), isolate);
@@ -9046,7 +9132,7 @@ Handle<Map> Map::Normalize(Handle<Map> fast_map, PropertyNormalizationMode mode,
#ifdef ENABLE_SLOW_DCHECKS
if (FLAG_enable_slow_asserts) {
// The cached map should match newly created normalized map bit-by-bit,
- // except for the code cache, which can contain some ics which can be
+ // except for the code cache, which can contain some ICs which can be
// applied to the shared map, dependent code and weak cell cache.
Handle<Map> fresh = Map::CopyNormalized(fast_map, mode);
@@ -9054,7 +9140,8 @@ Handle<Map> Map::Normalize(Handle<Map> fast_map, PropertyNormalizationMode mode,
// For prototype maps, the PrototypeInfo is not copied.
DCHECK_EQ(0, memcmp(fresh->address(), new_map->address(),
kTransitionsOrPrototypeInfoOffset));
- DCHECK_EQ(fresh->raw_transitions(), Smi::kZero);
+ DCHECK_EQ(fresh->raw_transitions(),
+ MaybeObject::FromObject(Smi::kZero));
STATIC_ASSERT(kDescriptorsOffset ==
kTransitionsOrPrototypeInfoOffset + kPointerSize);
DCHECK_EQ(0, memcmp(HeapObject::RawField(*fresh, kDescriptorsOffset),
@@ -9066,7 +9153,9 @@ Handle<Map> Map::Normalize(Handle<Map> fast_map, PropertyNormalizationMode mode,
}
STATIC_ASSERT(Map::kWeakCellCacheOffset ==
Map::kDependentCodeOffset + kPointerSize);
- int offset = Map::kWeakCellCacheOffset + kPointerSize;
+ STATIC_ASSERT(Map::kPrototypeValidityCellOffset ==
+ Map::kWeakCellCacheOffset + kPointerSize);
+ int offset = Map::kPrototypeValidityCellOffset + kPointerSize;
DCHECK_EQ(0, memcmp(fresh->address() + offset,
new_map->address() + offset, Map::kSize - offset));
}
@@ -9655,8 +9744,7 @@ Handle<Map> Map::TransitionToDataProperty(Handle<Map> map, Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
PropertyConstness constness,
- StoreFromKeyed store_mode,
- bool* created_new_map) {
+ StoreFromKeyed store_mode) {
RuntimeCallTimerScope stats_scope(
*map, map->is_prototype_map()
? RuntimeCallCounterId::kPrototypeMap_TransitionToDataProperty
@@ -9671,7 +9759,6 @@ Handle<Map> Map::TransitionToDataProperty(Handle<Map> map, Handle<Name> name,
Map* maybe_transition =
TransitionsAccessor(map).SearchTransition(*name, kData, attributes);
if (maybe_transition != nullptr) {
- *created_new_map = false;
Handle<Map> transition(maybe_transition);
int descriptor = transition->LastAdded();
@@ -9682,7 +9769,6 @@ Handle<Map> Map::TransitionToDataProperty(Handle<Map> map, Handle<Name> name,
return UpdateDescriptorForValue(transition, descriptor, constness, value);
}
- *created_new_map = true;
TransitionFlag flag = INSERT_TRANSITION;
MaybeHandle<Map> maybe_map;
if (!FLAG_track_constant_fields && value->IsJSFunction()) {
@@ -10058,28 +10144,25 @@ bool FixedArray::IsEqualTo(FixedArray* other) {
// static
-void WeakFixedArray::Set(Handle<WeakFixedArray> array, int index,
- Handle<HeapObject> value) {
+void FixedArrayOfWeakCells::Set(Handle<FixedArrayOfWeakCells> array, int index,
+ Handle<HeapObject> value) {
DCHECK(array->IsEmptySlot(index)); // Don't overwrite anything.
Handle<WeakCell> cell =
value->IsMap() ? Map::WeakCellForMap(Handle<Map>::cast(value))
: array->GetIsolate()->factory()->NewWeakCell(value);
Handle<FixedArray>::cast(array)->set(index + kFirstIndex, *cell);
- if (FLAG_trace_weak_arrays) {
- PrintF("[WeakFixedArray: storing at index %d ]\n", index);
- }
array->set_last_used_index(index);
}
// static
-Handle<WeakFixedArray> WeakFixedArray::Add(Handle<Object> maybe_array,
- Handle<HeapObject> value,
- int* assigned_index) {
- Handle<WeakFixedArray> array =
- (maybe_array.is_null() || !maybe_array->IsWeakFixedArray())
- ? Allocate(value->GetIsolate(), 1, Handle<WeakFixedArray>::null())
- : Handle<WeakFixedArray>::cast(maybe_array);
+Handle<FixedArrayOfWeakCells> FixedArrayOfWeakCells::Add(
+ Handle<Object> maybe_array, Handle<HeapObject> value, int* assigned_index) {
+ Handle<FixedArrayOfWeakCells> array =
+ (maybe_array.is_null() || !maybe_array->IsFixedArrayOfWeakCells())
+ ? Allocate(value->GetIsolate(), 1,
+ Handle<FixedArrayOfWeakCells>::null())
+ : Handle<FixedArrayOfWeakCells>::cast(maybe_array);
// Try to store the new entry if there's room. Optimize for consecutive
// accesses.
int first_index = array->last_used_index();
@@ -10087,13 +10170,10 @@ Handle<WeakFixedArray> WeakFixedArray::Add(Handle<Object> maybe_array,
if (length > 0) {
for (int i = first_index;;) {
if (array->IsEmptySlot((i))) {
- WeakFixedArray::Set(array, i, value);
+ FixedArrayOfWeakCells::Set(array, i, value);
if (assigned_index != nullptr) *assigned_index = i;
return array;
}
- if (FLAG_trace_weak_arrays) {
- PrintF("[WeakFixedArray: searching for free slot]\n");
- }
i = (i + 1) % length;
if (i == first_index) break;
}
@@ -10101,19 +10181,15 @@ Handle<WeakFixedArray> WeakFixedArray::Add(Handle<Object> maybe_array,
// No usable slot found, grow the array.
int new_length = length == 0 ? 1 : length + (length >> 1) + 4;
- Handle<WeakFixedArray> new_array =
+ Handle<FixedArrayOfWeakCells> new_array =
Allocate(array->GetIsolate(), new_length, array);
- if (FLAG_trace_weak_arrays) {
- PrintF("[WeakFixedArray: growing to size %d ]\n", new_length);
- }
- WeakFixedArray::Set(new_array, length, value);
+ FixedArrayOfWeakCells::Set(new_array, length, value);
if (assigned_index != nullptr) *assigned_index = length;
return new_array;
}
-
template <class CompactionCallback>
-void WeakFixedArray::Compact() {
+void FixedArrayOfWeakCells::Compact() {
FixedArray* array = FixedArray::cast(this);
int new_length = kFirstIndex;
for (int i = kFirstIndex; i < array->length(); i++) {
@@ -10129,10 +10205,9 @@ void WeakFixedArray::Compact() {
set_last_used_index(0);
}
-
-void WeakFixedArray::Iterator::Reset(Object* maybe_array) {
- if (maybe_array->IsWeakFixedArray()) {
- list_ = WeakFixedArray::cast(maybe_array);
+void FixedArrayOfWeakCells::Iterator::Reset(Object* maybe_array) {
+ if (maybe_array->IsFixedArrayOfWeakCells()) {
+ list_ = FixedArrayOfWeakCells::cast(maybe_array);
index_ = 0;
#ifdef DEBUG
last_used_index_ = list_->last_used_index();
@@ -10152,20 +10227,20 @@ void JSObject::PrototypeRegistryCompactionCallback::Callback(Object* value,
proto_info->set_registry_slot(new_index);
}
-
-template void WeakFixedArray::Compact<WeakFixedArray::NullCallback>();
template void
-WeakFixedArray::Compact<JSObject::PrototypeRegistryCompactionCallback>();
-
+FixedArrayOfWeakCells::Compact<FixedArrayOfWeakCells::NullCallback>();
+template void
+FixedArrayOfWeakCells::Compact<JSObject::PrototypeRegistryCompactionCallback>();
-bool WeakFixedArray::Remove(Handle<HeapObject> value) {
+bool FixedArrayOfWeakCells::Remove(Handle<HeapObject> value) {
if (Length() == 0) return false;
// Optimize for the most recently added element to be removed again.
int first_index = last_used_index();
for (int i = first_index;;) {
if (Get(i) == *value) {
Clear(i);
- // Users of WeakFixedArray should make sure that there are no duplicates.
+ // Users of FixedArrayOfWeakCells should make sure that there are no
+ // duplicates.
return true;
}
i = (i + 1) % Length();
@@ -10176,8 +10251,8 @@ bool WeakFixedArray::Remove(Handle<HeapObject> value) {
// static
-Handle<WeakFixedArray> WeakFixedArray::Allocate(
- Isolate* isolate, int size, Handle<WeakFixedArray> initialize_from) {
+Handle<FixedArrayOfWeakCells> FixedArrayOfWeakCells::Allocate(
+ Isolate* isolate, int size, Handle<FixedArrayOfWeakCells> initialize_from) {
DCHECK_LE(0, size);
Handle<FixedArray> result =
isolate->factory()->NewUninitializedFixedArray(size + kFirstIndex);
@@ -10196,18 +10271,15 @@ Handle<WeakFixedArray> WeakFixedArray::Allocate(
result->set(index, Smi::kZero);
index++;
}
- return Handle<WeakFixedArray>::cast(result);
+ return Handle<FixedArrayOfWeakCells>::cast(result);
}
// static
-Handle<ArrayList> ArrayList::Add(Handle<ArrayList> array, Handle<Object> obj,
- AddMode mode) {
+Handle<ArrayList> ArrayList::Add(Handle<ArrayList> array, Handle<Object> obj) {
int length = array->Length();
array = EnsureSpace(array, length + 1);
- if (mode == kReloadLengthAfterAllocation) {
- DCHECK(array->Length() <= length);
- length = array->Length();
- }
+ // Check that GC didn't remove elements from the array.
+ DCHECK_EQ(array->Length(), length);
array->Set(length, *obj);
array->SetLength(length + 1);
return array;
@@ -10215,12 +10287,11 @@ Handle<ArrayList> ArrayList::Add(Handle<ArrayList> array, Handle<Object> obj,
// static
Handle<ArrayList> ArrayList::Add(Handle<ArrayList> array, Handle<Object> obj1,
- Handle<Object> obj2, AddMode mode) {
+ Handle<Object> obj2) {
int length = array->Length();
array = EnsureSpace(array, length + 2);
- if (mode == kReloadLengthAfterAllocation) {
- length = array->Length();
- }
+ // Check that GC didn't remove elements from the array.
+ DCHECK_EQ(array->Length(), length);
array->Set(length, *obj1);
array->Set(length + 1, *obj2);
array->SetLength(length + 2);
@@ -10310,19 +10381,14 @@ Handle<FrameArray> FrameArray::AppendJSFrame(Handle<FrameArray> in,
// static
Handle<FrameArray> FrameArray::AppendWasmFrame(
Handle<FrameArray> in, Handle<WasmInstanceObject> wasm_instance,
- int wasm_function_index, WasmCodeWrapper code, int offset, int flags) {
+ int wasm_function_index, wasm::WasmCode* code, int offset, int flags) {
const int frame_count = in->FrameCount();
const int new_length = LengthFor(frame_count + 1);
Handle<FrameArray> array = EnsureSpace(in, new_length);
array->SetWasmInstance(frame_count, *wasm_instance);
array->SetWasmFunctionIndex(frame_count, Smi::FromInt(wasm_function_index));
- // code will be a null handle for interpreted wasm frames.
- if (!code.IsCodeObject()) {
- array->SetIsWasmInterpreterFrame(frame_count, Smi::FromInt(code.is_null()));
- } else {
- if (!code.is_null())
- array->SetCode(frame_count, AbstractCode::cast(*code.GetCode()));
- }
+ // The {code} will be {nullptr} for interpreted wasm frames.
+ array->SetIsWasmInterpreterFrame(frame_count, Smi::FromInt(code == nullptr));
array->SetOffset(frame_count, Smi::FromInt(offset));
array->SetFlags(frame_count, Smi::FromInt(flags));
array->set(kFrameCountIndex, Smi::FromInt(frame_count + 1));
@@ -12173,6 +12239,8 @@ void JSFunction::MarkForOptimization(ConcurrencyMode mode) {
mode = ConcurrencyMode::kNotConcurrent;
}
+ DCHECK(!is_compiled() || IsInterpreted());
+ DCHECK(shared()->IsInterpreted());
DCHECK(!IsOptimized());
DCHECK(!HasOptimizedCode());
DCHECK(shared()->allows_lazy_compilation() ||
@@ -12194,12 +12262,6 @@ void JSFunction::MarkForOptimization(ConcurrencyMode mode) {
}
}
- if (!IsInterpreted()) {
- // For non I+TF path, install a shim which checks the optimization marker.
- // No write barrier required, since the builtin is part of the root set.
- set_code_no_write_barrier(
- isolate->builtins()->builtin(Builtins::kCheckOptimizationMarker));
- }
SetOptimizationMarker(mode == ConcurrencyMode::kConcurrent
? OptimizationMarker::kCompileOptimizedConcurrent
: OptimizationMarker::kCompileOptimized);
@@ -12364,8 +12426,8 @@ void JSObject::LazyRegisterPrototypeUser(Handle<Map> user, Isolate* isolate) {
Map::GetOrCreatePrototypeInfo(proto, isolate);
Handle<Object> maybe_registry(proto_info->prototype_users(), isolate);
int slot = 0;
- Handle<WeakFixedArray> new_array =
- WeakFixedArray::Add(maybe_registry, current_user, &slot);
+ Handle<FixedArrayOfWeakCells> new_array =
+ FixedArrayOfWeakCells::Add(maybe_registry, current_user, &slot);
current_user_info->set_registry_slot(slot);
if (!maybe_registry.is_identical_to(new_array)) {
proto_info->set_prototype_users(*new_array);
@@ -12395,7 +12457,7 @@ bool JSObject::UnregisterPrototypeUser(Handle<Map> user, Isolate* isolate) {
if (!user->prototype()->IsJSObject()) {
Object* users =
PrototypeInfo::cast(user->prototype_info())->prototype_users();
- return users->IsWeakFixedArray();
+ return users->IsFixedArrayOfWeakCells();
}
Handle<JSObject> prototype(JSObject::cast(user->prototype()), isolate);
Handle<PrototypeInfo> user_info =
@@ -12409,9 +12471,9 @@ bool JSObject::UnregisterPrototypeUser(Handle<Map> user, Isolate* isolate) {
Handle<PrototypeInfo> proto_info(PrototypeInfo::cast(maybe_proto_info),
isolate);
Object* maybe_registry = proto_info->prototype_users();
- DCHECK(maybe_registry->IsWeakFixedArray());
- DCHECK(WeakFixedArray::cast(maybe_registry)->Get(slot) == *user);
- WeakFixedArray::cast(maybe_registry)->Clear(slot);
+ DCHECK(maybe_registry->IsFixedArrayOfWeakCells());
+ DCHECK(FixedArrayOfWeakCells::cast(maybe_registry)->Get(slot) == *user);
+ FixedArrayOfWeakCells::cast(maybe_registry)->Clear(slot);
if (FLAG_trace_prototype_users) {
PrintF("Unregistering %p as a user of prototype %p.\n",
reinterpret_cast<void*>(*user), reinterpret_cast<void*>(*prototype));
@@ -12424,29 +12486,27 @@ namespace {
// This function must be kept in sync with
// AccessorAssembler::InvalidateValidityCellIfPrototype() which does pre-checks
// before jumping here.
-PrototypeInfo* InvalidateOnePrototypeValidityCellInternal(Map* map) {
+void InvalidateOnePrototypeValidityCellInternal(Map* map) {
DCHECK(map->is_prototype_map());
if (FLAG_trace_prototype_users) {
PrintF("Invalidating prototype map %p 's cell\n",
reinterpret_cast<void*>(map));
}
- Object* maybe_proto_info = map->prototype_info();
- if (!maybe_proto_info->IsPrototypeInfo()) return nullptr;
- PrototypeInfo* proto_info = PrototypeInfo::cast(maybe_proto_info);
- Object* maybe_cell = proto_info->validity_cell();
+ Object* maybe_cell = map->prototype_validity_cell();
if (maybe_cell->IsCell()) {
// Just set the value; the cell will be replaced lazily.
Cell* cell = Cell::cast(maybe_cell);
cell->set_value(Smi::FromInt(Map::kPrototypeChainInvalid));
}
- return proto_info;
}
void InvalidatePrototypeChainsInternal(Map* map) {
- PrototypeInfo* proto_info = InvalidateOnePrototypeValidityCellInternal(map);
- if (proto_info == nullptr) return;
+ InvalidateOnePrototypeValidityCellInternal(map);
- WeakFixedArray::Iterator iterator(proto_info->prototype_users());
+ Object* maybe_proto_info = map->prototype_info();
+ if (!maybe_proto_info->IsPrototypeInfo()) return;
+ PrototypeInfo* proto_info = PrototypeInfo::cast(maybe_proto_info);
+ FixedArrayOfWeakCells::Iterator iterator(proto_info->prototype_users());
// For now, only maps register themselves as users.
Map* user;
while ((user = iterator.Next<Map>()) != nullptr) {
@@ -12513,8 +12573,8 @@ void Map::SetShouldBeFastPrototypeMap(Handle<Map> map, bool value,
}
// static
-Handle<Cell> Map::GetOrCreatePrototypeChainValidityCell(Handle<Map> map,
- Isolate* isolate) {
+Handle<Object> Map::GetOrCreatePrototypeChainValidityCell(Handle<Map> map,
+ Isolate* isolate) {
Handle<Object> maybe_prototype;
if (map->IsJSGlobalObjectMap()) {
DCHECK(map->is_prototype_map());
@@ -12524,21 +12584,17 @@ Handle<Cell> Map::GetOrCreatePrototypeChainValidityCell(Handle<Map> map,
} else {
maybe_prototype =
handle(map->GetPrototypeChainRootMap(isolate)->prototype(), isolate);
- if (!maybe_prototype->IsJSReceiver()) return Handle<Cell>::null();
}
- if (maybe_prototype->IsJSProxy()) {
- Handle<Cell> cell = isolate->factory()->NewCell(
- handle(Smi::FromInt(Map::kPrototypeChainValid), isolate));
- return cell;
+ if (!maybe_prototype->IsJSObject()) {
+ return handle(Smi::FromInt(Map::kPrototypeChainValid), isolate);
}
Handle<JSObject> prototype = Handle<JSObject>::cast(maybe_prototype);
// Ensure the prototype is registered with its own prototypes so its cell
// will be invalidated when necessary.
JSObject::LazyRegisterPrototypeUser(handle(prototype->map(), isolate),
isolate);
- Handle<PrototypeInfo> proto_info =
- GetOrCreatePrototypeInfo(prototype, isolate);
- Object* maybe_cell = proto_info->validity_cell();
+
+ Object* maybe_cell = prototype->map()->prototype_validity_cell();
// Return existing cell if it's still valid.
if (maybe_cell->IsCell()) {
Handle<Cell> cell(Cell::cast(maybe_cell), isolate);
@@ -12549,21 +12605,17 @@ Handle<Cell> Map::GetOrCreatePrototypeChainValidityCell(Handle<Map> map,
// Otherwise create a new cell.
Handle<Cell> cell = isolate->factory()->NewCell(
handle(Smi::FromInt(Map::kPrototypeChainValid), isolate));
- proto_info->set_validity_cell(*cell);
+ prototype->map()->set_prototype_validity_cell(*cell);
return cell;
}
// static
bool Map::IsPrototypeChainInvalidated(Map* map) {
DCHECK(map->is_prototype_map());
- Object* maybe_proto_info = map->prototype_info();
- if (maybe_proto_info->IsPrototypeInfo()) {
- PrototypeInfo* proto_info = PrototypeInfo::cast(maybe_proto_info);
- Object* maybe_cell = proto_info->validity_cell();
- if (maybe_cell->IsCell()) {
- Cell* cell = Cell::cast(maybe_cell);
- return cell->value() == Smi::FromInt(Map::kPrototypeChainInvalid);
- }
+ Object* maybe_cell = map->prototype_validity_cell();
+ if (maybe_cell->IsCell()) {
+ Cell* cell = Cell::cast(maybe_cell);
+ return cell->value() != Smi::FromInt(Map::kPrototypeChainValid);
}
return true;
}
@@ -12784,6 +12836,7 @@ bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
case JS_VALUE_TYPE:
case JS_WEAK_MAP_TYPE:
case JS_WEAK_SET_TYPE:
+ case WASM_GLOBAL_TYPE:
case WASM_INSTANCE_TYPE:
case WASM_MEMORY_TYPE:
case WASM_MODULE_TYPE:
@@ -12791,6 +12844,7 @@ bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
return true;
case BIGINT_TYPE:
+ case BOILERPLATE_DESCRIPTION_TYPE:
case BYTECODE_ARRAY_TYPE:
case BYTE_ARRAY_TYPE:
case CELL_TYPE:
@@ -12798,6 +12852,7 @@ bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
case FILLER_TYPE:
case FIXED_ARRAY_TYPE:
case FIXED_DOUBLE_ARRAY_TYPE:
+ case FEEDBACK_METADATA_TYPE:
case FOREIGN_TYPE:
case FREE_SPACE_TYPE:
case HASH_TABLE_TYPE:
@@ -13055,7 +13110,7 @@ Handle<String> NativeCodeFunctionSourceString(
Isolate* const isolate = shared_info->GetIsolate();
IncrementalStringBuilder builder(isolate);
builder.AppendCString("function ");
- builder.AppendString(handle(shared_info->name(), isolate));
+ builder.AppendString(handle(shared_info->Name(), isolate));
builder.AppendCString("() { [native code] }");
return builder.Finish().ToHandleChecked();
}
@@ -13128,7 +13183,7 @@ Handle<String> JSFunction::ToString(Handle<JSFunction> function) {
if (shared_info->name_should_print_as_anonymous()) {
builder.AppendCString("anonymous");
} else if (!shared_info->is_anonymous_expression()) {
- builder.AppendString(handle(shared_info->name(), isolate));
+ builder.AppendString(handle(shared_info->Name(), isolate));
}
}
if (shared_info->is_wrapped()) {
@@ -13403,11 +13458,14 @@ MaybeHandle<SharedFunctionInfo> Script::FindSharedFunctionInfo(
// AstTraversalVisitor doesn't recurse properly in the construct which
// triggers the mismatch.
CHECK_LT(fun->function_literal_id(), shared_function_infos()->length());
- Object* shared = shared_function_infos()->get(fun->function_literal_id());
- if (shared->IsUndefined(isolate) || WeakCell::cast(shared)->cleared()) {
+ MaybeObject* shared =
+ shared_function_infos()->Get(fun->function_literal_id());
+ HeapObject* heap_object;
+ if (!shared->ToStrongOrWeakHeapObject(&heap_object) ||
+ heap_object->IsUndefined(isolate)) {
return MaybeHandle<SharedFunctionInfo>();
}
- return handle(SharedFunctionInfo::cast(WeakCell::cast(shared)->value()));
+ return handle(SharedFunctionInfo::cast(heap_object));
}
Script::Iterator::Iterator(Isolate* isolate)
@@ -13422,16 +13480,20 @@ SharedFunctionInfo::ScriptIterator::ScriptIterator(Handle<Script> script)
handle(script->shared_function_infos())) {}
SharedFunctionInfo::ScriptIterator::ScriptIterator(
- Isolate* isolate, Handle<FixedArray> shared_function_infos)
+ Isolate* isolate, Handle<WeakFixedArray> shared_function_infos)
: isolate_(isolate),
shared_function_infos_(shared_function_infos),
index_(0) {}
SharedFunctionInfo* SharedFunctionInfo::ScriptIterator::Next() {
while (index_ < shared_function_infos_->length()) {
- Object* raw = shared_function_infos_->get(index_++);
- if (raw->IsUndefined(isolate_) || WeakCell::cast(raw)->cleared()) continue;
- return SharedFunctionInfo::cast(WeakCell::cast(raw)->value());
+ MaybeObject* raw = shared_function_infos_->Get(index_++);
+ HeapObject* heap_object;
+ if (!raw->ToStrongOrWeakHeapObject(&heap_object) ||
+ heap_object->IsUndefined(isolate_)) {
+ continue;
+ }
+ return SharedFunctionInfo::cast(heap_object);
}
return nullptr;
}
@@ -13475,24 +13537,24 @@ void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared,
// duplicates.
if (script_object->IsScript()) {
Handle<Script> script = Handle<Script>::cast(script_object);
- Handle<FixedArray> list = handle(script->shared_function_infos(), isolate);
+ Handle<WeakFixedArray> list =
+ handle(script->shared_function_infos(), isolate);
#ifdef DEBUG
DCHECK_LT(shared->function_literal_id(), list->length());
- if (list->get(shared->function_literal_id())->IsWeakCell() &&
- !WeakCell::cast(list->get(shared->function_literal_id()))->cleared()) {
- DCHECK(
- WeakCell::cast(list->get(shared->function_literal_id()))->value() ==
- *shared);
+ MaybeObject* maybe_object = list->Get(shared->function_literal_id());
+ HeapObject* heap_object;
+ if (maybe_object->ToWeakHeapObject(&heap_object)) {
+ DCHECK_EQ(heap_object, *shared);
}
#endif
- Handle<WeakCell> cell = isolate->factory()->NewWeakCell(shared);
- list->set(shared->function_literal_id(), *cell);
+ list->Set(shared->function_literal_id(),
+ HeapObjectReference::Weak(*shared));
} else {
Handle<Object> list = isolate->factory()->noscript_shared_function_infos();
#ifdef DEBUG
if (FLAG_enable_slow_asserts) {
- WeakFixedArray::Iterator iterator(*list);
+ FixedArrayOfWeakCells::Iterator iterator(*list);
SharedFunctionInfo* next;
while ((next = iterator.Next<SharedFunctionInfo>()) != nullptr) {
DCHECK_NE(next, *shared);
@@ -13500,7 +13562,7 @@ void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared,
}
#endif // DEBUG
- list = WeakFixedArray::Add(list, shared);
+ list = FixedArrayOfWeakCells::Add(list, shared);
isolate->heap()->SetRootNoScriptSharedFunctionInfos(*list);
}
@@ -13511,19 +13573,21 @@ void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared,
// Due to liveedit, it might happen that the old_script doesn't know
// about the SharedFunctionInfo, so we have to guard against that.
- Handle<FixedArray> infos(old_script->shared_function_infos(), isolate);
+ Handle<WeakFixedArray> infos(old_script->shared_function_infos(), isolate);
if (shared->function_literal_id() < infos->length()) {
- Object* raw = old_script->shared_function_infos()->get(
+ MaybeObject* raw = old_script->shared_function_infos()->Get(
shared->function_literal_id());
- if (!raw->IsWeakCell() || WeakCell::cast(raw)->value() == *shared) {
- old_script->shared_function_infos()->set(
- shared->function_literal_id(), isolate->heap()->undefined_value());
+ HeapObject* heap_object;
+ if (raw->ToWeakHeapObject(&heap_object) && heap_object == *shared) {
+ old_script->shared_function_infos()->Set(
+ shared->function_literal_id(),
+ HeapObjectReference::Strong(isolate->heap()->undefined_value()));
}
}
} else {
// Remove shared function info from root array.
Object* list = isolate->heap()->noscript_shared_function_infos();
- CHECK(WeakFixedArray::cast(list)->Remove(shared));
+ CHECK(FixedArrayOfWeakCells::cast(list)->Remove(shared));
}
// Finally set new script.
@@ -13537,6 +13601,13 @@ bool SharedFunctionInfo::HasBreakInfo() const {
return has_break_info;
}
+bool SharedFunctionInfo::BreakAtEntry() const {
+ if (!HasDebugInfo()) return false;
+ DebugInfo* info = DebugInfo::cast(debug_info());
+ bool break_at_entry = info->BreakAtEntry();
+ return break_at_entry;
+}
+
bool SharedFunctionInfo::HasCoverageInfo() const {
if (!HasDebugInfo()) return false;
DebugInfo* info = DebugInfo::cast(debug_info());
@@ -13568,15 +13639,19 @@ void SharedFunctionInfo::set_debugger_hints(int value) {
}
String* SharedFunctionInfo::DebugName() {
- if (name()->length() == 0) return inferred_name();
- return name();
+ if (Name()->length() == 0) return inferred_name();
+ return Name();
}
// static
bool SharedFunctionInfo::HasNoSideEffect(Handle<SharedFunctionInfo> info) {
if (!info->computed_has_no_side_effect()) {
- bool has_no_side_effect = DebugEvaluate::FunctionHasNoSideEffect(info);
- info->set_has_no_side_effect(has_no_side_effect);
+ DebugEvaluate::SideEffectState has_no_side_effect =
+ DebugEvaluate::FunctionGetSideEffectState(info);
+ info->set_has_no_side_effect(has_no_side_effect !=
+ DebugEvaluate::kHasSideEffects);
+ info->set_requires_runtime_side_effect_checks(
+ has_no_side_effect == DebugEvaluate::kRequiresRuntimeChecks);
info->set_computed_has_no_side_effect(true);
}
return info->has_no_side_effect();
@@ -13630,8 +13705,8 @@ Handle<Object> SharedFunctionInfo::GetSourceCode(
Isolate* isolate = shared->GetIsolate();
if (!shared->HasSourceCode()) return isolate->factory()->undefined_value();
Handle<String> source(String::cast(Script::cast(shared->script())->source()));
- return isolate->factory()->NewSubString(source, shared->start_position(),
- shared->end_position());
+ return isolate->factory()->NewSubString(source, shared->StartPosition(),
+ shared->EndPosition());
}
// static
@@ -13642,15 +13717,15 @@ Handle<Object> SharedFunctionInfo::GetSourceCodeHarmony(
Handle<String> script_source(
String::cast(Script::cast(shared->script())->source()));
int start_pos = shared->function_token_position();
- if (start_pos == kNoSourcePosition) start_pos = shared->start_position();
+ if (start_pos == kNoSourcePosition) start_pos = shared->StartPosition();
Handle<String> source = isolate->factory()->NewSubString(
- script_source, start_pos, shared->end_position());
+ script_source, start_pos, shared->EndPosition());
if (!shared->is_wrapped()) return source;
DCHECK(!shared->name_should_print_as_anonymous());
IncrementalStringBuilder builder(isolate);
builder.AppendCString("function ");
- builder.AppendString(Handle<String>(shared->name(), isolate));
+ builder.AppendString(Handle<String>(shared->Name(), isolate));
builder.AppendCString("(");
Handle<FixedArray> args(Script::cast(shared->script())->wrapped_arguments());
int argc = args->length();
@@ -13675,9 +13750,7 @@ bool SharedFunctionInfo::IsInlineable() {
return !optimization_disabled();
}
-int SharedFunctionInfo::SourceSize() {
- return end_position() - start_position();
-}
+int SharedFunctionInfo::SourceSize() { return EndPosition() - StartPosition(); }
void JSFunction::CalculateInstanceSizeHelper(InstanceType instance_type,
bool has_prototype_slot,
@@ -13761,19 +13834,19 @@ std::ostream& operator<<(std::ostream& os, const SourceCodeOf& v) {
if (!s->is_toplevel()) {
os << "function ";
- String* name = s->name();
+ String* name = s->Name();
if (name->length() > 0) {
name->PrintUC16(os);
}
}
- int len = s->end_position() - s->start_position();
+ int len = s->EndPosition() - s->StartPosition();
if (len <= v.max_length || v.max_length < 0) {
- script_source->PrintUC16(os, s->start_position(), s->end_position());
+ script_source->PrintUC16(os, s->StartPosition(), s->EndPosition());
return os;
} else {
- script_source->PrintUC16(os, s->start_position(),
- s->start_position() + v.max_length);
+ script_source->PrintUC16(os, s->StartPosition(),
+ s->StartPosition() + v.max_length);
return os << "...\n";
}
}
@@ -13782,8 +13855,7 @@ std::ostream& operator<<(std::ostream& os, const SourceCodeOf& v) {
void SharedFunctionInfo::DisableOptimization(BailoutReason reason) {
DCHECK_NE(reason, BailoutReason::kNoReason);
- set_compiler_hints(
- DisabledOptimizationReasonBits::update(compiler_hints(), reason));
+ set_flags(DisabledOptimizationReasonBits::update(flags(), reason));
// Code should be the lazy compilation stub or else interpreted.
DCHECK(abstract_code()->kind() == AbstractCode::INTERPRETED_FUNCTION ||
abstract_code()->kind() == AbstractCode::BUILTIN);
@@ -13796,13 +13868,18 @@ void SharedFunctionInfo::DisableOptimization(BailoutReason reason) {
}
void SharedFunctionInfo::InitFromFunctionLiteral(
- Handle<SharedFunctionInfo> shared_info, FunctionLiteral* lit) {
+ Handle<SharedFunctionInfo> shared_info, FunctionLiteral* lit,
+ bool is_toplevel) {
// When adding fields here, make sure DeclarationScope::AnalyzePartially is
// updated accordingly.
shared_info->set_internal_formal_parameter_count(lit->parameter_count());
shared_info->set_function_token_position(lit->function_token_position());
- shared_info->set_start_position(lit->start_position());
- shared_info->set_end_position(lit->end_position());
+ shared_info->set_raw_start_position(lit->start_position());
+ shared_info->set_raw_end_position(lit->end_position());
+ if (shared_info->scope_info()->HasPositionInfo()) {
+ shared_info->scope_info()->SetPositionInfo(lit->start_position(),
+ lit->end_position());
+ }
shared_info->set_is_declaration(lit->is_declaration());
shared_info->set_is_named_expression(lit->is_named_expression());
shared_info->set_is_anonymous_expression(lit->is_anonymous_expression());
@@ -13813,16 +13890,22 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
// shared_info->set_kind(lit->kind());
// FunctionKind must have already been set.
DCHECK(lit->kind() == shared_info->kind());
- if (!IsConstructable(lit->kind())) {
- shared_info->SetConstructStub(
- *BUILTIN_CODE(shared_info->GetIsolate(), ConstructedNonConstructable));
- }
shared_info->set_needs_home_object(lit->scope()->NeedsHomeObject());
shared_info->set_function_literal_id(lit->function_literal_id());
DCHECK_IMPLIES(lit->requires_instance_fields_initializer(),
IsClassConstructor(lit->kind()));
shared_info->set_requires_instance_fields_initializer(
lit->requires_instance_fields_initializer());
+
+ shared_info->set_is_toplevel(is_toplevel);
+ DCHECK(shared_info->outer_scope_info()->IsTheHole(shared_info->GetIsolate()));
+ if (!is_toplevel) {
+ Scope* outer_scope = lit->scope()->GetOuterScopeWithContext();
+ if (outer_scope) {
+ shared_info->set_outer_scope_info(*outer_scope->scope_info());
+ }
+ }
+
// For lazy parsed functions, the following flags will be inaccurate since we
// don't have the information yet. They're set later in
// SetSharedFunctionFlagsFromLiteral (compiler.cc), when the function is
@@ -13867,21 +13950,6 @@ void SharedFunctionInfo::SetExpectedNofPropertiesFromEstimate(
set_expected_nof_properties(estimate);
}
-void SharedFunctionInfo::SetConstructStub(Code* code) {
- if (code->kind() == Code::BUILTIN) code->set_is_construct_stub(true);
-#ifdef DEBUG
- if (code->is_builtin()) {
- // See https://crbug.com/v8/6787. Lazy deserialization currently cannot
- // handle lazy construct stubs that differ from the code object.
- int builtin_id = code->builtin_index();
- DCHECK_NE(Builtins::kDeserializeLazy, builtin_id);
- DCHECK(builtin_id == Builtins::kJSBuiltinsConstructStub ||
- this->code() == code || !Builtins::IsLazy(builtin_id));
- }
-#endif
- set_construct_stub(code);
-}
-
void Map::StartInobjectSlackTracking() {
DCHECK(!IsInobjectSlackTrackingInProgress());
if (UnusedPropertyFields() == 0) return;
@@ -13918,22 +13986,16 @@ void Code::InvalidateEmbeddedObjects() {
void Code::Relocate(intptr_t delta) {
- if (trap_handler::IsTrapHandlerEnabled() && is_wasm_code()) {
- const int index = trap_handler_index()->value();
- if (index >= 0) {
- trap_handler::UpdateHandlerDataCodePointer(index, instruction_start());
- }
- }
for (RelocIterator it(this, RelocInfo::kApplyMask); !it.done(); it.next()) {
it.rinfo()->apply(delta);
}
- Assembler::FlushICache(instruction_start(), instruction_size());
+ Assembler::FlushICache(raw_instruction_start(), raw_instruction_size());
}
void Code::CopyFrom(const CodeDesc& desc) {
// copy code
- CopyBytes(instruction_start(), desc.buffer,
+ CopyBytes(raw_instruction_start(), desc.buffer,
static_cast<size_t>(desc.instr_size));
// copy unwinding info, if any
@@ -13968,18 +14030,18 @@ void Code::CopyFrom(const CodeDesc& desc) {
// code object
Handle<Object> p = it.rinfo()->target_object_handle(origin);
Code* code = Code::cast(*p);
- it.rinfo()->set_target_address(code->instruction_start(),
+ it.rinfo()->set_target_address(code->raw_instruction_start(),
UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
Address p = it.rinfo()->target_runtime_entry(origin);
it.rinfo()->set_target_runtime_entry(p, UPDATE_WRITE_BARRIER,
SKIP_ICACHE_FLUSH);
} else {
- intptr_t delta = instruction_start() - desc.buffer;
+ intptr_t delta = raw_instruction_start() - desc.buffer;
it.rinfo()->apply(delta);
}
}
- Assembler::FlushICache(instruction_start(), instruction_size());
+ Assembler::FlushICache(raw_instruction_start(), raw_instruction_size());
}
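
The two hunks above switch Relocate and CopyFrom over to raw_instruction_start()/raw_instruction_size(), so relocation and instruction-cache flushing always act on the on-heap code body rather than any off-heap (embedded-builtin) alias. Below is a minimal, self-contained sketch of the pointer-delta relocation idea itself, with invented names and no V8 types:

    // Standalone sketch (not V8 code): applying a relocation delta when code is
    // copied from an assembly buffer to its final resting place.
    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    int main() {
      std::vector<uint8_t> assembler_buffer(64, 0);  // where code was emitted
      std::vector<uint8_t> code_object(64, 0);       // final on-heap location

      // Every relocation entry records an absolute pointer embedded in the code;
      // after the copy, each such pointer must be shifted by the same delta.
      std::intptr_t delta =
          reinterpret_cast<std::intptr_t>(code_object.data()) -
          reinterpret_cast<std::intptr_t>(assembler_buffer.data());

      std::intptr_t absolute_target =
          reinterpret_cast<std::intptr_t>(assembler_buffer.data()) + 16;
      std::memcpy(assembler_buffer.data(), &absolute_target, sizeof(absolute_target));

      std::memcpy(code_object.data(), assembler_buffer.data(), assembler_buffer.size());

      std::intptr_t patched;
      std::memcpy(&patched, code_object.data(), sizeof(patched));
      patched += delta;  // what rinfo->apply(delta) does per entry
      std::memcpy(code_object.data(), &patched, sizeof(patched));

      assert(patched == reinterpret_cast<std::intptr_t>(code_object.data()) + 16);
      return 0;  // a real implementation would now flush the instruction cache
    }
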
@@ -13988,26 +14050,31 @@ SafepointEntry Code::GetSafepointEntry(Address pc) {
return table.FindEntry(pc);
}
-int Code::OffHeapInstructionSize() {
- DCHECK(Builtins::IsOffHeapBuiltin(this));
- InstructionStream* stream =
- InstructionStream::TryLookupInstructionStream(GetIsolate(), this);
- return static_cast<int>(stream->byte_length());
+#ifdef V8_EMBEDDED_BUILTINS
+int Code::OffHeapInstructionSize() const {
+ DCHECK(Builtins::IsEmbeddedBuiltin(this));
+ if (Isolate::CurrentEmbeddedBlob() == nullptr) return raw_instruction_size();
+ EmbeddedData d = EmbeddedData::FromBlob();
+ return d.InstructionSizeOfBuiltin(builtin_index());
}
-Address Code::OffHeapInstructionStart() {
- DCHECK(Builtins::IsOffHeapBuiltin(this));
- InstructionStream* stream =
- InstructionStream::TryLookupInstructionStream(GetIsolate(), this);
- return stream->bytes();
+Address Code::OffHeapInstructionStart() const {
+ DCHECK(Builtins::IsEmbeddedBuiltin(this));
+ if (Isolate::CurrentEmbeddedBlob() == nullptr) return raw_instruction_start();
+ EmbeddedData d = EmbeddedData::FromBlob();
+ return reinterpret_cast<Address>(
+ const_cast<uint8_t*>(d.InstructionStartOfBuiltin(builtin_index())));
}
-Address Code::OffHeapInstructionEnd() {
- DCHECK(Builtins::IsOffHeapBuiltin(this));
- InstructionStream* stream =
- InstructionStream::TryLookupInstructionStream(GetIsolate(), this);
- return stream->bytes() + stream->byte_length();
+Address Code::OffHeapInstructionEnd() const {
+ DCHECK(Builtins::IsEmbeddedBuiltin(this));
+ if (Isolate::CurrentEmbeddedBlob() == nullptr) return raw_instruction_end();
+ EmbeddedData d = EmbeddedData::FromBlob();
+ return reinterpret_cast<Address>(
+ const_cast<uint8_t*>(d.InstructionStartOfBuiltin(builtin_index()) +
+ d.InstructionSizeOfBuiltin(builtin_index())));
}
+#endif
namespace {
template <typename Code>
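
The OffHeapInstruction* accessors above now resolve embedded builtins through the process-wide embedded blob (EmbeddedData::FromBlob) and fall back to the raw on-heap instructions when no blob is installed. A self-contained sketch of that lookup-with-fallback shape, using invented Blob/CodeLike types rather than the real EmbeddedData API:

    // Minimal sketch: use the embedded blob if present, else the on-heap copy.
    #include <cassert>
    #include <cstdint>

    struct Blob {
      const uint8_t* bytes;
      int size;
    };

    static const Blob* g_current_blob = nullptr;  // stands in for CurrentEmbeddedBlob()

    struct CodeLike {
      const uint8_t* raw_start;  // on-heap instructions
      int raw_size;

      const uint8_t* InstructionStart() const {
        return g_current_blob ? g_current_blob->bytes : raw_start;
      }
      int InstructionSize() const {
        return g_current_blob ? g_current_blob->size : raw_size;
      }
    };

    int main() {
      uint8_t on_heap[4] = {1, 2, 3, 4};
      CodeLike code{on_heap, 4};
      assert(code.InstructionStart() == on_heap);  // no blob yet: fallback

      static const uint8_t blob_bytes[2] = {9, 9};
      static const Blob blob{blob_bytes, 2};
      g_current_blob = &blob;
      assert(code.InstructionSize() == 2);  // blob wins once installed
      return 0;
    }
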
@@ -14113,7 +14180,7 @@ void Code::PrintDeoptLocation(FILE* out, const char* str, Address pc) {
bool Code::CanDeoptAt(Address pc) {
DeoptimizationData* deopt_data =
DeoptimizationData::cast(deoptimization_data());
- Address code_start_address = instruction_start();
+ Address code_start_address = InstructionStart();
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
Address address = code_start_address + deopt_data->Pc(i)->value();
@@ -14144,6 +14211,35 @@ const char* AbstractCode::Kind2String(Kind kind) {
UNREACHABLE();
}
+#ifdef V8_EMBEDDED_BUILTINS
+bool Code::IsProcessIndependent() {
+ constexpr int all_real_modes_mask =
+ (1 << (RelocInfo::LAST_REAL_RELOC_MODE + 1)) - 1;
+ constexpr int mode_mask =
+ all_real_modes_mask & ~RelocInfo::ModeMask(RelocInfo::COMMENT) &
+ ~RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) &
+ ~RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) &
+ ~RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET) &
+ ~RelocInfo::ModeMask(RelocInfo::CONST_POOL) &
+ ~RelocInfo::ModeMask(RelocInfo::VENEER_POOL);
+ STATIC_ASSERT(RelocInfo::LAST_REAL_RELOC_MODE == RelocInfo::VENEER_POOL);
+ STATIC_ASSERT(RelocInfo::ModeMask(RelocInfo::COMMENT) ==
+ (1 << RelocInfo::COMMENT));
+ STATIC_ASSERT(
+ mode_mask ==
+ (RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::WASM_GLOBAL_HANDLE) |
+ RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
+ RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL) |
+ RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
+ RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE)));
+
+ RelocIterator it(this, mode_mask);
+ return it.done();
+}
+#endif
+
Handle<WeakCell> Code::WeakCellFor(Handle<Code> code) {
DCHECK(code->kind() == OPTIMIZED_FUNCTION);
WeakCell* raw_cell = code->CachedWeakCell();
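
IsProcessIndependent, added above, builds a mask covering every "real" relocation mode, clears the modes that do not tie code to process-specific addresses, and declares the code process-independent exactly when a RelocIterator over the remaining modes finds nothing. A standalone sketch of that mask construction, with a reduced, invented mode enum:

    // Standalone sketch of the bitmask construction used by IsProcessIndependent.
    #include <cassert>

    enum Mode {
      kCodeTarget, kEmbeddedObject, kRuntimeEntry, kExternalReference,
      kComment, kConstPool, kVeneerPool,
      kLastMode = kVeneerPool
    };

    constexpr int ModeMask(Mode m) { return 1 << m; }

    int main() {
      constexpr int all_modes_mask = (1 << (kLastMode + 1)) - 1;
      constexpr int interesting = all_modes_mask & ~ModeMask(kComment) &
                                  ~ModeMask(kConstPool) & ~ModeMask(kVeneerPool);
      // The remaining bits are exactly the modes whose presence would make the
      // code depend on process-specific addresses.
      static_assert(interesting == (ModeMask(kCodeTarget) | ModeMask(kEmbeddedObject) |
                                    ModeMask(kRuntimeEntry) | ModeMask(kExternalReference)),
                    "mask covers exactly the disallowed modes");

      int modes_present_in_code = ModeMask(kComment);  // e.g. only comments
      bool process_independent = (modes_present_in_code & interesting) == 0;
      assert(process_independent);
      return 0;
    }
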
@@ -14303,7 +14399,8 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT
}
case Translation::BUILTIN_CONTINUATION_FRAME:
- case Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME: {
+ case Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME:
+ case Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME: {
int bailout_id = iterator.Next();
int shared_info_id = iterator.Next();
Object* shared_info = LiteralArray()->get(shared_info_id);
@@ -14457,7 +14554,7 @@ void Code::Disassemble(const char* name, std::ostream& os, void* current_pc) {
} else {
// There are some handlers and ICs that we can also find names for with
// Builtins::Lookup.
- name = GetIsolate()->builtins()->Lookup(instruction_start());
+ name = GetIsolate()->builtins()->Lookup(raw_instruction_start());
if (name != nullptr) {
os << "name = " << name << "\n";
}
@@ -14468,20 +14565,20 @@ void Code::Disassemble(const char* name, std::ostream& os, void* current_pc) {
os << "compiler = " << (is_turbofanned() ? "turbofan" : "unknown") << "\n";
os << "address = " << static_cast<const void*>(this) << "\n";
- os << "Body (size = " << instruction_size() << ")\n";
+ os << "Body (size = " << InstructionSize() << ")\n";
{
Isolate* isolate = GetIsolate();
- int size = instruction_size();
+ int size = InstructionSize();
int safepoint_offset =
has_safepoint_info() ? safepoint_table_offset() : size;
- int constant_pool_offset = FLAG_enable_embedded_constant_pool
- ? this->constant_pool_offset()
- : size;
+ int constant_pool_offset = this->constant_pool_offset();
+ int handler_offset = handler_table_offset() ? handler_table_offset() : size;
// Stop before reaching any embedded tables
- int code_size = Min(safepoint_offset, constant_pool_offset);
+ int code_size =
+ Min(handler_offset, Min(safepoint_offset, constant_pool_offset));
os << "Instructions (size = " << code_size << ")\n";
- byte* begin = instruction_start();
+ byte* begin = InstructionStart();
byte* end = begin + code_size;
Disassembler::Decode(isolate, &os, begin, end, this, current_pc);
@@ -14522,7 +14619,7 @@ void Code::Disassemble(const char* name, std::ostream& os, void* current_pc) {
os << "Safepoints (size = " << table.size() << ")\n";
for (unsigned i = 0; i < table.length(); i++) {
unsigned pc_offset = table.GetPcOffset(i);
- os << static_cast<const void*>(instruction_start() + pc_offset) << " ";
+ os << static_cast<const void*>(InstructionStart() + pc_offset) << " ";
os << std::setw(6) << std::hex << pc_offset << " " << std::setw(4);
int trampoline_pc = table.GetTrampolinePcOffset(i);
print_pc(os, trampoline_pc);
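
In Disassemble, the instruction dump now stops at whichever embedded table comes first: safepoint table, constant pool, or the newly considered handler table, each defaulting to the full instruction size when absent. The arithmetic, restated standalone:

    // Sketch of how the disassembler picks where the instruction body ends.
    #include <algorithm>
    #include <cassert>

    int main() {
      const int size = 400;             // InstructionSize()
      int safepoint_offset = 320;       // offset of safepoint table, or size
      int constant_pool_offset = size;  // no constant pool in this example
      int handler_offset = 0;           // 0 means "no handler table"
      if (handler_offset == 0) handler_offset = size;

      int code_size = std::min(handler_offset,
                               std::min(safepoint_offset, constant_pool_offset));
      assert(code_size == 320);  // disassembly stops at the safepoint table
      return 0;
    }
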
@@ -14888,7 +14985,6 @@ bool DependentCode::MarkCodeForDeoptimization(
DisallowHeapAllocation no_allocation_scope;
// Mark all the code that needs to be deoptimized.
bool marked = false;
- bool invalidate_embedded_objects = group == kWeakCodeGroup;
int count = this->count();
for (int i = 0; i < count; i++) {
Object* obj = object_at(i);
@@ -14897,10 +14993,7 @@ bool DependentCode::MarkCodeForDeoptimization(
if (cell->cleared()) continue;
Code* code = Code::cast(cell->value());
if (!code->marked_for_deoptimization()) {
- SetMarkedForDeoptimization(code, group);
- if (invalidate_embedded_objects) {
- code->InvalidateEmbeddedObjects();
- }
+ code->SetMarkedForDeoptimization(DependencyGroupName(group));
marked = true;
}
} else {
@@ -14930,27 +15023,24 @@ void DependentCode::DeoptimizeDependentCodeGroup(
}
}
-
-void DependentCode::SetMarkedForDeoptimization(Code* code,
- DependencyGroup group) {
- code->set_marked_for_deoptimization(true);
+void Code::SetMarkedForDeoptimization(const char* reason) {
+ set_marked_for_deoptimization(true);
if (FLAG_trace_deopt &&
- (code->deoptimization_data() != code->GetHeap()->empty_fixed_array())) {
+ (deoptimization_data() != GetHeap()->empty_fixed_array())) {
DeoptimizationData* deopt_data =
- DeoptimizationData::cast(code->deoptimization_data());
- CodeTracer::Scope scope(code->GetHeap()->isolate()->GetCodeTracer());
- PrintF(scope.file(), "[marking dependent code 0x%08" V8PRIxPTR
- " (opt #%d) for deoptimization, reason: %s]\n",
- reinterpret_cast<intptr_t>(code),
- deopt_data->OptimizationId()->value(), DependencyGroupName(group));
+ DeoptimizationData::cast(deoptimization_data());
+ CodeTracer::Scope scope(GetHeap()->isolate()->GetCodeTracer());
+ PrintF(scope.file(),
+ "[marking dependent code 0x%08" V8PRIxPTR
+ " (opt #%d) for deoptimization, reason: %s]\n",
+ reinterpret_cast<intptr_t>(this),
+ deopt_data->OptimizationId()->value(), reason);
}
}
const char* DependentCode::DependencyGroupName(DependencyGroup group) {
switch (group) {
- case kWeakCodeGroup:
- return "weak-code";
case kTransitionGroup:
return "transition";
case kPrototypeCheckGroup:
@@ -15701,7 +15791,8 @@ bool JSObject::WasConstructedFromApiFunction() {
bool is_api_object = instance_type == JS_API_OBJECT_TYPE ||
instance_type == JS_SPECIAL_API_OBJECT_TYPE;
bool is_wasm_object =
- instance_type == WASM_MEMORY_TYPE || instance_type == WASM_MODULE_TYPE ||
+ instance_type == WASM_GLOBAL_TYPE || instance_type == WASM_MEMORY_TYPE ||
+ instance_type == WASM_MODULE_TYPE ||
instance_type == WASM_INSTANCE_TYPE || instance_type == WASM_TABLE_TYPE;
#ifdef ENABLE_SLOW_DCHECKS
if (FLAG_enable_slow_asserts) {
@@ -16342,7 +16433,7 @@ Handle<Derived> HashTable<Derived, Shape>::New(
? at_least_space_for
: ComputeCapacity(at_least_space_for);
if (capacity > HashTable::kMaxCapacity) {
- v8::internal::Heap::FatalProcessOutOfMemory("invalid table size", true);
+ isolate->heap()->FatalProcessOutOfMemory("invalid table size");
}
return NewInternal(isolate, capacity, pretenure);
}
@@ -16505,19 +16596,21 @@ bool HashTable<Derived, Shape>::HasSufficientCapacityToAdd(
}
template <typename Derived, typename Shape>
-Handle<Derived> HashTable<Derived, Shape>::Shrink(Handle<Derived> table) {
+Handle<Derived> HashTable<Derived, Shape>::Shrink(Handle<Derived> table,
+ int additionalCapacity) {
int capacity = table->Capacity();
int nof = table->NumberOfElements();
// Shrink to fit the number of elements if only a quarter of the
// capacity is filled with elements.
if (nof > (capacity >> 2)) return table;
- // Allocate a new dictionary with room for at least the current
- // number of elements. The allocation method will make sure that
- // there is extra room in the dictionary for additions. Don't go
- // lower than room for 16 elements.
- int at_least_room_for = nof;
- if (at_least_room_for < 16) return table;
+ // Allocate a new dictionary with room for at least the current number of
+ // elements + {additionalCapacity}. The allocation method will make sure that
+ // there is extra room in the dictionary for additions. Don't go lower than
+ // room for {kMinShrinkCapacity} elements.
+ int at_least_room_for = nof + additionalCapacity;
+ DCHECK_LE(at_least_room_for, capacity);
+ if (at_least_room_for < Derived::kMinShrinkCapacity) return table;
Isolate* isolate = table->GetIsolate();
const int kMinCapacityForPretenure = 256;
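
Shrink now takes an additionalCapacity argument so a caller can shrink and still be guaranteed room for elements it is about to insert; the quarter-full trigger and the kMinShrinkCapacity floor otherwise behave as described in the new comment. A standalone sketch of the decision (the constant here is illustrative):

    // Standalone sketch of the shrink decision.
    #include <cassert>

    constexpr int kMinShrinkCapacity = 16;

    // Returns the capacity to request, or 0 to keep the table as-is.
    int ShrinkTarget(int capacity, int nof, int additional_capacity) {
      if (nof > (capacity >> 2)) return 0;                    // more than 1/4 full: keep
      int at_least_room_for = nof + additional_capacity;
      if (at_least_room_for < kMinShrinkCapacity) return 0;   // too small to bother
      return at_least_room_for;                               // allocator adds slack on top
    }

    int main() {
      assert(ShrinkTarget(128, 64, 0) == 0);   // half full, no shrink
      assert(ShrinkTarget(128, 20, 1) == 21);  // sparse: shrink, keep room for one add
      assert(ShrinkTarget(128, 4, 0) == 0);    // would drop below the minimum
      return 0;
    }
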
@@ -16557,8 +16650,6 @@ template class HashTable<CompilationCacheTable, CompilationCacheShape>;
template class HashTable<ObjectHashTable, ObjectHashTableShape>;
-template class HashTable<WeakHashTable, WeakHashTableShape>;
-
template class Dictionary<NameDictionary, NameDictionaryShape>;
template class Dictionary<GlobalDictionary, GlobalDictionaryShape>;
@@ -16620,8 +16711,9 @@ HashTable<ObjectHashSet, ObjectHashSetShape>::New(Isolate*, int n,
PretenureFlag,
MinimumCapacity);
-template Handle<NameDictionary> HashTable<
- NameDictionary, NameDictionaryShape>::Shrink(Handle<NameDictionary>);
+template Handle<NameDictionary>
+HashTable<NameDictionary, NameDictionaryShape>::Shrink(Handle<NameDictionary>,
+ int additionalCapacity);
template Handle<NameDictionary>
BaseNameDictionary<NameDictionary, NameDictionaryShape>::Add(
@@ -17027,25 +17119,46 @@ Handle<String> StringTable::LookupKey(Isolate* isolate, StringTableKey* key) {
return handle(String::cast(table->KeyAt(entry)), isolate);
}
+ table = StringTable::CautiousShrink(table);
// Adding new string. Grow table if needed.
table = StringTable::EnsureCapacity(table, 1);
+ isolate->heap()->SetRootStringTable(*table);
+ return AddKeyNoResize(isolate, key);
+}
+
+Handle<String> StringTable::AddKeyNoResize(Isolate* isolate,
+ StringTableKey* key) {
+ Handle<StringTable> table = isolate->factory()->string_table();
+ DCHECK(table->HasSufficientCapacityToAdd(1));
// Create string object.
Handle<String> string = key->AsHandle(isolate);
// There must be no attempts to internalize strings that could throw
// InvalidStringLength error.
CHECK(!string.is_null());
DCHECK(string->HasHashCode());
+ DCHECK_EQ(table->FindEntry(key), kNotFound);
// Add the new string and return it along with the string table.
- entry = table->FindInsertionEntry(key->Hash());
+ int entry = table->FindInsertionEntry(key->Hash());
table->set(EntryToIndex(entry), *string);
table->ElementAdded();
- isolate->heap()->SetRootStringTable(*table);
return Handle<String>::cast(string);
}
+Handle<StringTable> StringTable::CautiousShrink(Handle<StringTable> table) {
+ // Only shrink if the table is very empty to avoid performance penalty.
+ int capacity = table->Capacity();
+ int nof = table->NumberOfElements();
+ if (capacity <= StringTable::kMinCapacity) return table;
+ if (nof > (capacity / kMaxEmptyFactor)) return table;
+ // Make sure that after shrinking the table is half empty (aka. has capacity
+ // for another {nof} elements).
+ DCHECK_LE(nof * 2, capacity);
+ return Shrink(table, nof);
+}
+
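
LookupKey above is restructured so that all resizing, the new CautiousShrink plus EnsureCapacity, happens before AddKeyNoResize, which then inserts without allocating (hence its DCHECK on HasSufficientCapacityToAdd). A generic sketch of that reserve-then-insert-without-resizing split, using an invented container:

    // Sketch of the "resize up front, then insert without resizing" split.
    #include <cassert>
    #include <vector>

    struct FixedCapacitySet {
      std::vector<int> slots;
      size_t used = 0;

      void EnsureCapacity(size_t extra) {   // the only place that grows
        if (used + extra > slots.size()) slots.resize((used + extra) * 2);
      }
      void AddNoResize(int value) {         // must never allocate
        assert(used < slots.size());        // mirrors HasSufficientCapacityToAdd
        slots[used++] = value;
      }
    };

    int main() {
      FixedCapacitySet set;
      set.EnsureCapacity(1);  // growing (and, in V8, possibly shrinking) happens here...
      set.AddNoResize(42);    // ...so the insert itself is allocation-free
      assert(set.used == 1);
      return 0;
    }
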
namespace {
class StringTableNoAllocateKey : public StringTableKey {
@@ -17061,12 +17174,22 @@ class StringTableNoAllocateKey : public StringTableKey {
special_flattening_ = true;
uint32_t hash_field = 0;
if (one_byte_) {
- one_byte_content_ = new uint8_t[length];
+ if (V8_LIKELY(length <=
+ static_cast<int>(arraysize(one_byte_buffer_)))) {
+ one_byte_content_ = one_byte_buffer_;
+ } else {
+ one_byte_content_ = new uint8_t[length];
+ }
String::WriteToFlat(string, one_byte_content_, 0, length);
hash_field =
StringHasher::HashSequentialString(one_byte_content_, length, seed);
} else {
- two_byte_content_ = new uint16_t[length];
+ if (V8_LIKELY(length <=
+ static_cast<int>(arraysize(two_byte_buffer_)))) {
+ two_byte_content_ = two_byte_buffer_;
+ } else {
+ two_byte_content_ = new uint16_t[length];
+ }
String::WriteToFlat(string, two_byte_content_, 0, length);
hash_field =
StringHasher::HashSequentialString(two_byte_content_, length, seed);
@@ -17084,9 +17207,9 @@ class StringTableNoAllocateKey : public StringTableKey {
~StringTableNoAllocateKey() {
if (one_byte_) {
- delete[] one_byte_content_;
+ if (one_byte_content_ != one_byte_buffer_) delete[] one_byte_content_;
} else {
- delete[] two_byte_content_;
+ if (two_byte_content_ != two_byte_buffer_) delete[] two_byte_content_;
}
}
@@ -17148,7 +17271,7 @@ class StringTableNoAllocateKey : public StringTableKey {
}
}
- MUST_USE_RESULT Handle<String> AsHandle(Isolate* isolate) override {
+ V8_WARN_UNUSED_RESULT Handle<String> AsHandle(Isolate* isolate) override {
UNREACHABLE();
}
@@ -17160,6 +17283,10 @@ class StringTableNoAllocateKey : public StringTableKey {
uint8_t* one_byte_content_;
uint16_t* two_byte_content_;
};
+ union {
+ uint8_t one_byte_buffer_[256];
+ uint16_t two_byte_buffer_[128];
+ };
};
} // namespace
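
StringTableNoAllocateKey now carries a 256-byte inline buffer (shared between the one- and two-byte cases via the union above) and only heap-allocates flattened content that does not fit; the destructor frees only the heap case. A standalone sketch of that small-buffer optimization:

    // Standalone sketch: short payloads live in an inline buffer, long ones on
    // the heap, and only the heap case is freed.
    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    class SmallOrHeapBuffer {
     public:
      explicit SmallOrHeapBuffer(size_t length) : length_(length) {
        data_ = length <= sizeof(inline_buffer_) ? inline_buffer_
                                                 : new uint8_t[length];
      }
      ~SmallOrHeapBuffer() {
        if (data_ != inline_buffer_) delete[] data_;  // inline storage needs no free
      }
      uint8_t* data() { return data_; }
      size_t length() const { return length_; }

     private:
      uint8_t* data_;
      size_t length_;
      uint8_t inline_buffer_[256];
    };

    int main() {
      SmallOrHeapBuffer small(16);    // stays in the inline buffer
      SmallOrHeapBuffer large(1024);  // spills to the heap
      std::memset(small.data(), 0xab, small.length());
      std::memset(large.data(), 0xcd, large.length());
      assert(small.data() != large.data());
      return 0;
    }
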
@@ -17519,7 +17646,7 @@ void CompilationCacheTable::Age() {
}
} else if (get(entry_index)->IsFixedArray()) {
SharedFunctionInfo* info = SharedFunctionInfo::cast(get(value_index));
- if (info->IsInterpreted() && info->bytecode_array()->IsOld()) {
+ if (info->IsInterpreted() && info->GetBytecodeArray()->IsOld()) {
for (int i = 0; i < kEntrySize; i++) {
NoWriteBarrierSet(this, entry_index + i, the_hole_value);
}
@@ -18062,48 +18189,6 @@ void ObjectHashTable::RemoveEntry(int entry) {
ElementRemoved();
}
-
-Object* WeakHashTable::Lookup(Handle<HeapObject> key) {
- DisallowHeapAllocation no_gc;
- Isolate* isolate = GetIsolate();
- DCHECK(IsKey(isolate, *key));
- int entry = FindEntry(key);
- if (entry == kNotFound) return isolate->heap()->the_hole_value();
- return get(EntryToValueIndex(entry));
-}
-
-
-Handle<WeakHashTable> WeakHashTable::Put(Handle<WeakHashTable> table,
- Handle<HeapObject> key,
- Handle<HeapObject> value) {
- Isolate* isolate = key->GetIsolate();
- DCHECK(table->IsKey(isolate, *key));
- int entry = table->FindEntry(key);
- // Key is already in table, just overwrite value.
- if (entry != kNotFound) {
- table->set(EntryToValueIndex(entry), *value);
- return table;
- }
-
- Handle<WeakCell> key_cell = isolate->factory()->NewWeakCell(key);
-
- // Check whether the hash table should be extended.
- table = EnsureCapacity(table, 1, TENURED);
-
- uint32_t hash = ShapeT::Hash(isolate, key);
- table->AddEntry(table->FindInsertionEntry(hash), key_cell, value);
- return table;
-}
-
-
-void WeakHashTable::AddEntry(int entry, Handle<WeakCell> key_cell,
- Handle<HeapObject> value) {
- DisallowHeapAllocation no_allocation;
- set(EntryToIndex(entry), *key_cell);
- set(EntryToValueIndex(entry), *value);
- ElementAdded();
-}
-
template <class Derived, int entrysize>
Handle<Derived> OrderedHashTable<Derived, entrysize>::Allocate(
Isolate* isolate, int capacity, PretenureFlag pretenure) {
@@ -18114,7 +18199,7 @@ Handle<Derived> OrderedHashTable<Derived, entrysize>::Allocate(
// field of this object.
capacity = base::bits::RoundUpToPowerOfTwo32(Max(kMinCapacity, capacity));
if (capacity > kMaxCapacity) {
- v8::internal::Heap::FatalProcessOutOfMemory("invalid table size", true);
+ isolate->heap()->FatalProcessOutOfMemory("invalid table size");
}
int num_buckets = capacity / kLoadFactor;
Handle<FixedArray> backing_store = isolate->factory()->NewFixedArrayWithMap(
@@ -19024,12 +19109,12 @@ Handle<String> JSMessageObject::GetSourceLine() const {
void JSArrayBuffer::Neuter() {
CHECK(is_neuterable());
+ CHECK(!was_neutered());
CHECK(is_external());
set_backing_store(nullptr);
set_byte_length(Smi::kZero);
- set_allocation_base(nullptr);
- set_allocation_length(0);
set_was_neutered(true);
+ set_is_neuterable(false);
// Invalidate the neutering protector.
Isolate* const isolate = GetIsolate();
if (isolate->IsArrayBufferNeuteringIntact()) {
@@ -19037,50 +19122,45 @@ void JSArrayBuffer::Neuter() {
}
}
-void JSArrayBuffer::FreeBackingStore() {
+void JSArrayBuffer::FreeBackingStoreFromMainThread() {
if (allocation_base() == nullptr) {
return;
}
- using AllocationMode = ArrayBuffer::Allocator::AllocationMode;
- const size_t length = allocation_length();
- const AllocationMode mode = allocation_mode();
- FreeBackingStore(GetIsolate(), {allocation_base(), length, mode});
-
+ FreeBackingStore(GetIsolate(),
+ {allocation_base(), allocation_length(), backing_store(),
+ allocation_mode(), is_wasm_memory()});
// Zero out the backing store and allocation base to avoid dangling
// pointers.
set_backing_store(nullptr);
- // TODO(eholk): set_byte_length(0) once we aren't using Smis for the
- // byte_length. We can't do it now because the GC needs to call
- // FreeBackingStore while it is collecting.
- set_allocation_base(nullptr);
- set_allocation_length(0);
}
// static
void JSArrayBuffer::FreeBackingStore(Isolate* isolate, Allocation allocation) {
if (allocation.mode == ArrayBuffer::Allocator::AllocationMode::kReservation) {
- // TODO(eholk): check with WasmAllocationTracker to make sure this is
- // actually a buffer we are tracking.
- isolate->wasm_engine()->allocation_tracker()->ReleaseAddressSpace(
- allocation.length);
- CHECK(FreePages(allocation.allocation_base, allocation.length));
+ bool needs_free = true;
+ if (allocation.is_wasm_memory) {
+ wasm::WasmMemoryTracker* memory_tracker =
+ isolate->wasm_engine()->memory_tracker();
+ if (memory_tracker->FreeMemoryIfIsWasmMemory(allocation.backing_store)) {
+ needs_free = false;
+ }
+ }
+ if (needs_free) {
+ CHECK(FreePages(allocation.allocation_base, allocation.length));
+ }
} else {
isolate->array_buffer_allocator()->Free(allocation.allocation_base,
allocation.length);
}
}
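
FreeBackingStore above becomes wasm-aware: reservation-mode allocations are first offered to the WasmMemoryTracker, and FreePages runs only when the tracker did not own the memory; the is_wasm_memory flag itself lives in the JSArrayBuffer bit field (see set_is_wasm_memory below). A standalone sketch of the BitField-style flag update, with an illustrative bit position:

    // Sketch of a one-bit flag packed into a wider bit-field word, in the style
    // of IsWasmMemory::update(bit_field(), value). The shift is illustrative.
    #include <cassert>
    #include <cstdint>

    struct IsWasmMemory {
      static constexpr uint32_t kShift = 3;
      static constexpr uint32_t kMask = 1u << kShift;
      static constexpr uint32_t update(uint32_t previous, bool value) {
        return (previous & ~kMask) | (value ? kMask : 0u);
      }
      static constexpr bool decode(uint32_t field) { return (field & kMask) != 0; }
    };

    int main() {
      uint32_t bit_field = 0x16;  // other flags already set
      bit_field = IsWasmMemory::update(bit_field, true);
      assert(IsWasmMemory::decode(bit_field));
      bit_field = IsWasmMemory::update(bit_field, false);
      assert(!IsWasmMemory::decode(bit_field));
      assert(bit_field == 0x16);  // other bits untouched
      return 0;
    }
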
-void JSArrayBuffer::Setup(Handle<JSArrayBuffer> array_buffer, Isolate* isolate,
- bool is_external, void* data, size_t allocated_length,
- SharedFlag shared) {
- return Setup(array_buffer, isolate, is_external, data, allocated_length, data,
- allocated_length, shared);
+void JSArrayBuffer::set_is_wasm_memory(bool is_wasm_memory) {
+ set_bit_field(IsWasmMemory::update(bit_field(), is_wasm_memory));
}
void JSArrayBuffer::Setup(Handle<JSArrayBuffer> array_buffer, Isolate* isolate,
- bool is_external, void* allocation_base,
- size_t allocation_length, void* data,
- size_t byte_length, SharedFlag shared) {
+ bool is_external, void* data, size_t byte_length,
+ SharedFlag shared, bool is_wasm_memory) {
DCHECK_EQ(array_buffer->GetEmbedderFieldCount(),
v8::ArrayBuffer::kEmbedderFieldCount);
for (int i = 0; i < v8::ArrayBuffer::kEmbedderFieldCount; i++) {
@@ -19090,6 +19170,7 @@ void JSArrayBuffer::Setup(Handle<JSArrayBuffer> array_buffer, Isolate* isolate,
array_buffer->set_is_external(is_external);
array_buffer->set_is_neuterable(shared == SharedFlag::kNotShared);
array_buffer->set_is_shared(shared == SharedFlag::kShared);
+ array_buffer->set_is_wasm_memory(is_wasm_memory);
Handle<Object> heap_byte_length =
isolate->factory()->NewNumberFromSize(byte_length);
@@ -19101,9 +19182,6 @@ void JSArrayBuffer::Setup(Handle<JSArrayBuffer> array_buffer, Isolate* isolate,
// already been promoted.
array_buffer->set_backing_store(data);
- array_buffer->set_allocation_base(allocation_base);
- array_buffer->set_allocation_length(allocation_length);
-
if (data && !is_external) {
isolate->heap()->RegisterNewArrayBuffer(*array_buffer);
}
@@ -19151,23 +19229,21 @@ bool JSArrayBuffer::SetupAllocatingData(Handle<JSArrayBuffer> array_buffer,
return true;
}
-
Handle<JSArrayBuffer> JSTypedArray::MaterializeArrayBuffer(
Handle<JSTypedArray> typed_array) {
+ DCHECK(typed_array->is_on_heap());
- Handle<Map> map(typed_array->map());
Isolate* isolate = typed_array->GetIsolate();
- DCHECK(IsFixedTypedArrayElementsKind(map->elements_kind()));
+ DCHECK(IsFixedTypedArrayElementsKind(typed_array->GetElementsKind()));
Handle<FixedTypedArrayBase> fixed_typed_array(
FixedTypedArrayBase::cast(typed_array->elements()));
Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(typed_array->buffer()),
isolate);
- // This code does not know how to materialize from a buffer with guard
- // regions.
- DCHECK(!buffer->has_guard_region());
+ // This code does not know how to materialize from wasm buffers.
+ DCHECK(!buffer->is_wasm_memory());
void* backing_store =
isolate->array_buffer_allocator()->AllocateUninitialized(
@@ -19181,8 +19257,6 @@ Handle<JSArrayBuffer> JSTypedArray::MaterializeArrayBuffer(
// registration method below handles the case of registering a buffer that has
// already been promoted.
buffer->set_backing_store(backing_store);
- buffer->set_allocation_base(backing_store);
- buffer->set_allocation_length(NumberToSize(buffer->byte_length()));
// RegisterNewArrayBuffer expects a valid length for adjusting counters.
isolate->heap()->RegisterNewArrayBuffer(*buffer);
memcpy(buffer->backing_store(),
@@ -19194,17 +19268,14 @@ Handle<JSArrayBuffer> JSTypedArray::MaterializeArrayBuffer(
static_cast<uint8_t*>(buffer->backing_store()));
typed_array->set_elements(*new_elements);
+ DCHECK(!typed_array->is_on_heap());
return buffer;
}
-
Handle<JSArrayBuffer> JSTypedArray::GetBuffer() {
- Handle<JSArrayBuffer> array_buffer(JSArrayBuffer::cast(buffer()),
- GetIsolate());
- if (array_buffer->was_neutered() ||
- array_buffer->backing_store() != nullptr ||
- array_buffer->has_guard_region()) {
+ if (!is_on_heap()) {
+ Handle<JSArrayBuffer> array_buffer(JSArrayBuffer::cast(buffer()));
return array_buffer;
}
Handle<JSTypedArray> self(this);
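
GetBuffer now branches on is_on_heap(): typed arrays whose elements still live on the V8 heap get an external buffer materialized on first access, replacing the old backing_store/was_neutered/guard-region checks. A generic sketch of that materialize-on-first-access shape, with invented types:

    // Sketch: copy embedded data out to external storage the first time an
    // external view of it is requested, then keep reusing that copy.
    #include <cassert>
    #include <vector>

    struct LazyBuffer {
      std::vector<unsigned char> on_heap;   // data embedded in the object
      std::vector<unsigned char> external;  // materialized copy, empty until needed

      bool is_on_heap() const { return external.empty(); }

      std::vector<unsigned char>& GetBuffer() {
        if (!is_on_heap()) return external;  // fast path: already materialized
        external = on_heap;                  // copy out, then switch over
        on_heap.clear();
        return external;
      }
    };

    int main() {
      LazyBuffer b{{1, 2, 3}, {}};
      assert(b.is_on_heap());
      auto& buf = b.GetBuffer();
      assert(!b.is_on_heap() && buf.size() == 3);
      auto& again = b.GetBuffer();  // second call reuses the copy
      assert(&again == &buf);
      return 0;
    }
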
@@ -19365,7 +19436,7 @@ int JSGeneratorObject::source_position() const {
// is used in the source position table, hence the subtraction.
code_offset -= BytecodeArray::kHeaderSize - kHeapObjectTag;
AbstractCode* code =
- AbstractCode::cast(function()->shared()->bytecode_array());
+ AbstractCode::cast(function()->shared()->GetBytecodeArray());
return code->SourcePosition(code_offset);
}
@@ -19426,46 +19497,11 @@ MaybeHandle<Name> FunctionTemplateInfo::TryGetCachedPropertyName(
return MaybeHandle<Name>();
}
-// static
-ElementsKind JSArrayIterator::ElementsKindForInstanceType(InstanceType type) {
- DCHECK_GE(type, FIRST_ARRAY_ITERATOR_TYPE);
- DCHECK_LE(type, LAST_ARRAY_ITERATOR_TYPE);
+#undef FIELD_ADDR
+#undef FIELD_ADDR_CONST
+#undef READ_INT32_FIELD
+#undef READ_INT64_FIELD
+#undef READ_BYTE_FIELD
- if (type <= LAST_ARRAY_KEY_ITERATOR_TYPE) {
- // Should be ignored for key iterators.
- return PACKED_ELEMENTS;
- } else {
- ElementsKind kind;
- if (type < FIRST_ARRAY_VALUE_ITERATOR_TYPE) {
- // Convert `type` to a value iterator from an entries iterator
- type = static_cast<InstanceType>(type +
- (FIRST_ARRAY_VALUE_ITERATOR_TYPE -
- FIRST_ARRAY_KEY_VALUE_ITERATOR_TYPE));
- DCHECK_GE(type, FIRST_ARRAY_VALUE_ITERATOR_TYPE);
- DCHECK_LE(type, LAST_ARRAY_ITERATOR_TYPE);
- }
-
- if (type <= JS_BIGINT64_ARRAY_VALUE_ITERATOR_TYPE) {
- kind =
- static_cast<ElementsKind>(FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND +
- (type - FIRST_ARRAY_VALUE_ITERATOR_TYPE));
- DCHECK_LE(kind, LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND);
- } else if (type < JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE) {
- kind = static_cast<ElementsKind>(
- FIRST_FAST_ELEMENTS_KIND +
- (type - JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE));
- DCHECK_LE(kind, LAST_FAST_ELEMENTS_KIND);
- } else {
- // For any slow element cases, the actual elements kind is not known.
- // Simply
- // return a slow elements kind in this case. Users of this function must
- // not
- // depend on this.
- return DICTIONARY_ELEMENTS;
- }
- DCHECK_LE(kind, LAST_ELEMENTS_KIND);
- return kind;
- }
-}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index a9da77fce3..72c31a35de 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -12,7 +12,6 @@
#include "src/bailout-reason.h"
#include "src/base/bits.h"
#include "src/base/flags.h"
-#include "src/builtins/builtins-definitions.h"
#include "src/checks.h"
#include "src/elements-kind.h"
#include "src/field-index.h"
@@ -73,6 +72,7 @@
// - JSDate
// - JSMessageObject
// - JSModuleNamespace
+// - WasmGlobalObject
// - WasmInstanceObject
// - WasmMemoryObject
// - WasmModuleObject
@@ -100,7 +100,7 @@
// - ScopeInfo
// - ModuleInfo
// - ScriptContextTable
-// - WeakFixedArray
+// - FixedArrayOfWeakCells
// - WasmSharedModuleData
// - WasmCompiledModule
// - FixedDoubleArray
@@ -217,14 +217,26 @@ static inline bool IsCOWHandlingStoreMode(KeyedAccessStoreMode store_mode) {
}
static inline KeyedAccessStoreMode GetNonTransitioningStoreMode(
- KeyedAccessStoreMode store_mode) {
- if (store_mode >= STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
- return store_mode;
+ KeyedAccessStoreMode store_mode, bool receiver_was_cow) {
+ switch (store_mode) {
+ case STORE_AND_GROW_NO_TRANSITION_HANDLE_COW:
+ case STORE_AND_GROW_TRANSITION_TO_OBJECT:
+ case STORE_AND_GROW_TRANSITION_TO_DOUBLE:
+ store_mode = STORE_AND_GROW_NO_TRANSITION_HANDLE_COW;
+ break;
+ case STANDARD_STORE:
+ case STORE_TRANSITION_TO_OBJECT:
+ case STORE_TRANSITION_TO_DOUBLE:
+ store_mode =
+ receiver_was_cow ? STORE_NO_TRANSITION_HANDLE_COW : STANDARD_STORE;
+ break;
+ case STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS:
+ case STORE_NO_TRANSITION_HANDLE_COW:
+ break;
}
- if (store_mode >= STORE_AND_GROW_NO_TRANSITION_HANDLE_COW) {
- return STORE_AND_GROW_NO_TRANSITION_HANDLE_COW;
- }
- return STANDARD_STORE;
+ DCHECK(!IsTransitionStoreMode(store_mode));
+ DCHECK_IMPLIES(receiver_was_cow, IsCOWHandlingStoreMode(store_mode));
+ return store_mode;
}
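
GetNonTransitioningStoreMode now takes receiver_was_cow and spells out the mapping per mode instead of relying on enum ordering. The function body below is copied from the hunk above; the enum definition and the asserts are scaffolding added here only to show the expected mapping:

    #include <cassert>

    enum KeyedAccessStoreMode {
      STANDARD_STORE,
      STORE_TRANSITION_TO_OBJECT,
      STORE_TRANSITION_TO_DOUBLE,
      STORE_AND_GROW_NO_TRANSITION_HANDLE_COW,
      STORE_AND_GROW_TRANSITION_TO_OBJECT,
      STORE_AND_GROW_TRANSITION_TO_DOUBLE,
      STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS,
      STORE_NO_TRANSITION_HANDLE_COW
    };

    KeyedAccessStoreMode GetNonTransitioningStoreMode(KeyedAccessStoreMode store_mode,
                                                      bool receiver_was_cow) {
      switch (store_mode) {
        case STORE_AND_GROW_NO_TRANSITION_HANDLE_COW:
        case STORE_AND_GROW_TRANSITION_TO_OBJECT:
        case STORE_AND_GROW_TRANSITION_TO_DOUBLE:
          return STORE_AND_GROW_NO_TRANSITION_HANDLE_COW;
        case STANDARD_STORE:
        case STORE_TRANSITION_TO_OBJECT:
        case STORE_TRANSITION_TO_DOUBLE:
          return receiver_was_cow ? STORE_NO_TRANSITION_HANDLE_COW : STANDARD_STORE;
        case STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS:
        case STORE_NO_TRANSITION_HANDLE_COW:
          return store_mode;
      }
      return store_mode;  // unreachable; keeps compilers quiet
    }

    int main() {
      assert(GetNonTransitioningStoreMode(STORE_AND_GROW_TRANSITION_TO_DOUBLE, false) ==
             STORE_AND_GROW_NO_TRANSITION_HANDLE_COW);
      assert(GetNonTransitioningStoreMode(STORE_TRANSITION_TO_OBJECT, true) ==
             STORE_NO_TRANSITION_HANDLE_COW);
      assert(GetNonTransitioningStoreMode(STORE_TRANSITION_TO_OBJECT, false) ==
             STANDARD_STORE);
      return 0;
    }
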
@@ -360,6 +372,7 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(FIXED_BIGUINT64_ARRAY_TYPE) \
\
V(FIXED_DOUBLE_ARRAY_TYPE) \
+ V(FEEDBACK_METADATA_TYPE) \
V(FILLER_TYPE) \
\
V(ACCESS_CHECK_INFO_TYPE) \
@@ -373,6 +386,7 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(DEBUG_INFO_TYPE) \
V(FUNCTION_TEMPLATE_INFO_TYPE) \
V(INTERCEPTOR_INFO_TYPE) \
+ V(INTERPRETER_DATA_TYPE) \
V(MODULE_INFO_ENTRY_TYPE) \
V(MODULE_TYPE) \
V(OBJECT_TEMPLATE_INFO_TYPE) \
@@ -383,6 +397,9 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(STACK_FRAME_INFO_TYPE) \
V(TUPLE2_TYPE) \
V(TUPLE3_TYPE) \
+ V(WASM_COMPILED_MODULE_TYPE) \
+ V(WASM_DEBUG_INFO_TYPE) \
+ V(WASM_SHARED_MODULE_DATA_TYPE) \
\
V(CALLABLE_TASK_TYPE) \
V(CALLBACK_TASK_TYPE) \
@@ -391,11 +408,23 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE) \
\
V(FIXED_ARRAY_TYPE) \
+ V(BOILERPLATE_DESCRIPTION_TYPE) \
V(DESCRIPTOR_ARRAY_TYPE) \
V(HASH_TABLE_TYPE) \
V(SCOPE_INFO_TYPE) \
V(TRANSITION_ARRAY_TYPE) \
\
+ V(BLOCK_CONTEXT_TYPE) \
+ V(CATCH_CONTEXT_TYPE) \
+ V(DEBUG_EVALUATE_CONTEXT_TYPE) \
+ V(EVAL_CONTEXT_TYPE) \
+ V(FUNCTION_CONTEXT_TYPE) \
+ V(MODULE_CONTEXT_TYPE) \
+ V(NATIVE_CONTEXT_TYPE) \
+ V(SCRIPT_CONTEXT_TYPE) \
+ V(WITH_CONTEXT_TYPE) \
+ \
+ V(CALL_HANDLER_INFO_TYPE) \
V(CELL_TYPE) \
V(CODE_DATA_CONTAINER_TYPE) \
V(FEEDBACK_CELL_TYPE) \
@@ -408,6 +437,7 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(SMALL_ORDERED_HASH_SET_TYPE) \
V(STORE_HANDLER_TYPE) \
V(WEAK_CELL_TYPE) \
+ V(WEAK_FIXED_ARRAY_TYPE) \
\
V(JS_PROXY_TYPE) \
V(JS_GLOBAL_OBJECT_TYPE) \
@@ -420,6 +450,7 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
\
V(JS_ARGUMENTS_TYPE) \
V(JS_ARRAY_BUFFER_TYPE) \
+ V(JS_ARRAY_ITERATOR_TYPE) \
V(JS_ARRAY_TYPE) \
V(JS_ASYNC_FROM_SYNC_ITERATOR_TYPE) \
V(JS_ASYNC_GENERATOR_OBJECT_TYPE) \
@@ -434,6 +465,7 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(JS_MESSAGE_OBJECT_TYPE) \
V(JS_PROMISE_TYPE) \
V(JS_REGEXP_TYPE) \
+ V(JS_REGEXP_STRING_ITERATOR_TYPE) \
V(JS_SET_TYPE) \
V(JS_SET_KEY_VALUE_ITERATOR_TYPE) \
V(JS_SET_VALUE_ITERATOR_TYPE) \
@@ -444,8 +476,7 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(JS_TYPED_ARRAY_TYPE) \
V(JS_DATA_VIEW_TYPE) \
\
- ARRAY_ITERATOR_TYPE_LIST(V) \
- \
+ V(WASM_GLOBAL_TYPE) \
V(WASM_INSTANCE_TYPE) \
V(WASM_MEMORY_TYPE) \
V(WASM_MODULE_TYPE) \
@@ -507,51 +538,6 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(THIN_ONE_BYTE_STRING_TYPE, ThinString::kSize, thin_one_byte_string, \
ThinOneByteString)
-#define ARRAY_ITERATOR_TYPE_LIST(V) \
- V(JS_TYPED_ARRAY_KEY_ITERATOR_TYPE) \
- V(JS_FAST_ARRAY_KEY_ITERATOR_TYPE) \
- V(JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE) \
- \
- V(JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
- V(JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
- V(JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
- V(JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
- V(JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
- V(JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
- V(JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
- V(JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
- V(JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
- V(JS_BIGUINT64_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
- V(JS_BIGINT64_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
- \
- V(JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
- V(JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
- V(JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
- V(JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
- V(JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
- V(JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
- V(JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
- \
- V(JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE) \
- V(JS_INT8_ARRAY_VALUE_ITERATOR_TYPE) \
- V(JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE) \
- V(JS_INT16_ARRAY_VALUE_ITERATOR_TYPE) \
- V(JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE) \
- V(JS_INT32_ARRAY_VALUE_ITERATOR_TYPE) \
- V(JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE) \
- V(JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE) \
- V(JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE) \
- V(JS_BIGUINT64_ARRAY_VALUE_ITERATOR_TYPE) \
- V(JS_BIGINT64_ARRAY_VALUE_ITERATOR_TYPE) \
- \
- V(JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE) \
- V(JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE) \
- V(JS_FAST_ARRAY_VALUE_ITERATOR_TYPE) \
- V(JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE) \
- V(JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE) \
- V(JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE) \
- V(JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE)
-
// A struct is a simple object a set of object-valued fields. Including an
// object type in this causes the compiler to generate most of the boilerplate
// code for the class including allocation and garbage collection routines,
@@ -573,6 +559,7 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(DEBUG_INFO, DebugInfo, debug_info) \
V(FUNCTION_TEMPLATE_INFO, FunctionTemplateInfo, function_template_info) \
V(INTERCEPTOR_INFO, InterceptorInfo, interceptor_info) \
+ V(INTERPRETER_DATA, InterpreterData, interpreter_data) \
V(MODULE_INFO_ENTRY, ModuleInfoEntry, module_info_entry) \
V(MODULE, Module, module) \
V(OBJECT_TEMPLATE_INFO, ObjectTemplateInfo, object_template_info) \
@@ -583,6 +570,9 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(STACK_FRAME_INFO, StackFrameInfo, stack_frame_info) \
V(TUPLE2, Tuple2, tuple2) \
V(TUPLE3, Tuple3, tuple3) \
+ V(WASM_COMPILED_MODULE, WasmCompiledModule, wasm_compiled_module) \
+ V(WASM_DEBUG_INFO, WasmDebugInfo, wasm_debug_info) \
+ V(WASM_SHARED_MODULE_DATA, WasmSharedModuleData, wasm_shared_module_data) \
V(CALLABLE_TASK, CallableTask, callable_task) \
V(CALLBACK_TASK, CallbackTask, callback_task) \
V(PROMISE_FULFILL_REACTION_JOB_TASK, PromiseFulfillReactionJobTask, \
@@ -749,6 +739,7 @@ enum InstanceType : uint16_t {
FIXED_BIGINT64_ARRAY_TYPE,
FIXED_BIGUINT64_ARRAY_TYPE, // LAST_FIXED_TYPED_ARRAY_TYPE
FIXED_DOUBLE_ARRAY_TYPE,
+ FEEDBACK_METADATA_TYPE,
FILLER_TYPE, // LAST_DATA_TYPE
// Structs.
@@ -763,6 +754,7 @@ enum InstanceType : uint16_t {
DEBUG_INFO_TYPE,
FUNCTION_TEMPLATE_INFO_TYPE,
INTERCEPTOR_INFO_TYPE,
+ INTERPRETER_DATA_TYPE,
MODULE_INFO_ENTRY_TYPE,
MODULE_TYPE,
OBJECT_TEMPLATE_INFO_TYPE,
@@ -773,6 +765,9 @@ enum InstanceType : uint16_t {
STACK_FRAME_INFO_TYPE,
TUPLE2_TYPE,
TUPLE3_TYPE,
+ WASM_COMPILED_MODULE_TYPE,
+ WASM_DEBUG_INFO_TYPE,
+ WASM_SHARED_MODULE_DATA_TYPE,
CALLABLE_TASK_TYPE, // FIRST_MICROTASK_TYPE
CALLBACK_TASK_TYPE,
@@ -782,12 +777,23 @@ enum InstanceType : uint16_t {
// FixedArrays.
FIXED_ARRAY_TYPE, // FIRST_FIXED_ARRAY_TYPE
+ BOILERPLATE_DESCRIPTION_TYPE,
DESCRIPTOR_ARRAY_TYPE,
HASH_TABLE_TYPE,
SCOPE_INFO_TYPE,
- TRANSITION_ARRAY_TYPE, // LAST_FIXED_ARRAY_TYPE
+ TRANSITION_ARRAY_TYPE,
+ BLOCK_CONTEXT_TYPE, // FIRST_CONTEXT_TYPE
+ CATCH_CONTEXT_TYPE,
+ DEBUG_EVALUATE_CONTEXT_TYPE,
+ EVAL_CONTEXT_TYPE,
+ FUNCTION_CONTEXT_TYPE,
+ MODULE_CONTEXT_TYPE,
+ NATIVE_CONTEXT_TYPE,
+ SCRIPT_CONTEXT_TYPE,
+ WITH_CONTEXT_TYPE, // LAST_FIXED_ARRAY_TYPE, LAST_CONTEXT_TYPE
// Misc.
+ CALL_HANDLER_INFO_TYPE,
CELL_TYPE,
CODE_DATA_CONTAINER_TYPE,
FEEDBACK_CELL_TYPE,
@@ -800,6 +806,7 @@ enum InstanceType : uint16_t {
SMALL_ORDERED_HASH_SET_TYPE,
STORE_HANDLER_TYPE,
WEAK_CELL_TYPE,
+ WEAK_FIXED_ARRAY_TYPE,
// All the following types are subtypes of JSReceiver, which corresponds to
// objects in the JS sense. The first and the last type in this range are
@@ -821,6 +828,7 @@ enum InstanceType : uint16_t {
JS_OBJECT_TYPE,
JS_ARGUMENTS_TYPE,
JS_ARRAY_BUFFER_TYPE,
+ JS_ARRAY_ITERATOR_TYPE,
JS_ARRAY_TYPE,
JS_ASYNC_FROM_SYNC_ITERATOR_TYPE,
JS_ASYNC_GENERATOR_OBJECT_TYPE,
@@ -835,6 +843,7 @@ enum InstanceType : uint16_t {
JS_MESSAGE_OBJECT_TYPE,
JS_PROMISE_TYPE,
JS_REGEXP_TYPE,
+ JS_REGEXP_STRING_ITERATOR_TYPE,
JS_SET_TYPE,
JS_SET_KEY_VALUE_ITERATOR_TYPE,
JS_SET_VALUE_ITERATOR_TYPE,
@@ -845,11 +854,8 @@ enum InstanceType : uint16_t {
JS_TYPED_ARRAY_TYPE,
JS_DATA_VIEW_TYPE,
-#define ARRAY_ITERATOR_TYPE(type) type,
- ARRAY_ITERATOR_TYPE_LIST(ARRAY_ITERATOR_TYPE)
-#undef ARRAY_ITERATOR_TYPE
-
- WASM_INSTANCE_TYPE,
+ WASM_GLOBAL_TYPE,
+ WASM_INSTANCE_TYPE,
WASM_MEMORY_TYPE,
WASM_MODULE_TYPE,
WASM_TABLE_TYPE,
@@ -870,7 +876,10 @@ enum InstanceType : uint16_t {
LAST_FUNCTION_TYPE = JS_FUNCTION_TYPE,
// Boundaries for testing if given HeapObject is a subclass of FixedArray.
FIRST_FIXED_ARRAY_TYPE = FIXED_ARRAY_TYPE,
- LAST_FIXED_ARRAY_TYPE = TRANSITION_ARRAY_TYPE,
+ LAST_FIXED_ARRAY_TYPE = WITH_CONTEXT_TYPE,
+ // Boundaries for testing if given HeapObject is a Context
+ FIRST_CONTEXT_TYPE = BLOCK_CONTEXT_TYPE,
+ LAST_CONTEXT_TYPE = WITH_CONTEXT_TYPE,
// Boundaries for testing if given HeapObject is a subclass of Microtask.
FIRST_MICROTASK_TYPE = CALLABLE_TASK_TYPE,
LAST_MICROTASK_TYPE = PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE,
@@ -896,18 +905,6 @@ enum InstanceType : uint16_t {
// wrappers.
LAST_CUSTOM_ELEMENTS_RECEIVER = JS_VALUE_TYPE,
- FIRST_ARRAY_KEY_ITERATOR_TYPE = JS_TYPED_ARRAY_KEY_ITERATOR_TYPE,
- LAST_ARRAY_KEY_ITERATOR_TYPE = JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE,
-
- FIRST_ARRAY_KEY_VALUE_ITERATOR_TYPE = JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE,
- LAST_ARRAY_KEY_VALUE_ITERATOR_TYPE = JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE,
-
- FIRST_ARRAY_VALUE_ITERATOR_TYPE = JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE,
- LAST_ARRAY_VALUE_ITERATOR_TYPE = JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE,
-
- FIRST_ARRAY_ITERATOR_TYPE = FIRST_ARRAY_KEY_ITERATOR_TYPE,
- LAST_ARRAY_ITERATOR_TYPE = LAST_ARRAY_VALUE_ITERATOR_TYPE,
-
FIRST_SET_ITERATOR_TYPE = JS_SET_KEY_VALUE_ITERATOR_TYPE,
LAST_SET_ITERATOR_TYPE = JS_SET_VALUE_ITERATOR_TYPE,
@@ -997,6 +994,7 @@ template <class C> inline bool Is(Object* obj);
V(AccessCheckNeeded) \
V(ArrayList) \
V(BigInt) \
+ V(BigIntWrapper) \
V(BoilerplateDescription) \
V(Boolean) \
V(BooleanWrapper) \
@@ -1032,6 +1030,7 @@ template <class C> inline bool Is(Object* obj);
V(FixedArray) \
V(FixedArrayBase) \
V(FixedArrayExact) \
+ V(FixedArrayOfWeakCells) \
V(FixedBigInt64Array) \
V(FixedBigUint64Array) \
V(FixedDoubleArray) \
@@ -1079,6 +1078,7 @@ template <class C> inline bool Is(Object* obj);
V(JSProxy) \
V(JSReceiver) \
V(JSRegExp) \
+ V(JSRegExpStringIterator) \
V(JSSet) \
V(JSSetIterator) \
V(JSSloppyArgumentsObject) \
@@ -1139,13 +1139,13 @@ template <class C> inline bool Is(Object* obj);
V(TransitionArray) \
V(Undetectable) \
V(UniqueName) \
+ V(WasmGlobalObject) \
V(WasmInstanceObject) \
V(WasmMemoryObject) \
V(WasmModuleObject) \
V(WasmTableObject) \
V(WeakCell) \
- V(WeakFixedArray) \
- V(WeakHashTable)
+ V(WeakFixedArray)
#define HEAP_OBJECT_TEMPLATE_TYPE_LIST(V) \
V(Dictionary) \
@@ -1224,7 +1224,8 @@ class Object {
#undef DECL_STRUCT_PREDICATE
// ES6, #sec-isarray. NOT to be confused with %_IsArray.
- INLINE(MUST_USE_RESULT static Maybe<bool> IsArray(Handle<Object> object));
+ INLINE(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> IsArray(Handle<Object> object));
INLINE(bool IsSmallOrderedHashTable() const);
@@ -1266,11 +1267,12 @@ class Object {
bool BooleanValue(); // ECMA-262 9.2.
// ES6 section 7.2.11 Abstract Relational Comparison
- MUST_USE_RESULT static Maybe<ComparisonResult> Compare(Handle<Object> x,
- Handle<Object> y);
+ V8_WARN_UNUSED_RESULT static Maybe<ComparisonResult> Compare(
+ Handle<Object> x, Handle<Object> y);
// ES6 section 7.2.12 Abstract Equality Comparison
- MUST_USE_RESULT static Maybe<bool> Equals(Handle<Object> x, Handle<Object> y);
+ V8_WARN_UNUSED_RESULT static Maybe<bool> Equals(Handle<Object> x,
+ Handle<Object> y);
// ES6 section 7.2.13 Strict Equality Comparison
bool StrictEquals(Object* that);
@@ -1282,104 +1284,104 @@ class Object {
// Passing a non-null method_name allows us to give a more informative
// error message for those cases where ToObject is being called on
// the receiver of a built-in method.
- MUST_USE_RESULT static inline MaybeHandle<JSReceiver> ToObject(
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<JSReceiver> ToObject(
Isolate* isolate, Handle<Object> object,
const char* method_name = nullptr);
- MUST_USE_RESULT static MaybeHandle<JSReceiver> ToObject(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSReceiver> ToObject(
Isolate* isolate, Handle<Object> object, Handle<Context> native_context,
const char* method_name = nullptr);
// ES6 section 9.2.1.2, OrdinaryCallBindThis for sloppy callee.
- MUST_USE_RESULT static MaybeHandle<JSReceiver> ConvertReceiver(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSReceiver> ConvertReceiver(
Isolate* isolate, Handle<Object> object);
// ES6 section 7.1.14 ToPropertyKey
- MUST_USE_RESULT static inline MaybeHandle<Name> ToName(Isolate* isolate,
- Handle<Object> input);
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<Name> ToName(
+ Isolate* isolate, Handle<Object> input);
// ES6 section 7.1.1 ToPrimitive
- MUST_USE_RESULT static inline MaybeHandle<Object> ToPrimitive(
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> ToPrimitive(
Handle<Object> input, ToPrimitiveHint hint = ToPrimitiveHint::kDefault);
// ES6 section 7.1.3 ToNumber
- MUST_USE_RESULT static inline MaybeHandle<Object> ToNumber(
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> ToNumber(
Handle<Object> input);
- MUST_USE_RESULT static inline MaybeHandle<Object> ToNumeric(
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> ToNumeric(
Handle<Object> input);
// ES6 section 7.1.4 ToInteger
- MUST_USE_RESULT static inline MaybeHandle<Object> ToInteger(
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> ToInteger(
Isolate* isolate, Handle<Object> input);
// ES6 section 7.1.5 ToInt32
- MUST_USE_RESULT static inline MaybeHandle<Object> ToInt32(
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> ToInt32(
Isolate* isolate, Handle<Object> input);
// ES6 section 7.1.6 ToUint32
- MUST_USE_RESULT inline static MaybeHandle<Object> ToUint32(
+ V8_WARN_UNUSED_RESULT inline static MaybeHandle<Object> ToUint32(
Isolate* isolate, Handle<Object> input);
// ES6 section 7.1.12 ToString
- MUST_USE_RESULT static inline MaybeHandle<String> ToString(
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<String> ToString(
Isolate* isolate, Handle<Object> input);
static Handle<String> NoSideEffectsToString(Isolate* isolate,
Handle<Object> input);
// ES6 section 7.1.14 ToPropertyKey
- MUST_USE_RESULT static inline MaybeHandle<Object> ToPropertyKey(
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> ToPropertyKey(
Isolate* isolate, Handle<Object> value);
// ES6 section 7.1.15 ToLength
- MUST_USE_RESULT static inline MaybeHandle<Object> ToLength(
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> ToLength(
Isolate* isolate, Handle<Object> input);
// ES6 section 7.1.17 ToIndex
- MUST_USE_RESULT static inline MaybeHandle<Object> ToIndex(
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> ToIndex(
Isolate* isolate, Handle<Object> input,
MessageTemplate::Template error_index);
// ES6 section 7.3.9 GetMethod
- MUST_USE_RESULT static MaybeHandle<Object> GetMethod(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> GetMethod(
Handle<JSReceiver> receiver, Handle<Name> name);
// ES6 section 7.3.17 CreateListFromArrayLike
- MUST_USE_RESULT static MaybeHandle<FixedArray> CreateListFromArrayLike(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<FixedArray> CreateListFromArrayLike(
Isolate* isolate, Handle<Object> object, ElementTypes element_types);
// Get length property and apply ToLength.
- MUST_USE_RESULT static MaybeHandle<Object> GetLengthFromArrayLike(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> GetLengthFromArrayLike(
Isolate* isolate, Handle<Object> object);
// ES6 section 12.5.6 The typeof Operator
static Handle<String> TypeOf(Isolate* isolate, Handle<Object> object);
// ES6 section 12.7 Additive Operators
- MUST_USE_RESULT static MaybeHandle<Object> Add(Isolate* isolate,
- Handle<Object> lhs,
- Handle<Object> rhs);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> Add(Isolate* isolate,
+ Handle<Object> lhs,
+ Handle<Object> rhs);
// ES6 section 12.9 Relational Operators
- MUST_USE_RESULT static inline Maybe<bool> GreaterThan(Handle<Object> x,
- Handle<Object> y);
- MUST_USE_RESULT static inline Maybe<bool> GreaterThanOrEqual(
+ V8_WARN_UNUSED_RESULT static inline Maybe<bool> GreaterThan(Handle<Object> x,
+ Handle<Object> y);
+ V8_WARN_UNUSED_RESULT static inline Maybe<bool> GreaterThanOrEqual(
+ Handle<Object> x, Handle<Object> y);
+ V8_WARN_UNUSED_RESULT static inline Maybe<bool> LessThan(Handle<Object> x,
+ Handle<Object> y);
+ V8_WARN_UNUSED_RESULT static inline Maybe<bool> LessThanOrEqual(
Handle<Object> x, Handle<Object> y);
- MUST_USE_RESULT static inline Maybe<bool> LessThan(Handle<Object> x,
- Handle<Object> y);
- MUST_USE_RESULT static inline Maybe<bool> LessThanOrEqual(Handle<Object> x,
- Handle<Object> y);
// ES6 section 7.3.19 OrdinaryHasInstance (C, O).
- MUST_USE_RESULT static MaybeHandle<Object> OrdinaryHasInstance(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> OrdinaryHasInstance(
Isolate* isolate, Handle<Object> callable, Handle<Object> object);
// ES6 section 12.10.4 Runtime Semantics: InstanceofOperator(O, C)
- MUST_USE_RESULT static MaybeHandle<Object> InstanceOf(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> InstanceOf(
Isolate* isolate, Handle<Object> object, Handle<Object> callable);
- V8_EXPORT_PRIVATE MUST_USE_RESULT static MaybeHandle<Object> GetProperty(
- LookupIterator* it);
+ V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static MaybeHandle<Object>
+ GetProperty(LookupIterator* it);
// ES6 [[Set]] (when passed kDontThrow)
// Invariants for this and related functions (unless stated otherwise):
@@ -1388,62 +1390,60 @@ class Object {
// In some cases, an exception is thrown regardless of the ShouldThrow
// argument. These cases are either in accordance with the spec or not
// covered by it (eg., concerning API callbacks).
- MUST_USE_RESULT static Maybe<bool> SetProperty(LookupIterator* it,
- Handle<Object> value,
- LanguageMode language_mode,
- StoreFromKeyed store_mode);
- MUST_USE_RESULT static MaybeHandle<Object> SetProperty(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> SetProperty(
+ LookupIterator* it, Handle<Object> value, LanguageMode language_mode,
+ StoreFromKeyed store_mode);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> SetProperty(
Handle<Object> object, Handle<Name> name, Handle<Object> value,
LanguageMode language_mode,
StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED);
- MUST_USE_RESULT static inline MaybeHandle<Object> SetPropertyOrElement(
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> SetPropertyOrElement(
Handle<Object> object, Handle<Name> name, Handle<Object> value,
LanguageMode language_mode,
StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED);
- MUST_USE_RESULT static Maybe<bool> SetSuperProperty(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> SetSuperProperty(
LookupIterator* it, Handle<Object> value, LanguageMode language_mode,
StoreFromKeyed store_mode);
- MUST_USE_RESULT static Maybe<bool> CannotCreateProperty(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> CannotCreateProperty(
Isolate* isolate, Handle<Object> receiver, Handle<Object> name,
Handle<Object> value, ShouldThrow should_throw);
- MUST_USE_RESULT static Maybe<bool> WriteToReadOnlyProperty(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> WriteToReadOnlyProperty(
LookupIterator* it, Handle<Object> value, ShouldThrow should_throw);
- MUST_USE_RESULT static Maybe<bool> WriteToReadOnlyProperty(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> WriteToReadOnlyProperty(
Isolate* isolate, Handle<Object> receiver, Handle<Object> name,
Handle<Object> value, ShouldThrow should_throw);
- MUST_USE_RESULT static Maybe<bool> RedefineIncompatibleProperty(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> RedefineIncompatibleProperty(
Isolate* isolate, Handle<Object> name, Handle<Object> value,
ShouldThrow should_throw);
- MUST_USE_RESULT static Maybe<bool> SetDataProperty(LookupIterator* it,
- Handle<Object> value);
- MUST_USE_RESULT static Maybe<bool> AddDataProperty(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> SetDataProperty(
+ LookupIterator* it, Handle<Object> value);
+ V8_WARN_UNUSED_RESULT static Maybe<bool> AddDataProperty(
LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
ShouldThrow should_throw, StoreFromKeyed store_mode);
- MUST_USE_RESULT static inline MaybeHandle<Object> GetPropertyOrElement(
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> GetPropertyOrElement(
Handle<Object> object, Handle<Name> name);
- MUST_USE_RESULT static inline MaybeHandle<Object> GetPropertyOrElement(
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> GetPropertyOrElement(
Handle<Object> receiver, Handle<Name> name, Handle<JSReceiver> holder);
- MUST_USE_RESULT static inline MaybeHandle<Object> GetProperty(
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> GetProperty(
Handle<Object> object, Handle<Name> name);
- MUST_USE_RESULT static MaybeHandle<Object> GetPropertyWithAccessor(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> GetPropertyWithAccessor(
LookupIterator* it);
- MUST_USE_RESULT static Maybe<bool> SetPropertyWithAccessor(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> SetPropertyWithAccessor(
LookupIterator* it, Handle<Object> value, ShouldThrow should_throw);
- MUST_USE_RESULT static MaybeHandle<Object> GetPropertyWithDefinedGetter(
- Handle<Object> receiver,
- Handle<JSReceiver> getter);
- MUST_USE_RESULT static Maybe<bool> SetPropertyWithDefinedSetter(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> GetPropertyWithDefinedGetter(
+ Handle<Object> receiver, Handle<JSReceiver> getter);
+ V8_WARN_UNUSED_RESULT static Maybe<bool> SetPropertyWithDefinedSetter(
Handle<Object> receiver, Handle<JSReceiver> setter, Handle<Object> value,
ShouldThrow should_throw);
- MUST_USE_RESULT static inline MaybeHandle<Object> GetElement(
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> GetElement(
Isolate* isolate, Handle<Object> object, uint32_t index);
- MUST_USE_RESULT static inline MaybeHandle<Object> SetElement(
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> SetElement(
Isolate* isolate, Handle<Object> object, uint32_t index,
Handle<Object> value, LanguageMode language_mode);
@@ -1469,11 +1469,11 @@ class Object {
bool SameValueZero(Object* other);
// ES6 section 9.4.2.3 ArraySpeciesCreate (part of it)
- MUST_USE_RESULT static MaybeHandle<Object> ArraySpeciesConstructor(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> ArraySpeciesConstructor(
Isolate* isolate, Handle<Object> original_array);
// ES6 section 7.3.20 SpeciesConstructor ( O, defaultConstructor )
- MUST_USE_RESULT static MaybeHandle<Object> SpeciesConstructor(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> SpeciesConstructor(
Isolate* isolate, Handle<JSReceiver> recv,
Handle<JSFunction> default_ctor);
@@ -1532,27 +1532,27 @@ class Object {
// Helper for SetProperty and SetSuperProperty.
// Return value is only meaningful if [found] is set to true on return.
- MUST_USE_RESULT static Maybe<bool> SetPropertyInternal(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> SetPropertyInternal(
LookupIterator* it, Handle<Object> value, LanguageMode language_mode,
StoreFromKeyed store_mode, bool* found);
- MUST_USE_RESULT static MaybeHandle<Name> ConvertToName(Isolate* isolate,
- Handle<Object> input);
- MUST_USE_RESULT static MaybeHandle<Object> ConvertToPropertyKey(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Name> ConvertToName(
+ Isolate* isolate, Handle<Object> input);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> ConvertToPropertyKey(
Isolate* isolate, Handle<Object> value);
- MUST_USE_RESULT static MaybeHandle<String> ConvertToString(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<String> ConvertToString(
Isolate* isolate, Handle<Object> input);
- MUST_USE_RESULT static MaybeHandle<Object> ConvertToNumberOrNumeric(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> ConvertToNumberOrNumeric(
Isolate* isolate, Handle<Object> input, Conversion mode);
- MUST_USE_RESULT static MaybeHandle<Object> ConvertToInteger(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> ConvertToInteger(
Isolate* isolate, Handle<Object> input);
- MUST_USE_RESULT static MaybeHandle<Object> ConvertToInt32(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> ConvertToInt32(
Isolate* isolate, Handle<Object> input);
- MUST_USE_RESULT static MaybeHandle<Object> ConvertToUint32(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> ConvertToUint32(
Isolate* isolate, Handle<Object> input);
- MUST_USE_RESULT static MaybeHandle<Object> ConvertToLength(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> ConvertToLength(
Isolate* isolate, Handle<Object> input);
- MUST_USE_RESULT static MaybeHandle<Object> ConvertToIndex(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> ConvertToIndex(
Isolate* isolate, Handle<Object> input,
MessageTemplate::Template error_index);
@@ -1572,7 +1572,14 @@ struct Brief {
const Object* value;
};
+struct MaybeObjectBrief {
+ explicit MaybeObjectBrief(const MaybeObject* const v) : value(v) {}
+ const MaybeObject* value;
+};
+
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, const Brief& v);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+ const MaybeObjectBrief& v);
// Smi represents integer Numbers that can be stored in 31 bits.
// Smis are immediate which means they are NOT allocated in the heap.
@@ -1762,19 +1769,18 @@ class HeapObject: public Object {
// If it's not performance critical iteration use the non-templatized
// version.
void IterateBody(ObjectVisitor* v);
- void IterateBody(InstanceType type, int object_size, ObjectVisitor* v);
+ void IterateBody(Map* map, int object_size, ObjectVisitor* v);
template <typename ObjectVisitor>
inline void IterateBodyFast(ObjectVisitor* v);
template <typename ObjectVisitor>
- inline void IterateBodyFast(InstanceType type, int object_size,
- ObjectVisitor* v);
+ inline void IterateBodyFast(Map* map, int object_size, ObjectVisitor* v);
// Returns true if the object contains a tagged value at given offset.
// It is used for invalid slots filtering. If the offset points outside
// of the object or to the map word, the result is UNDEFINED (!!!).
- bool IsValidSlot(int offset);
+ bool IsValidSlot(Map* map, int offset);
// Returns the heap object's size in bytes
inline int Size() const;
@@ -1789,6 +1795,7 @@ class HeapObject: public Object {
// Does not invoke write barrier, so should only be assigned to
// during marking GC.
static inline Object** RawField(HeapObject* obj, int offset);
+ static inline MaybeObject** RawMaybeWeakField(HeapObject* obj, int offset);
DECL_CAST(HeapObject)
@@ -2031,12 +2038,12 @@ class JSReceiver: public HeapObject {
DECL_CAST(JSReceiver)
// ES6 section 7.1.1 ToPrimitive
- MUST_USE_RESULT static MaybeHandle<Object> ToPrimitive(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> ToPrimitive(
Handle<JSReceiver> receiver,
ToPrimitiveHint hint = ToPrimitiveHint::kDefault);
// ES6 section 7.1.1.1 OrdinaryToPrimitive
- MUST_USE_RESULT static MaybeHandle<Object> OrdinaryToPrimitive(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> OrdinaryToPrimitive(
Handle<JSReceiver> receiver, OrdinaryToPrimitiveHint hint);
static MaybeHandle<Context> GetFunctionRealm(Handle<JSReceiver> receiver);
@@ -2045,108 +2052,108 @@ class JSReceiver: public HeapObject {
static inline MaybeHandle<Object> GetPrototype(Isolate* isolate,
Handle<JSReceiver> receiver);
- MUST_USE_RESULT static Maybe<bool> HasInPrototypeChain(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> HasInPrototypeChain(
Isolate* isolate, Handle<JSReceiver> object, Handle<Object> proto);
// Reads all enumerable own properties of source and adds them to
// target, using either Set or CreateDataProperty depending on the
// use_set argument. This only copies values not present in the
// maybe_excluded_properties list.
- MUST_USE_RESULT static Maybe<bool> SetOrCopyDataProperties(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> SetOrCopyDataProperties(
Isolate* isolate, Handle<JSReceiver> target, Handle<Object> source,
const ScopedVector<Handle<Object>>* excluded_properties = nullptr,
bool use_set = true);
// Implementation of [[HasProperty]], ECMA-262 5th edition, section 8.12.6.
- MUST_USE_RESULT static Maybe<bool> HasProperty(LookupIterator* it);
- MUST_USE_RESULT static inline Maybe<bool> HasProperty(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> HasProperty(LookupIterator* it);
+ V8_WARN_UNUSED_RESULT static inline Maybe<bool> HasProperty(
Handle<JSReceiver> object, Handle<Name> name);
- MUST_USE_RESULT static inline Maybe<bool> HasElement(
+ V8_WARN_UNUSED_RESULT static inline Maybe<bool> HasElement(
Handle<JSReceiver> object, uint32_t index);
- MUST_USE_RESULT static Maybe<bool> HasOwnProperty(Handle<JSReceiver> object,
- Handle<Name> name);
- MUST_USE_RESULT static inline Maybe<bool> HasOwnProperty(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> HasOwnProperty(
+ Handle<JSReceiver> object, Handle<Name> name);
+ V8_WARN_UNUSED_RESULT static inline Maybe<bool> HasOwnProperty(
Handle<JSReceiver> object, uint32_t index);
- MUST_USE_RESULT static inline MaybeHandle<Object> GetProperty(
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> GetProperty(
Isolate* isolate, Handle<JSReceiver> receiver, const char* key);
- MUST_USE_RESULT static inline MaybeHandle<Object> GetProperty(
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> GetProperty(
Handle<JSReceiver> receiver, Handle<Name> name);
- MUST_USE_RESULT static inline MaybeHandle<Object> GetElement(
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<Object> GetElement(
Isolate* isolate, Handle<JSReceiver> receiver, uint32_t index);
// Implementation of ES6 [[Delete]]
- MUST_USE_RESULT static Maybe<bool> DeletePropertyOrElement(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> DeletePropertyOrElement(
Handle<JSReceiver> object, Handle<Name> name,
LanguageMode language_mode = LanguageMode::kSloppy);
- MUST_USE_RESULT static Maybe<bool> DeleteProperty(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> DeleteProperty(
Handle<JSReceiver> object, Handle<Name> name,
LanguageMode language_mode = LanguageMode::kSloppy);
- MUST_USE_RESULT static Maybe<bool> DeleteProperty(LookupIterator* it,
- LanguageMode language_mode);
- MUST_USE_RESULT static Maybe<bool> DeleteElement(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> DeleteProperty(
+ LookupIterator* it, LanguageMode language_mode);
+ V8_WARN_UNUSED_RESULT static Maybe<bool> DeleteElement(
Handle<JSReceiver> object, uint32_t index,
LanguageMode language_mode = LanguageMode::kSloppy);
- MUST_USE_RESULT static Object* DefineProperty(Isolate* isolate,
- Handle<Object> object,
- Handle<Object> name,
- Handle<Object> attributes);
- MUST_USE_RESULT static MaybeHandle<Object> DefineProperties(
+ V8_WARN_UNUSED_RESULT static Object* DefineProperty(
+ Isolate* isolate, Handle<Object> object, Handle<Object> name,
+ Handle<Object> attributes);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> DefineProperties(
Isolate* isolate, Handle<Object> object, Handle<Object> properties);
// "virtual" dispatcher to the correct [[DefineOwnProperty]] implementation.
- MUST_USE_RESULT static Maybe<bool> DefineOwnProperty(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> DefineOwnProperty(
Isolate* isolate, Handle<JSReceiver> object, Handle<Object> key,
PropertyDescriptor* desc, ShouldThrow should_throw);
// ES6 7.3.4 (when passed kDontThrow)
- MUST_USE_RESULT static Maybe<bool> CreateDataProperty(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> CreateDataProperty(
LookupIterator* it, Handle<Object> value, ShouldThrow should_throw);
// ES6 9.1.6.1
- MUST_USE_RESULT static Maybe<bool> OrdinaryDefineOwnProperty(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> OrdinaryDefineOwnProperty(
Isolate* isolate, Handle<JSObject> object, Handle<Object> key,
PropertyDescriptor* desc, ShouldThrow should_throw);
- MUST_USE_RESULT static Maybe<bool> OrdinaryDefineOwnProperty(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> OrdinaryDefineOwnProperty(
LookupIterator* it, PropertyDescriptor* desc, ShouldThrow should_throw);
// ES6 9.1.6.2
- MUST_USE_RESULT static Maybe<bool> IsCompatiblePropertyDescriptor(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> IsCompatiblePropertyDescriptor(
Isolate* isolate, bool extensible, PropertyDescriptor* desc,
PropertyDescriptor* current, Handle<Name> property_name,
ShouldThrow should_throw);
// ES6 9.1.6.3
// |it| can be NULL in cases where the ES spec passes |undefined| as the
// receiver. Exactly one of |it| and |property_name| must be provided.
- MUST_USE_RESULT static Maybe<bool> ValidateAndApplyPropertyDescriptor(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> ValidateAndApplyPropertyDescriptor(
Isolate* isolate, LookupIterator* it, bool extensible,
PropertyDescriptor* desc, PropertyDescriptor* current,
ShouldThrow should_throw, Handle<Name> property_name);
- V8_EXPORT_PRIVATE MUST_USE_RESULT static Maybe<bool> GetOwnPropertyDescriptor(
- Isolate* isolate, Handle<JSReceiver> object, Handle<Object> key,
- PropertyDescriptor* desc);
- MUST_USE_RESULT static Maybe<bool> GetOwnPropertyDescriptor(
+ V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static Maybe<bool>
+ GetOwnPropertyDescriptor(Isolate* isolate, Handle<JSReceiver> object,
+ Handle<Object> key, PropertyDescriptor* desc);
+ V8_WARN_UNUSED_RESULT static Maybe<bool> GetOwnPropertyDescriptor(
LookupIterator* it, PropertyDescriptor* desc);
typedef PropertyAttributes IntegrityLevel;
// ES6 7.3.14 (when passed kDontThrow)
// 'level' must be SEALED or FROZEN.
- MUST_USE_RESULT static Maybe<bool> SetIntegrityLevel(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> SetIntegrityLevel(
Handle<JSReceiver> object, IntegrityLevel lvl, ShouldThrow should_throw);
// ES6 7.3.15
// 'level' must be SEALED or FROZEN.
- MUST_USE_RESULT static Maybe<bool> TestIntegrityLevel(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> TestIntegrityLevel(
Handle<JSReceiver> object, IntegrityLevel lvl);
// ES6 [[PreventExtensions]] (when passed kDontThrow)
- MUST_USE_RESULT static Maybe<bool> PreventExtensions(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> PreventExtensions(
Handle<JSReceiver> object, ShouldThrow should_throw);
- MUST_USE_RESULT static Maybe<bool> IsExtensible(Handle<JSReceiver> object);
+ V8_WARN_UNUSED_RESULT static Maybe<bool> IsExtensible(
+ Handle<JSReceiver> object);
// Returns the class name ([[Class]] property in the specification).
V8_EXPORT_PRIVATE String* class_name();
@@ -2157,26 +2164,25 @@ class JSReceiver: public HeapObject {
Handle<Context> GetCreationContext();
- MUST_USE_RESULT static inline Maybe<PropertyAttributes> GetPropertyAttributes(
- Handle<JSReceiver> object, Handle<Name> name);
- MUST_USE_RESULT static inline Maybe<PropertyAttributes>
+ V8_WARN_UNUSED_RESULT static inline Maybe<PropertyAttributes>
+ GetPropertyAttributes(Handle<JSReceiver> object, Handle<Name> name);
+ V8_WARN_UNUSED_RESULT static inline Maybe<PropertyAttributes>
GetOwnPropertyAttributes(Handle<JSReceiver> object, Handle<Name> name);
- MUST_USE_RESULT static inline Maybe<PropertyAttributes>
+ V8_WARN_UNUSED_RESULT static inline Maybe<PropertyAttributes>
GetOwnPropertyAttributes(Handle<JSReceiver> object, uint32_t index);
- MUST_USE_RESULT static inline Maybe<PropertyAttributes> GetElementAttributes(
- Handle<JSReceiver> object, uint32_t index);
- MUST_USE_RESULT static inline Maybe<PropertyAttributes>
+ V8_WARN_UNUSED_RESULT static inline Maybe<PropertyAttributes>
+ GetElementAttributes(Handle<JSReceiver> object, uint32_t index);
+ V8_WARN_UNUSED_RESULT static inline Maybe<PropertyAttributes>
GetOwnElementAttributes(Handle<JSReceiver> object, uint32_t index);
- MUST_USE_RESULT static Maybe<PropertyAttributes> GetPropertyAttributes(
+ V8_WARN_UNUSED_RESULT static Maybe<PropertyAttributes> GetPropertyAttributes(
LookupIterator* it);
// Set the object's prototype (only JSReceiver and null are allowed values).
- MUST_USE_RESULT static Maybe<bool> SetPrototype(Handle<JSReceiver> object,
- Handle<Object> value,
- bool from_javascript,
- ShouldThrow should_throw);
+ V8_WARN_UNUSED_RESULT static Maybe<bool> SetPrototype(
+ Handle<JSReceiver> object, Handle<Object> value, bool from_javascript,
+ ShouldThrow should_throw);
inline static Handle<Object> GetDataProperty(Handle<JSReceiver> object,
Handle<Name> name);
@@ -2197,14 +2203,14 @@ class JSReceiver: public HeapObject {
void SetIdentityHash(int masked_hash);
// ES6 [[OwnPropertyKeys]] (modulo return type)
- MUST_USE_RESULT static inline MaybeHandle<FixedArray> OwnPropertyKeys(
+ V8_WARN_UNUSED_RESULT static inline MaybeHandle<FixedArray> OwnPropertyKeys(
Handle<JSReceiver> object);
- MUST_USE_RESULT static MaybeHandle<FixedArray> GetOwnValues(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<FixedArray> GetOwnValues(
Handle<JSReceiver> object, PropertyFilter filter,
bool try_fast_path = true);
- MUST_USE_RESULT static MaybeHandle<FixedArray> GetOwnEntries(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<FixedArray> GetOwnEntries(
Handle<JSReceiver> object, PropertyFilter filter,
bool try_fast_path = true);
@@ -2231,7 +2237,7 @@ class JSObject: public JSReceiver {
public:
static bool IsUnmodifiedApiObject(Object** o);
- static MUST_USE_RESULT MaybeHandle<JSObject> New(
+ static V8_WARN_UNUSED_RESULT MaybeHandle<JSObject> New(
Handle<JSFunction> constructor, Handle<JSReceiver> new_target,
Handle<AllocationSite> site = Handle<AllocationSite>::null());
@@ -2308,7 +2314,7 @@ class JSObject: public JSReceiver {
// Requires: HasFastElements().
static void EnsureWritableFastElements(Handle<JSObject> object);
- MUST_USE_RESULT static Maybe<bool> SetPropertyWithInterceptor(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> SetPropertyWithInterceptor(
LookupIterator* it, ShouldThrow should_throw, Handle<Object> value);
// The API currently still wants DefineOwnPropertyIgnoreAttributes to convert
@@ -2316,26 +2322,29 @@ class JSObject: public JSReceiver {
// to the default behavior that calls the setter.
enum AccessorInfoHandling { FORCE_FIELD, DONT_FORCE_FIELD };
- MUST_USE_RESULT static MaybeHandle<Object> DefineOwnPropertyIgnoreAttributes(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object>
+ DefineOwnPropertyIgnoreAttributes(
LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
AccessorInfoHandling handling = DONT_FORCE_FIELD);
- MUST_USE_RESULT static Maybe<bool> DefineOwnPropertyIgnoreAttributes(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> DefineOwnPropertyIgnoreAttributes(
LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
ShouldThrow should_throw,
AccessorInfoHandling handling = DONT_FORCE_FIELD);
- MUST_USE_RESULT static MaybeHandle<Object> SetOwnPropertyIgnoreAttributes(
- Handle<JSObject> object, Handle<Name> name, Handle<Object> value,
- PropertyAttributes attributes);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object>
+ SetOwnPropertyIgnoreAttributes(Handle<JSObject> object, Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes);
- MUST_USE_RESULT static MaybeHandle<Object> SetOwnElementIgnoreAttributes(
- Handle<JSObject> object, uint32_t index, Handle<Object> value,
- PropertyAttributes attributes);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object>
+ SetOwnElementIgnoreAttributes(Handle<JSObject> object, uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes);
// Equivalent to one of the above depending on whether |name| can be converted
// to an array index.
- MUST_USE_RESULT static MaybeHandle<Object>
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object>
DefinePropertyOrElementIgnoreAttributes(Handle<JSObject> object,
Handle<Name> name,
Handle<Object> value,
@@ -2343,17 +2352,17 @@ class JSObject: public JSReceiver {
// Adds or reconfigures a property to attributes NONE. It will fail when it
// cannot.
- MUST_USE_RESULT static Maybe<bool> CreateDataProperty(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> CreateDataProperty(
LookupIterator* it, Handle<Object> value,
ShouldThrow should_throw = kDontThrow);
static void AddProperty(Handle<JSObject> object, Handle<Name> name,
Handle<Object> value, PropertyAttributes attributes);
- MUST_USE_RESULT static Maybe<bool> AddDataElement(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> AddDataElement(
Handle<JSObject> receiver, uint32_t index, Handle<Object> value,
PropertyAttributes attributes, ShouldThrow should_throw);
- MUST_USE_RESULT static MaybeHandle<Object> AddDataElement(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> AddDataElement(
Handle<JSObject> receiver, uint32_t index, Handle<Object> value,
PropertyAttributes attributes);
@@ -2403,7 +2412,7 @@ class JSObject: public JSReceiver {
// Utility used by many Array builtins and runtime functions
static inline bool PrototypeHasNoElements(Isolate* isolate, JSObject* object);
- // Alternative implementation of WeakFixedArray::NullCallback.
+ // Alternative implementation of FixedArrayOfWeakCells::NullCallback.
class PrototypeRegistryCompactionCallback {
public:
static void Callback(Object* value, int old_index, int new_index);
@@ -2414,10 +2423,10 @@ class JSObject: public JSReceiver {
inline InterceptorInfo* GetIndexedInterceptor();
// Used from JSReceiver.
- MUST_USE_RESULT static Maybe<PropertyAttributes>
+ V8_WARN_UNUSED_RESULT static Maybe<PropertyAttributes>
GetPropertyAttributesWithInterceptor(LookupIterator* it);
- MUST_USE_RESULT static Maybe<PropertyAttributes>
- GetPropertyAttributesWithFailedAccessCheck(LookupIterator* it);
+ V8_WARN_UNUSED_RESULT static Maybe<PropertyAttributes>
+ GetPropertyAttributesWithFailedAccessCheck(LookupIterator* it);
// Defines an AccessorPair property on the given object.
// TODO(mstarzinger): Rename to SetAccessor().
@@ -2432,14 +2441,14 @@ class JSObject: public JSReceiver {
PropertyAttributes attributes);
// Defines an AccessorInfo property on the given object.
- MUST_USE_RESULT static MaybeHandle<Object> SetAccessor(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> SetAccessor(
Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> info,
PropertyAttributes attributes);
// The result must be checked first for exceptions. If there's no exception,
// the output parameter |done| indicates whether the interceptor has a result
// or not.
- MUST_USE_RESULT static MaybeHandle<Object> GetPropertyWithInterceptor(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> GetPropertyWithInterceptor(
LookupIterator* it, bool* done);
static void ValidateElements(JSObject* object);
@@ -2489,11 +2498,11 @@ class JSObject: public JSReceiver {
inline bool HasIndexedInterceptor();
// Support functions for v8 api (needed for correct interceptor behavior).
- MUST_USE_RESULT static Maybe<bool> HasRealNamedProperty(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> HasRealNamedProperty(
Handle<JSObject> object, Handle<Name> name);
- MUST_USE_RESULT static Maybe<bool> HasRealElementProperty(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> HasRealElementProperty(
Handle<JSObject> object, uint32_t index);
- MUST_USE_RESULT static Maybe<bool> HasRealNamedCallbackProperty(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> HasRealNamedCallbackProperty(
Handle<JSObject> object, Handle<Name> name);
// Get the header size for a JSObject. Used to compute the index of
@@ -2574,10 +2583,9 @@ class JSObject: public JSReceiver {
= UPDATE_WRITE_BARRIER);
// Set the object's prototype (only JSReceiver and null are allowed values).
- MUST_USE_RESULT static Maybe<bool> SetPrototype(Handle<JSObject> object,
- Handle<Object> value,
- bool from_javascript,
- ShouldThrow should_throw);
+ V8_WARN_UNUSED_RESULT static Maybe<bool> SetPrototype(
+ Handle<JSObject> object, Handle<Object> value, bool from_javascript,
+ ShouldThrow should_throw);
// Makes the object prototype immutable
// Never called from JavaScript
@@ -2594,10 +2602,10 @@ class JSObject: public JSReceiver {
// Check whether this object references another object
bool ReferencesObject(Object* obj);
- MUST_USE_RESULT static Maybe<bool> TestIntegrityLevel(Handle<JSObject> object,
- IntegrityLevel lvl);
+ V8_WARN_UNUSED_RESULT static Maybe<bool> TestIntegrityLevel(
+ Handle<JSObject> object, IntegrityLevel lvl);
- MUST_USE_RESULT static Maybe<bool> PreventExtensions(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> PreventExtensions(
Handle<JSObject> object, ShouldThrow should_throw);
static bool IsExtensible(Handle<JSObject> object);
@@ -2719,13 +2727,13 @@ class JSObject: public JSReceiver {
friend class Object;
// Used from Object::GetProperty().
- MUST_USE_RESULT static MaybeHandle<Object> GetPropertyWithFailedAccessCheck(
- LookupIterator* it);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object>
+ GetPropertyWithFailedAccessCheck(LookupIterator* it);
- MUST_USE_RESULT static Maybe<bool> SetPropertyWithFailedAccessCheck(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> SetPropertyWithFailedAccessCheck(
LookupIterator* it, Handle<Object> value, ShouldThrow should_throw);
- MUST_USE_RESULT static Maybe<bool> DeletePropertyWithInterceptor(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> DeletePropertyWithInterceptor(
LookupIterator* it, ShouldThrow should_throw);
bool ReferencesObjectFromElements(FixedArray* elements,
@@ -2735,7 +2743,7 @@ class JSObject: public JSReceiver {
// Helper for fast versions of preventExtensions, seal, and freeze.
// attrs is one of NONE, SEALED, or FROZEN (depending on the operation).
template <PropertyAttributes attrs>
- MUST_USE_RESULT static Maybe<bool> PreventExtensionsWithTransition(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> PreventExtensionsWithTransition(
Handle<JSObject> object, ShouldThrow should_throw);
DISALLOW_IMPLICIT_CONSTRUCTORS(JSObject);
@@ -2925,8 +2933,8 @@ class PrototypeInfo : public Struct {
// [weak_cell]: A WeakCell containing this prototype. ICs cache the cell here.
DECL_ACCESSORS(weak_cell, Object)
- // [prototype_users]: WeakFixedArray containing maps using this prototype,
- // or Smi(0) if uninitialized.
+ // [prototype_users]: FixedArrayOfWeakCells containing maps using this
+ // prototype, or Smi(0) if uninitialized.
DECL_ACCESSORS(prototype_users, Object)
// [object_create_map]: A field caching the map for Object.create(prototype).
@@ -2939,13 +2947,7 @@ class PrototypeInfo : public Struct {
// is stored. Returns UNREGISTERED if this prototype has not been registered.
inline int registry_slot() const;
inline void set_registry_slot(int slot);
- // [validity_cell]: Cell containing the validity bit for prototype chains
- // going through this object, or Smi(0) if uninitialized.
- // When a prototype object changes its map, then both its own validity cell
- // and those of all "downstream" prototypes are invalidated; handlers for a
- // given receiver embed the currently valid cell for that receiver's prototype
- // during their compilation and check it on execution.
- DECL_ACCESSORS(validity_cell, Object)
+
// [bit_field]
inline int bit_field() const;
inline void set_bit_field(int bit_field);
@@ -3444,7 +3446,7 @@ class JSFunction: public JSObject {
// Creates a map that matches the constructor's initial map, but with
// [[prototype]] being new.target.prototype. Because new.target can be a
// JSProxy, this can call back into JavaScript.
- static MUST_USE_RESULT MaybeHandle<Map> GetDerivedMap(
+ static V8_WARN_UNUSED_RESULT MaybeHandle<Map> GetDerivedMap(
Isolate* isolate, Handle<JSFunction> constructor,
Handle<JSReceiver> new_target);
@@ -3462,6 +3464,11 @@ class JSFunction: public JSObject {
// Returns if this function has been compiled to native code yet.
inline bool is_compiled();
+ static int GetHeaderSize(bool function_has_prototype_slot) {
+ return function_has_prototype_slot ? JSFunction::kSizeWithPrototype
+ : JSFunction::kSizeWithoutPrototype;
+ }
+
// Prints the name of the function using PrintF.
void PrintName(FILE* out = stdout);
@@ -3493,8 +3500,9 @@ class JSFunction: public JSObject {
// Because of the way this abstract operation is used in the spec,
// it should never fail, but in practice it will fail if the generated
// function name's length exceeds String::kMaxLength.
- static MUST_USE_RESULT bool SetName(Handle<JSFunction> function,
- Handle<Name> name, Handle<String> prefix);
+ static V8_WARN_UNUSED_RESULT bool SetName(Handle<JSFunction> function,
+ Handle<Name> name,
+ Handle<String> prefix);
// The function's displayName if it is set, otherwise name if it is
// configured, otherwise shared function info
@@ -3624,9 +3632,8 @@ class DateCache;
// Representation for JS date objects.
class JSDate: public JSObject {
public:
- static MUST_USE_RESULT MaybeHandle<JSDate> New(Handle<JSFunction> constructor,
- Handle<JSReceiver> new_target,
- double tv);
+ static V8_WARN_UNUSED_RESULT MaybeHandle<JSDate> New(
+ Handle<JSFunction> constructor, Handle<JSReceiver> new_target, double tv);
// If one component is NaN, all of them are, indicating a NaN time value.
// [value]: the time value.
@@ -4000,7 +4007,8 @@ class Oddball: public HeapObject {
inline void set_kind(byte kind);
// ES6 section 7.1.3 ToNumber for Boolean, Null, Undefined.
- MUST_USE_RESULT static inline Handle<Object> ToNumber(Handle<Oddball> input);
+ V8_WARN_UNUSED_RESULT static inline Handle<Object> ToNumber(
+ Handle<Oddball> input);
DECL_CAST(Oddball)
@@ -4198,9 +4206,9 @@ class WeakCell : public HeapObject {
// The JSProxy describes EcmaScript Harmony proxies
class JSProxy: public JSReceiver {
public:
- MUST_USE_RESULT static MaybeHandle<JSProxy> New(Isolate* isolate,
- Handle<Object>,
- Handle<Object>);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSProxy> New(Isolate* isolate,
+ Handle<Object>,
+ Handle<Object>);
// [handler]: The handler property.
DECL_ACCESSORS(handler, Object)
@@ -4218,43 +4226,41 @@ class JSProxy: public JSReceiver {
static MaybeHandle<Object> GetPrototype(Handle<JSProxy> receiver);
// ES6 9.5.2
- MUST_USE_RESULT static Maybe<bool> SetPrototype(Handle<JSProxy> proxy,
- Handle<Object> value,
- bool from_javascript,
- ShouldThrow should_throw);
+ V8_WARN_UNUSED_RESULT static Maybe<bool> SetPrototype(
+ Handle<JSProxy> proxy, Handle<Object> value, bool from_javascript,
+ ShouldThrow should_throw);
// ES6 9.5.3
- MUST_USE_RESULT static Maybe<bool> IsExtensible(Handle<JSProxy> proxy);
+ V8_WARN_UNUSED_RESULT static Maybe<bool> IsExtensible(Handle<JSProxy> proxy);
// ES6, #sec-isarray. NOT to be confused with %_IsArray.
- MUST_USE_RESULT static Maybe<bool> IsArray(Handle<JSProxy> proxy);
+ V8_WARN_UNUSED_RESULT static Maybe<bool> IsArray(Handle<JSProxy> proxy);
// ES6 9.5.4 (when passed kDontThrow)
- MUST_USE_RESULT static Maybe<bool> PreventExtensions(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> PreventExtensions(
Handle<JSProxy> proxy, ShouldThrow should_throw);
// ES6 9.5.5
- MUST_USE_RESULT static Maybe<bool> GetOwnPropertyDescriptor(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> GetOwnPropertyDescriptor(
Isolate* isolate, Handle<JSProxy> proxy, Handle<Name> name,
PropertyDescriptor* desc);
// ES6 9.5.6
- MUST_USE_RESULT static Maybe<bool> DefineOwnProperty(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> DefineOwnProperty(
Isolate* isolate, Handle<JSProxy> object, Handle<Object> key,
PropertyDescriptor* desc, ShouldThrow should_throw);
// ES6 9.5.7
- MUST_USE_RESULT static Maybe<bool> HasProperty(Isolate* isolate,
- Handle<JSProxy> proxy,
- Handle<Name> name);
+ V8_WARN_UNUSED_RESULT static Maybe<bool> HasProperty(Isolate* isolate,
+ Handle<JSProxy> proxy,
+ Handle<Name> name);
// This function never returns false.
// It returns either true or throws.
- MUST_USE_RESULT static Maybe<bool> CheckHasTrap(Isolate* isolate,
- Handle<Name> name,
- Handle<JSReceiver> target);
+ V8_WARN_UNUSED_RESULT static Maybe<bool> CheckHasTrap(
+ Isolate* isolate, Handle<Name> name, Handle<JSReceiver> target);
// ES6 9.5.8
- MUST_USE_RESULT static MaybeHandle<Object> GetProperty(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> GetProperty(
Isolate* isolate, Handle<JSProxy> proxy, Handle<Name> name,
Handle<Object> receiver, bool* was_found);
@@ -4267,22 +4273,20 @@ class JSProxy: public JSReceiver {
AccessKind access_kind);
// ES6 9.5.9
- MUST_USE_RESULT static Maybe<bool> SetProperty(Handle<JSProxy> proxy,
- Handle<Name> name,
- Handle<Object> value,
- Handle<Object> receiver,
- LanguageMode language_mode);
+ V8_WARN_UNUSED_RESULT static Maybe<bool> SetProperty(
+ Handle<JSProxy> proxy, Handle<Name> name, Handle<Object> value,
+ Handle<Object> receiver, LanguageMode language_mode);
// ES6 9.5.10 (when passed LanguageMode::kSloppy)
- MUST_USE_RESULT static Maybe<bool> DeletePropertyOrElement(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> DeletePropertyOrElement(
Handle<JSProxy> proxy, Handle<Name> name, LanguageMode language_mode);
// ES6 9.5.12
- MUST_USE_RESULT static Maybe<bool> OwnPropertyKeys(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> OwnPropertyKeys(
Isolate* isolate, Handle<JSReceiver> receiver, Handle<JSProxy> proxy,
PropertyFilter filter, KeyAccumulator* accumulator);
- MUST_USE_RESULT static Maybe<PropertyAttributes> GetPropertyAttributes(
+ V8_WARN_UNUSED_RESULT static Maybe<PropertyAttributes> GetPropertyAttributes(
LookupIterator* it);
// Dispatched behavior.
@@ -4413,7 +4417,7 @@ class Foreign: public HeapObject {
typedef BodyDescriptor BodyDescriptorWeak;
private:
- friend class Heap;
+ friend class Factory;
friend class SerializerDeserializer;
friend class StartupSerializer;
@@ -4440,6 +4444,7 @@ class AccessorInfo: public Struct {
DECL_ACCESSORS(getter, Object)
inline bool has_getter();
DECL_ACCESSORS(setter, Object)
+ inline bool has_setter();
// This either points at the same as above, or a trampoline in case we are
// running with the simulator. Use these entries from generated code.
DECL_ACCESSORS(js_getter, Object)
@@ -4457,6 +4462,7 @@ class AccessorInfo: public Struct {
DECL_BOOLEAN_ACCESSORS(is_special_data_property)
DECL_BOOLEAN_ACCESSORS(replace_on_access)
DECL_BOOLEAN_ACCESSORS(is_sloppy)
+ DECL_BOOLEAN_ACCESSORS(has_no_side_effect)
// The property attributes used when an API object template is instantiated
// for the first time. Changing of this value afterwards does not affect
@@ -4505,6 +4511,7 @@ class AccessorInfo: public Struct {
V(IsSpecialDataPropertyBit, bool, 1, _) \
V(IsSloppyBit, bool, 1, _) \
V(ReplaceOnAccessBit, bool, 1, _) \
+ V(HasNoSideEffectBit, bool, 1, _) \
V(InitialAttributesBits, PropertyAttributes, 3, _)
DEFINE_BIT_FIELDS(ACCESSOR_INFO_FLAGS_BIT_FIELDS)
@@ -4604,6 +4611,7 @@ class InterceptorInfo: public Struct {
DECL_BOOLEAN_ACCESSORS(all_can_read)
DECL_BOOLEAN_ACCESSORS(non_masking)
DECL_BOOLEAN_ACCESSORS(is_named)
+ DECL_BOOLEAN_ACCESSORS(has_no_side_effect)
inline int flags() const;
inline void set_flags(int flags);
@@ -4629,6 +4637,7 @@ class InterceptorInfo: public Struct {
static const int kAllCanReadBit = 1;
static const int kNonMasking = 2;
static const int kNamed = 3;
+ static const int kHasNoSideEffect = 4;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(InterceptorInfo);
@@ -4642,6 +4651,12 @@ class CallHandlerInfo : public Tuple3 {
DECL_CAST(CallHandlerInfo)
+ inline bool IsSideEffectFreeCallHandlerInfo() const;
+
+ // Dispatched behavior.
+ DECL_PRINTER(CallHandlerInfo)
+ DECL_VERIFIER(CallHandlerInfo)
+
Address redirected_callback() const;
static const int kCallbackOffset = kValue1Offset;
@@ -4809,6 +4824,8 @@ class FunctionTemplateInfo: public TemplateInfo {
bool IsTemplateFor(Map* map);
inline bool instantiated();
+ inline bool BreakAtEntry();
+
// Helper function for cached accessors.
static MaybeHandle<Name> TryGetCachedPropertyName(Isolate* isolate,
Handle<Object> getter);
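The bulk of the objects.h changes above mechanically swap MUST_USE_RESULT for V8_WARN_UNUSED_RESULT, which on compilers that support it expands to a warn-unused-result attribute, so callers that silently drop a Maybe/MaybeHandle result get a compiler warning. The standalone sketch below shows only that mechanism under that assumption; MY_WARN_UNUSED_RESULT and TrySetProperty are illustrative names, not V8 API.

#if defined(__GNUC__) || defined(__clang__)
#define MY_WARN_UNUSED_RESULT __attribute__((warn_unused_result))
#else
#define MY_WARN_UNUSED_RESULT
#endif

#include <cstdio>

// The attribute makes ignoring the bool failure signal a compiler warning.
MY_WARN_UNUSED_RESULT bool TrySetProperty(int* storage, int value) {
  if (storage == nullptr) return false;  // failure must be observed by caller
  *storage = value;
  return true;
}

int main() {
  int slot = 0;
  // TrySetProperty(&slot, 42);  // would trigger -Wunused-result if uncommented
  if (!TrySetProperty(&slot, 42)) {
    std::puts("set failed");
    return 1;
  }
  std::printf("slot = %d\n", slot);
  return 0;
}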
diff --git a/deps/v8/src/objects/bigint.cc b/deps/v8/src/objects/bigint.cc
index c107ab8cd1..0ccc5930b7 100644
--- a/deps/v8/src/objects/bigint.cc
+++ b/deps/v8/src/objects/bigint.cc
@@ -173,6 +173,10 @@ class MutableBigInt : public FreshlyAllocatedBigInt {
bitfield = LengthBits::update(static_cast<uint32_t>(bitfield), new_length);
WRITE_INTPTR_FIELD(this, kBitfieldOffset, bitfield);
}
+ inline void initialize_bitfield(bool sign, int length) {
+ intptr_t bitfield = LengthBits::encode(length) | SignBits::encode(sign);
+ WRITE_INTPTR_FIELD(this, kBitfieldOffset, bitfield);
+ }
inline void set_digit(int n, digit_t value) {
SLOW_DCHECK(0 <= n && n < length());
byte* address = FIELD_ADDR(this, kDigitsOffset + n * kDigitSize);
@@ -191,8 +195,7 @@ MaybeHandle<MutableBigInt> MutableBigInt::New(Isolate* isolate, int length,
}
Handle<MutableBigInt> result =
Cast(isolate->factory()->NewBigInt(length, pretenure));
- result->set_length(length);
- result->set_sign(false);
+ result->initialize_bitfield(false, length);
#if DEBUG
result->InitializeDigits(length, 0xBF);
#endif
@@ -202,12 +205,11 @@ MaybeHandle<MutableBigInt> MutableBigInt::New(Isolate* isolate, int length,
Handle<BigInt> MutableBigInt::NewFromInt(Isolate* isolate, int value) {
if (value == 0) return Zero(isolate);
Handle<MutableBigInt> result = Cast(isolate->factory()->NewBigInt(1));
- result->set_length(1);
- if (value > 0) {
- result->set_sign(false);
+ bool sign = value < 0;
+ result->initialize_bitfield(sign, 1);
+ if (!sign) {
result->set_digit(0, value);
} else {
- result->set_sign(true);
if (value == kMinInt) {
STATIC_ASSERT(kMinInt == -kMaxInt - 1);
result->set_digit(0, static_cast<BigInt::digit_t>(kMaxInt) + 1);
@@ -225,8 +227,8 @@ Handle<BigInt> MutableBigInt::NewFromSafeInteger(Isolate* isolate,
uint64_t absolute = std::abs(value);
int length = 64 / kDigitBits;
Handle<MutableBigInt> result = Cast(isolate->factory()->NewBigInt(length));
- result->set_length(length);
- result->set_sign(value < 0); // Treats -0 like 0.
+ bool sign = value < 0; // Treats -0 like 0.
+ result->initialize_bitfield(sign, length);
result->set_64_bits(absolute);
return MakeImmutable(result);
}
@@ -321,7 +323,14 @@ MaybeHandle<BigInt> BigInt::Exponentiate(Handle<BigInt> base,
// 3. Return a BigInt representing the mathematical value of base raised
// to the power exponent.
if (base->is_zero()) return base;
- if (base->length() == 1 && base->digit(0) == 1) return base;
+ if (base->length() == 1 && base->digit(0) == 1) {
+ // (-1) ** even_number == 1.
+ if (base->sign() && (exponent->digit(0) & 1) == 0) {
+ return UnaryMinus(base);
+ }
+ // (-1) ** odd_number == -1; 1 ** anything == 1.
+ return base;
+ }
// For all bases >= 2, very large exponents would lead to unrepresentable
// results.
STATIC_ASSERT(kMaxLengthBits < std::numeric_limits<digit_t>::max());
@@ -340,8 +349,10 @@ MaybeHandle<BigInt> BigInt::Exponentiate(Handle<BigInt> base,
if (base->length() == 1 && base->digit(0) == 2) {
// Fast path for 2^n.
int needed_digits = 1 + (n / kDigitBits);
- Handle<MutableBigInt> result =
- MutableBigInt::New(isolate, needed_digits).ToHandleChecked();
+ Handle<MutableBigInt> result;
+ if (!MutableBigInt::New(isolate, needed_digits).ToHandle(&result)) {
+ return MaybeHandle<BigInt>();
+ }
result->InitializeDigits(needed_digits);
// All bits are zero. Now set the n-th bit.
digit_t msd = static_cast<digit_t>(1) << (n % kDigitBits);
@@ -356,18 +367,14 @@ MaybeHandle<BigInt> BigInt::Exponentiate(Handle<BigInt> base,
if (n & 1) result = base;
n >>= 1;
for (; n != 0; n >>= 1) {
- if (!Multiply(running_square, running_square).ToHandle(&running_square)) {
- THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntTooBig),
- BigInt);
- }
+ MaybeHandle<BigInt> maybe_result = Multiply(running_square, running_square);
+ if (!maybe_result.ToHandle(&running_square)) return maybe_result;
if (n & 1) {
if (result.is_null()) {
result = running_square;
} else {
- if (!Multiply(result, running_square).ToHandle(&result)) {
- THROW_NEW_ERROR(
- isolate, NewRangeError(MessageTemplate::kBigIntTooBig), BigInt);
- }
+ maybe_result = Multiply(result, running_square);
+ if (!maybe_result.ToHandle(&result)) return maybe_result;
}
}
}
@@ -557,7 +564,8 @@ MaybeHandle<MutableBigInt> MutableBigInt::BitwiseAnd(Handle<BigInt> x,
if (!AbsoluteSubOne(x, result_length).ToHandle(&result)) {
return MaybeHandle<MutableBigInt>();
}
- result = AbsoluteOr(result, AbsoluteSubOne(y), *result);
+ Handle<MutableBigInt> y_1 = AbsoluteSubOne(y);
+ result = AbsoluteOr(result, y_1, *result);
return AbsoluteAddOne(result, true, *result);
} else {
DCHECK(x->sign() != y->sign());
@@ -581,7 +589,8 @@ MaybeHandle<MutableBigInt> MutableBigInt::BitwiseXor(Handle<BigInt> x,
// (-x) ^ (-y) == ~(x-1) ^ ~(y-1) == (x-1) ^ (y-1)
Handle<MutableBigInt> result =
AbsoluteSubOne(x, result_length).ToHandleChecked();
- return AbsoluteXor(result, AbsoluteSubOne(y), *result);
+ Handle<MutableBigInt> y_1 = AbsoluteSubOne(y);
+ return AbsoluteXor(result, y_1, *result);
} else {
DCHECK(x->sign() != y->sign());
int result_length = Max(x->length(), y->length()) + 1;
@@ -611,7 +620,8 @@ MaybeHandle<MutableBigInt> MutableBigInt::BitwiseOr(Handle<BigInt> x,
// == -(((x-1) & (y-1)) + 1)
Handle<MutableBigInt> result =
AbsoluteSubOne(x, result_length).ToHandleChecked();
- result = AbsoluteAnd(result, AbsoluteSubOne(y), *result);
+ Handle<MutableBigInt> y_1 = AbsoluteSubOne(y);
+ result = AbsoluteAnd(result, y_1, *result);
return AbsoluteAddOne(result, true, *result);
} else {
DCHECK(x->sign() != y->sign());
@@ -978,7 +988,8 @@ MutableBigInt::Rounding MutableBigInt::DecideRounding(Handle<BigIntBase> x,
// If any other remaining bit is set, round up.
bitmask -= 1;
if ((current_digit & bitmask) != 0) return kRoundUp;
- for (; digit_index >= 0; digit_index--) {
+ while (digit_index > 0) {
+ digit_index--;
if (x->digit(digit_index) != 0) return kRoundUp;
}
return kTie;
@@ -1742,6 +1753,84 @@ Handle<BigInt> BigInt::Finalize(Handle<FreshlyAllocatedBigInt> x, bool sign) {
return MutableBigInt::MakeImmutable(bigint);
}
+// The serialization format MUST NOT CHANGE without updating the format
+// version in value-serializer.cc!
+uint32_t BigInt::GetBitfieldForSerialization() const {
+ // In order to make the serialization format the same on 32/64 bit builds,
+ // we convert the length-in-digits to length-in-bytes for serialization.
+ // Being able to do this depends on having enough LengthBits:
+ STATIC_ASSERT(kMaxLength * kDigitSize <= LengthBits::kMax);
+ int bytelength = length() * kDigitSize;
+ return SignBits::encode(sign()) | LengthBits::encode(bytelength);
+}
+
+int BigInt::DigitsByteLengthForBitfield(uint32_t bitfield) {
+ return LengthBits::decode(bitfield);
+}
+
+// The serialization format MUST NOT CHANGE without updating the format
+// version in value-serializer.cc!
+void BigInt::SerializeDigits(uint8_t* storage) {
+ void* digits = reinterpret_cast<void*>(reinterpret_cast<Address>(this) +
+ kDigitsOffset - kHeapObjectTag);
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ int bytelength = length() * kDigitSize;
+ memcpy(storage, digits, bytelength);
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ digit_t* digit_storage = reinterpret_cast<digit_t*>(storage);
+ const digit_t* digit = reinterpret_cast<const digit_t*>(digits);
+ for (int i = 0; i < length(); i++) {
+ *digit_storage = ByteReverse(*digit);
+ digit_storage++;
+ digit++;
+ }
+#endif // V8_TARGET_BIG_ENDIAN
+}
+
+// The serialization format MUST NOT CHANGE without updating the format
+// version in value-serializer.cc!
+MaybeHandle<BigInt> BigInt::FromSerializedDigits(
+ Isolate* isolate, uint32_t bitfield, Vector<const uint8_t> digits_storage,
+ PretenureFlag pretenure) {
+ int bytelength = LengthBits::decode(bitfield);
+ DCHECK(digits_storage.length() == bytelength);
+ bool sign = SignBits::decode(bitfield);
+ int length = (bytelength + kDigitSize - 1) / kDigitSize; // Round up.
+ Handle<MutableBigInt> result =
+ MutableBigInt::Cast(isolate->factory()->NewBigInt(length, pretenure));
+ result->initialize_bitfield(sign, length);
+ void* digits = reinterpret_cast<void*>(reinterpret_cast<Address>(*result) +
+ kDigitsOffset - kHeapObjectTag);
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ memcpy(digits, digits_storage.start(), bytelength);
+ void* padding_start =
+ reinterpret_cast<void*>(reinterpret_cast<Address>(digits) + bytelength);
+ memset(padding_start, 0, length * kDigitSize - bytelength);
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ digit_t* digit = reinterpret_cast<digit_t*>(digits);
+ const digit_t* digit_storage =
+ reinterpret_cast<const digit_t*>(digits_storage.start());
+ for (int i = 0; i < bytelength / kDigitSize; i++) {
+ *digit = ByteReverse(*digit_storage);
+ digit_storage++;
+ digit++;
+ }
+ if (bytelength % kDigitSize) {
+ *digit = 0;
+ byte* digit_byte = reinterpret_cast<byte*>(digit);
+ digit_byte += sizeof(*digit) - 1;
+ const byte* digit_storage_byte =
+ reinterpret_cast<const byte*>(digit_storage);
+ for (int i = 0; i < bytelength % kDigitSize; i++) {
+ *digit_byte = *digit_storage_byte;
+ digit_byte--;
+ digit_storage_byte++;
+ }
+ }
+#endif // V8_TARGET_BIG_ENDIAN
+ return MutableBigInt::MakeImmutable(result);
+}
+
static const char kConversionChars[] = "0123456789abcdefghijklmnopqrstuvwxyz";
MaybeHandle<String> MutableBigInt::ToStringBasePowerOfTwo(Handle<BigIntBase> x,
@@ -2089,13 +2178,12 @@ Handle<BigInt> BigInt::FromInt64(Isolate* isolate, int64_t n) {
int length = 64 / kDigitBits;
Handle<MutableBigInt> result =
MutableBigInt::Cast(isolate->factory()->NewBigInt(length));
- result->set_length(length);
+ bool sign = n < 0;
+ result->initialize_bitfield(sign, length);
uint64_t absolute;
- if (n > 0) {
- result->set_sign(false);
+ if (!sign) {
absolute = static_cast<uint64_t>(n);
} else {
- result->set_sign(true);
if (n == std::numeric_limits<int64_t>::min()) {
absolute = static_cast<uint64_t>(std::numeric_limits<int64_t>::max()) + 1;
} else {
@@ -2112,8 +2200,7 @@ Handle<BigInt> BigInt::FromUint64(Isolate* isolate, uint64_t n) {
int length = 64 / kDigitBits;
Handle<MutableBigInt> result =
MutableBigInt::Cast(isolate->factory()->NewBigInt(length));
- result->set_length(length);
- result->set_sign(false);
+ result->initialize_bitfield(false, length);
result->set_64_bits(n);
return MutableBigInt::MakeImmutable(result);
}
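For context on the Exponentiate changes above: the main loop is a standard square-and-multiply, and the new early exit has to special-case a negative base because the magnitude check digit(0) == 1 matches both 1 and -1. Below is a minimal standalone sketch of the same loop shape over uint64_t, with overflow ignored; Power is an illustrative helper, not V8 code.

#include <cstdint>
#include <cstdio>

// Square-and-multiply: keep squaring the base and fold a factor into the
// result whenever the current low bit of the exponent is set.
uint64_t Power(uint64_t base, unsigned n) {
  uint64_t result = (n & 1) ? base : 1;
  uint64_t running_square = base;
  n >>= 1;
  for (; n != 0; n >>= 1) {
    running_square *= running_square;
    if (n & 1) result *= running_square;
  }
  return result;
}

int main() {
  // 3^13 = 1594323
  std::printf("3^13 = %llu\n", static_cast<unsigned long long>(Power(3, 13)));
  return 0;
}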
diff --git a/deps/v8/src/objects/bigint.h b/deps/v8/src/objects/bigint.h
index 7409f0bade..3899853955 100644
--- a/deps/v8/src/objects/bigint.h
+++ b/deps/v8/src/objects/bigint.h
@@ -15,6 +15,10 @@
namespace v8 {
namespace internal {
+class BigInt;
+class ValueDeserializer;
+class ValueSerializer;
+
// BigIntBase is just the raw data object underlying a BigInt. Use with care!
// Most code should be using BigInts instead.
class BigIntBase : public HeapObject {
@@ -30,15 +34,16 @@ class BigIntBase : public HeapObject {
static const int kLengthFieldBits = 30;
STATIC_ASSERT(kMaxLength <= ((1 << kLengthFieldBits) - 1));
- class LengthBits : public BitField<int, 0, kLengthFieldBits> {};
- class SignBits : public BitField<bool, LengthBits::kNext, 1> {};
+ class SignBits : public BitField<bool, 0, 1> {};
+ class LengthBits : public BitField<int, SignBits::kNext, kLengthFieldBits> {};
+ STATIC_ASSERT(LengthBits::kNext <= 32);
static const int kBitfieldOffset = HeapObject::kHeaderSize;
static const int kDigitsOffset = kBitfieldOffset + kPointerSize;
static const int kHeaderSize = kDigitsOffset;
private:
- friend class BigInt;
+ friend class ::v8::internal::BigInt; // MSVC wants full namespace.
friend class MutableBigInt;
typedef uintptr_t digit_t;
@@ -168,6 +173,8 @@ class V8_EXPORT_PRIVATE BigInt : public BigIntBase {
private:
friend class StringToBigIntHelper;
+ friend class ValueDeserializer;
+ friend class ValueSerializer;
// Special functions for StringToBigIntHelper:
static Handle<BigInt> Zero(Isolate* isolate);
@@ -178,6 +185,16 @@ class V8_EXPORT_PRIVATE BigInt : public BigIntBase {
uintptr_t factor, uintptr_t summand);
static Handle<BigInt> Finalize(Handle<FreshlyAllocatedBigInt> x, bool sign);
+ // Special functions for ValueSerializer/ValueDeserializer:
+ uint32_t GetBitfieldForSerialization() const;
+ static int DigitsByteLengthForBitfield(uint32_t bitfield);
+ // Expects {storage} to have a length of at least
+ // {DigitsByteLengthForBitfield(GetBitfieldForSerialization())}.
+ void SerializeDigits(uint8_t* storage);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<BigInt> FromSerializedDigits(
+ Isolate* isolate, uint32_t bitfield, Vector<const uint8_t> digits_storage,
+ PretenureFlag pretenure);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(BigInt);
};
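The bigint.h hunk above moves the sign to bit 0 with the length stored in the bits above it, which is what lets initialize_bitfield write both fields in a single store and lets the serializer reuse the same packing with a byte length instead of a digit count. A minimal standalone sketch of that encoding follows; the constant names are illustrative and stand in for the real BitField helpers.

#include <cassert>
#include <cstdint>
#include <cstdio>

// Sign in bit 0, length in the bits above it.
constexpr uint32_t kSignShift = 0;
constexpr uint32_t kSignBits = 1;
constexpr uint32_t kLengthShift = kSignShift + kSignBits;

uint32_t EncodeBitfield(bool sign, uint32_t length) {
  return (static_cast<uint32_t>(sign) << kSignShift) | (length << kLengthShift);
}

bool DecodeSign(uint32_t bitfield) { return (bitfield >> kSignShift) & 1u; }
uint32_t DecodeLength(uint32_t bitfield) { return bitfield >> kLengthShift; }

int main() {
  const bool sign = true;
  const uint32_t length_in_bytes = 3 * 8;  // e.g. 3 digits of 8 bytes each
  const uint32_t bitfield = EncodeBitfield(sign, length_in_bytes);
  assert(DecodeSign(bitfield) == sign);
  assert(DecodeLength(bitfield) == length_in_bytes);
  std::printf("bitfield = 0x%x\n", bitfield);
  return 0;
}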
diff --git a/deps/v8/src/objects/code-inl.h b/deps/v8/src/objects/code-inl.h
index 8b14034f26..5ac8d766a4 100644
--- a/deps/v8/src/objects/code-inl.h
+++ b/deps/v8/src/objects/code-inl.h
@@ -7,6 +7,7 @@
#include "src/objects/code.h"
+#include "src/isolate.h"
#include "src/objects/dictionary.h"
#include "src/v8memory.h"
@@ -27,9 +28,17 @@ CAST_ACCESSOR(CodeDataContainer)
CAST_ACCESSOR(DependentCode)
CAST_ACCESSOR(DeoptimizationData)
-int AbstractCode::instruction_size() {
+int AbstractCode::raw_instruction_size() {
if (IsCode()) {
- return GetCode()->instruction_size();
+ return GetCode()->raw_instruction_size();
+ } else {
+ return GetBytecodeArray()->length();
+ }
+}
+
+int AbstractCode::InstructionSize() {
+ if (IsCode()) {
+ return GetCode()->InstructionSize();
} else {
return GetBytecodeArray()->length();
}
@@ -72,17 +81,34 @@ int AbstractCode::ExecutableSize() {
}
}
-Address AbstractCode::instruction_start() {
+Address AbstractCode::raw_instruction_start() {
if (IsCode()) {
- return GetCode()->instruction_start();
+ return GetCode()->raw_instruction_start();
} else {
return GetBytecodeArray()->GetFirstBytecodeAddress();
}
}
-Address AbstractCode::instruction_end() {
+Address AbstractCode::InstructionStart() {
if (IsCode()) {
- return GetCode()->instruction_end();
+ return GetCode()->InstructionStart();
+ } else {
+ return GetBytecodeArray()->GetFirstBytecodeAddress();
+ }
+}
+
+Address AbstractCode::raw_instruction_end() {
+ if (IsCode()) {
+ return GetCode()->raw_instruction_end();
+ } else {
+ return GetBytecodeArray()->GetFirstBytecodeAddress() +
+ GetBytecodeArray()->length();
+ }
+}
+
+Address AbstractCode::InstructionEnd() {
+ if (IsCode()) {
+ return GetCode()->InstructionEnd();
} else {
return GetBytecodeArray()->GetFirstBytecodeAddress() +
GetBytecodeArray()->length();
@@ -147,32 +173,28 @@ void DependentCode::copy(int from, int to) {
set(kCodesStartIndex + to, get(kCodesStartIndex + from));
}
-INT_ACCESSORS(Code, instruction_size, kInstructionSizeOffset)
+INT_ACCESSORS(Code, raw_instruction_size, kInstructionSizeOffset)
INT_ACCESSORS(Code, handler_table_offset, kHandlerTableOffsetOffset)
-INT_ACCESSORS(Code, constant_pool_offset, kConstantPoolOffset)
#define CODE_ACCESSORS(name, type, offset) \
ACCESSORS_CHECKED2(Code, name, type, offset, true, \
!GetHeap()->InNewSpace(value))
CODE_ACCESSORS(relocation_info, ByteArray, kRelocationInfoOffset)
CODE_ACCESSORS(deoptimization_data, FixedArray, kDeoptimizationDataOffset)
CODE_ACCESSORS(source_position_table, Object, kSourcePositionTableOffset)
-CODE_ACCESSORS(protected_instructions, FixedArray, kProtectedInstructionsOffset)
CODE_ACCESSORS(code_data_container, CodeDataContainer, kCodeDataContainerOffset)
-CODE_ACCESSORS(trap_handler_index, Smi, kTrapHandlerIndex)
#undef CODE_ACCESSORS
void Code::WipeOutHeader() {
WRITE_FIELD(this, kRelocationInfoOffset, nullptr);
WRITE_FIELD(this, kDeoptimizationDataOffset, nullptr);
WRITE_FIELD(this, kSourcePositionTableOffset, nullptr);
- WRITE_FIELD(this, kProtectedInstructionsOffset, nullptr);
WRITE_FIELD(this, kCodeDataContainerOffset, nullptr);
}
void Code::clear_padding() {
memset(address() + kHeaderPaddingStart, 0, kHeaderSize - kHeaderPaddingStart);
Address data_end =
- has_unwinding_info() ? unwinding_info_end() : instruction_end();
+ has_unwinding_info() ? unwinding_info_end() : raw_instruction_end();
memset(data_end, 0, CodeSize() - (data_end - address()));
}
@@ -202,38 +224,38 @@ void Code::set_next_code_link(Object* value) {
code_data_container()->set_next_code_link(value);
}
-int Code::InstructionSize() {
+int Code::InstructionSize() const {
#ifdef V8_EMBEDDED_BUILTINS
- if (Builtins::IsOffHeapBuiltin(this)) return OffHeapInstructionSize();
+ if (Builtins::IsEmbeddedBuiltin(this)) return OffHeapInstructionSize();
#endif
- return instruction_size();
+ return raw_instruction_size();
}
-byte* Code::instruction_start() const {
+byte* Code::raw_instruction_start() const {
return const_cast<byte*>(FIELD_ADDR_CONST(this, kHeaderSize));
}
-Address Code::InstructionStart() {
+Address Code::InstructionStart() const {
#ifdef V8_EMBEDDED_BUILTINS
- if (Builtins::IsOffHeapBuiltin(this)) return OffHeapInstructionStart();
+ if (Builtins::IsEmbeddedBuiltin(this)) return OffHeapInstructionStart();
#endif
- return instruction_start();
+ return raw_instruction_start();
}
-byte* Code::instruction_end() const {
- return instruction_start() + instruction_size();
+byte* Code::raw_instruction_end() const {
+ return raw_instruction_start() + raw_instruction_size();
}
-Address Code::InstructionEnd() {
+Address Code::InstructionEnd() const {
#ifdef V8_EMBEDDED_BUILTINS
- if (Builtins::IsOffHeapBuiltin(this)) return OffHeapInstructionEnd();
+ if (Builtins::IsEmbeddedBuiltin(this)) return OffHeapInstructionEnd();
#endif
- return instruction_end();
+ return raw_instruction_end();
}
int Code::GetUnwindingInfoSizeOffset() const {
DCHECK(has_unwinding_info());
- return RoundUp(kHeaderSize + instruction_size(), kInt64Size);
+ return RoundUp(kHeaderSize + raw_instruction_size(), kInt64Size);
}
int Code::unwinding_info_size() const {
@@ -262,8 +284,8 @@ byte* Code::unwinding_info_end() const {
int Code::body_size() const {
int unpadded_body_size =
has_unwinding_info()
- ? static_cast<int>(unwinding_info_end() - instruction_start())
- : instruction_size();
+ ? static_cast<int>(unwinding_info_end() - raw_instruction_start())
+ : raw_instruction_size();
return RoundUp(unpadded_body_size, kObjectAlignment);
}
@@ -271,7 +293,6 @@ int Code::SizeIncludingMetadata() const {
int size = CodeSize();
size += relocation_info()->Size();
size += deoptimization_data()->Size();
- size += protected_instructions()->Size();
return size;
}
@@ -287,7 +308,7 @@ int Code::relocation_size() const {
return unchecked_relocation_info()->length();
}
-byte* Code::entry() const { return instruction_start(); }
+byte* Code::entry() const { return raw_instruction_start(); }
bool Code::contains(byte* inner_pointer) {
return (address() <= inner_pointer) && (inner_pointer <= address() + Size());
@@ -295,9 +316,9 @@ bool Code::contains(byte* inner_pointer) {
int Code::ExecutableSize() const {
// Check that the assumptions about the layout of the code object holds.
- DCHECK_EQ(static_cast<int>(instruction_start() - address()),
+ DCHECK_EQ(static_cast<int>(raw_instruction_start() - address()),
Code::kHeaderSize);
- return instruction_size() + Code::kHeaderSize;
+ return raw_instruction_size() + Code::kHeaderSize;
}
int Code::CodeSize() const { return SizeFor(body_size()); }
@@ -320,8 +341,10 @@ void Code::initialize_flags(Kind kind, bool has_unwinding_info,
inline bool Code::is_interpreter_trampoline_builtin() const {
Builtins* builtins = GetIsolate()->builtins();
+ Code* interpreter_entry_trampoline =
+ builtins->builtin(Builtins::kInterpreterEntryTrampoline);
bool is_interpreter_trampoline =
- (this == builtins->builtin(Builtins::kInterpreterEntryTrampoline) ||
+ (builtin_index() == interpreter_entry_trampoline->builtin_index() ||
this == builtins->builtin(Builtins::kInterpreterEnterBytecodeAdvance) ||
this == builtins->builtin(Builtins::kInterpreterEnterBytecodeDispatch));
DCHECK_IMPLIES(is_interpreter_trampoline, !Builtins::IsLazy(builtin_index()));
@@ -330,28 +353,23 @@ inline bool Code::is_interpreter_trampoline_builtin() const {
inline bool Code::checks_optimization_marker() const {
Builtins* builtins = GetIsolate()->builtins();
+ Code* interpreter_entry_trampoline =
+ builtins->builtin(Builtins::kInterpreterEntryTrampoline);
bool checks_marker =
(this == builtins->builtin(Builtins::kCompileLazy) ||
- this == builtins->builtin(Builtins::kInterpreterEntryTrampoline) ||
- this == builtins->builtin(Builtins::kCheckOptimizationMarker));
+ builtin_index() == interpreter_entry_trampoline->builtin_index());
DCHECK_IMPLIES(checks_marker, !Builtins::IsLazy(builtin_index()));
return checks_marker ||
(kind() == OPTIMIZED_FUNCTION && marked_for_deoptimization());
}
-inline bool Code::has_unwinding_info() const {
- return HasUnwindingInfoField::decode(READ_UINT32_FIELD(this, kFlagsOffset));
-}
-
inline bool Code::has_tagged_params() const {
- int flags = READ_UINT32_FIELD(this, kFlagsOffset);
- return HasTaggedStackField::decode(flags);
+ return kind() != JS_TO_WASM_FUNCTION && kind() != C_WASM_ENTRY &&
+ kind() != WASM_FUNCTION;
}
-inline void Code::set_has_tagged_params(bool value) {
- int previous = READ_UINT32_FIELD(this, kFlagsOffset);
- int updated = HasTaggedStackField::update(previous, value);
- WRITE_UINT32_FIELD(this, kFlagsOffset, updated);
+inline bool Code::has_unwinding_info() const {
+ return HasUnwindingInfoField::decode(READ_UINT32_FIELD(this, kFlagsOffset));
}
inline bool Code::is_turbofanned() const {
@@ -482,15 +500,24 @@ bool Code::is_stub() const { return kind() == STUB; }
bool Code::is_optimized_code() const { return kind() == OPTIMIZED_FUNCTION; }
bool Code::is_wasm_code() const { return kind() == WASM_FUNCTION; }
-Address Code::constant_pool() {
- Address constant_pool = nullptr;
+int Code::constant_pool_offset() const {
+ if (!FLAG_enable_embedded_constant_pool) return InstructionSize();
+ return READ_INT_FIELD(this, kConstantPoolOffset);
+}
+
+void Code::set_constant_pool_offset(int value) {
+ if (!FLAG_enable_embedded_constant_pool) return;
+ WRITE_INT_FIELD(this, kConstantPoolOffset, value);
+}
+
+Address Code::constant_pool() const {
if (FLAG_enable_embedded_constant_pool) {
int offset = constant_pool_offset();
- if (offset < instruction_size()) {
- constant_pool = FIELD_ADDR(this, kHeaderSize + offset);
+ if (offset < InstructionSize()) {
+ return InstructionStart() + offset;
}
}
- return constant_pool;
+ return nullptr;
}
Code* Code::GetCodeFromTargetAddress(Address address) {
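The code-inl.h changes above split each accessor into a raw_* form that always reads the on-heap object and a capitalized form that may redirect to the off-heap (embedded builtin) instruction stream. A rough standalone sketch of that dispatch, using a made-up FakeCode type rather than the real Code class:

#include <cstdio>

// raw_instruction_start() always returns the on-heap body; InstructionStart()
// prefers the off-heap stream when one exists (the embedded-builtin case).
struct FakeCode {
  const char* on_heap_body;
  const char* off_heap_body;  // null unless this stands in for an embedded builtin

  const char* raw_instruction_start() const { return on_heap_body; }
  const char* InstructionStart() const {
    return off_heap_body != nullptr ? off_heap_body : on_heap_body;
  }
};

int main() {
  FakeCode normal{"on-heap instructions", nullptr};
  FakeCode builtin{"on-heap trampoline", "off-heap embedded instructions"};
  std::printf("%s\n", normal.InstructionStart());   // on-heap instructions
  std::printf("%s\n", builtin.InstructionStart());  // off-heap embedded instructions
  return 0;
}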
diff --git a/deps/v8/src/objects/code.h b/deps/v8/src/objects/code.h
index 19e1002f77..a33a08d731 100644
--- a/deps/v8/src/objects/code.h
+++ b/deps/v8/src/objects/code.h
@@ -34,7 +34,6 @@ class Code : public HeapObject {
V(REGEXP) \
V(WASM_FUNCTION) \
V(WASM_TO_JS_FUNCTION) \
- V(WASM_TO_WASM_FUNCTION) \
V(JS_TO_WASM_FUNCTION) \
V(WASM_INTERPRETER_ENTRY) \
V(C_WASM_ENTRY)
@@ -55,16 +54,18 @@ class Code : public HeapObject {
// [instruction_size]: Size of the native instructions, including embedded
// data such as the safepoints table.
- inline int instruction_size() const;
- inline void set_instruction_size(int value);
+ inline int raw_instruction_size() const;
+ inline void set_raw_instruction_size(int value);
// Returns the size of the native instructions, including embedded
// data such as the safepoints table. For off-heap code objects
   // this may differ from instruction_size in that this will return the size of the
// off-heap instruction stream rather than the on-heap trampoline located
// at instruction_start.
- inline int InstructionSize();
- int OffHeapInstructionSize();
+ inline int InstructionSize() const;
+#ifdef V8_EMBEDDED_BUILTINS
+ int OffHeapInstructionSize() const;
+#endif
// [relocation_info]: Code relocation information
DECL_ACCESSORS(relocation_info, ByteArray)
@@ -78,18 +79,9 @@ class Code : public HeapObject {
DECL_ACCESSORS(source_position_table, Object)
inline ByteArray* SourcePositionTable() const;
- // TODO(mtrofin): remove when we don't need FLAG_wasm_jit_to_native
- // [protected instructions]: Array containing list of protected
- // instructions and corresponding landing pad offset.
- DECL_ACCESSORS(protected_instructions, FixedArray)
-
// [code_data_container]: A container indirection for all mutable fields.
DECL_ACCESSORS(code_data_container, CodeDataContainer)
- // [trap_handler_index]: An index into the trap handler's master list of code
- // objects.
- DECL_ACCESSORS(trap_handler_index, Smi)
-
// [stub_key]: The major/minor key of a code stub.
inline uint32_t stub_key() const;
inline void set_stub_key(uint32_t key);
@@ -123,10 +115,8 @@ class Code : public HeapObject {
// feedback vector.
inline bool checks_optimization_marker() const;
- // [has_tagged_params]: For compiled code or builtins: Tells whether the
- // outgoing parameters of this code are tagged pointers. True for other kinds.
+ // Tells whether the outgoing parameters of this code are tagged pointers.
inline bool has_tagged_params() const;
- inline void set_has_tagged_params(bool value);
// [is_turbofanned]: For kind STUB or OPTIMIZED_FUNCTION, tells whether the
// code object was generated by the TurboFan optimizing compiler.
@@ -189,7 +179,7 @@ class Code : public HeapObject {
inline void set_is_exception_caught(bool flag);
// [constant_pool]: The constant pool for this function.
- inline Address constant_pool();
+ inline Address constant_pool() const;
// Get the safepoint entry for the given pc.
SafepointEntry GetSafepointEntry(Address pc);
@@ -221,22 +211,26 @@ class Code : public HeapObject {
static inline Object* GetObjectFromCodeEntry(Address code_entry);
// Returns the address of the first instruction.
- inline byte* instruction_start() const;
+ inline byte* raw_instruction_start() const;
// Returns the address of the first instruction. For off-heap code objects
// this differs from instruction_start (which would point to the off-heap
// trampoline instead).
- inline Address InstructionStart();
- Address OffHeapInstructionStart();
+ inline Address InstructionStart() const;
+#ifdef V8_EMBEDDED_BUILTINS
+ Address OffHeapInstructionStart() const;
+#endif
// Returns the address right after the last instruction.
- inline byte* instruction_end() const;
+ inline byte* raw_instruction_end() const;
// Returns the address right after the last instruction. For off-heap code
// objects this differs from instruction_end (which would point to the
// off-heap trampoline instead).
- inline Address InstructionEnd();
- Address OffHeapInstructionEnd();
+ inline Address InstructionEnd() const;
+#ifdef V8_EMBEDDED_BUILTINS
+ Address OffHeapInstructionEnd() const;
+#endif
// Returns the size of the instructions, padding, relocation and unwinding
// information.
@@ -254,22 +248,22 @@ class Code : public HeapObject {
//
// The body of all code objects has the following layout.
//
- // +--------------------------+ <-- instruction_start()
+ // +--------------------------+ <-- raw_instruction_start()
// | instructions |
// | ... |
// +--------------------------+
// | relocation info |
// | ... |
- // +--------------------------+ <-- instruction_end()
+ // +--------------------------+ <-- raw_instruction_end()
//
- // If has_unwinding_info() is false, instruction_end() points to the first
+ // If has_unwinding_info() is false, raw_instruction_end() points to the first
// memory location after the end of the code object. Otherwise, the body
// continues as follows:
//
// +--------------------------+
// | padding to the next |
// | 8-byte aligned address |
- // +--------------------------+ <-- instruction_end()
+ // +--------------------------+ <-- raw_instruction_end()
// | [unwinding_info_size] |
// | as uint64_t |
// +--------------------------+ <-- unwinding_info_start()
@@ -326,16 +320,19 @@ class Code : public HeapObject {
void PrintDeoptLocation(FILE* out, const char* str, Address pc);
bool CanDeoptAt(Address pc);
+ void SetMarkedForDeoptimization(const char* reason);
+
inline HandlerTable::CatchPrediction GetBuiltinCatchPrediction();
-#ifdef VERIFY_HEAP
- void VerifyEmbeddedObjectsDependency();
-#endif
#ifdef DEBUG
enum VerifyMode { kNoContextSpecificPointers, kNoContextRetainingPointers };
void VerifyEmbeddedObjects(VerifyMode mode = kNoContextRetainingPointers);
#endif // DEBUG
+#ifdef V8_EMBEDDED_BUILTINS
+ bool IsProcessIndependent();
+#endif
+
inline bool CanContainWeakObjects();
inline bool IsWeakObject(Object* object);
@@ -371,10 +368,8 @@ class Code : public HeapObject {
kRelocationInfoOffset + kPointerSize;
static const int kSourcePositionTableOffset =
kDeoptimizationDataOffset + kPointerSize;
- static const int kProtectedInstructionsOffset =
- kSourcePositionTableOffset + kPointerSize;
static const int kCodeDataContainerOffset =
- kProtectedInstructionsOffset + kPointerSize;
+ kSourcePositionTableOffset + kPointerSize;
static const int kInstructionSizeOffset =
kCodeDataContainerOffset + kPointerSize;
static const int kFlagsOffset = kInstructionSizeOffset + kIntSize;
@@ -385,8 +380,7 @@ class Code : public HeapObject {
static const int kConstantPoolOffset = kStubKeyOffset + kIntSize;
static const int kBuiltinIndexOffset =
kConstantPoolOffset + kConstantPoolSize;
- static const int kTrapHandlerIndex = kBuiltinIndexOffset + kIntSize;
- static const int kHeaderPaddingStart = kTrapHandlerIndex + kPointerSize;
+ static const int kHeaderPaddingStart = kBuiltinIndexOffset + kIntSize;
// Add padding to align the instruction start following right after
// the Code object header.
@@ -398,8 +392,6 @@ class Code : public HeapObject {
  // Objects embedded into code are visited via reloc info.
static const int kDataStart = kInstructionSizeOffset;
- enum TrapFields { kTrapCodeOffset, kTrapLandingOffset, kTrapDataSize };
-
inline int GetUnwindingInfoSizeOffset() const;
class BodyDescriptor;
@@ -408,7 +400,6 @@ class Code : public HeapObject {
#define CODE_FLAGS_BIT_FIELDS(V, _) \
V(HasUnwindingInfoField, bool, 1, _) \
V(KindField, Kind, 5, _) \
- V(HasTaggedStackField, bool, 1, _) \
V(IsTurbofannedField, bool, 1, _) \
V(StackSlotsField, int, 24, _)
DEFINE_BIT_FIELDS(CODE_FLAGS_BIT_FIELDS)
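The CODE_FLAGS_BIT_FIELDS list above packs several fields into one 32-bit flags word; with HasTaggedStackField removed, the Kind, IsTurbofanned and StackSlots fields shift down by one bit. Below is a minimal standalone sketch (not V8's DEFINE_BIT_FIELDS machinery; the bit positions and Kind enumerators are illustrative) of the encode/decode/update pattern such bit fields provide.

// Standalone sketch (not V8 code): shift/mask bit fields packed into one word.
#include <cstdint>

template <typename T, int kShift, int kBits>
struct BitField {
  static constexpr uint32_t kMask = ((uint32_t{1} << kBits) - 1) << kShift;
  static constexpr uint32_t encode(T value) {
    return (static_cast<uint32_t>(value) << kShift) & kMask;
  }
  static constexpr T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> kShift);
  }
  static constexpr uint32_t update(uint32_t word, T value) {
    return (word & ~kMask) | encode(value);
  }
};

enum class Kind : uint32_t { kStub = 0, kOptimizedFunction = 1 /* ... */ };

using HasUnwindingInfoField = BitField<bool, 0, 1>;
using KindField             = BitField<Kind, 1, 5>;
using IsTurbofannedField    = BitField<bool, 6, 1>;
using StackSlotsField       = BitField<uint32_t, 7, 24>;

constexpr uint32_t kFlags = KindField::encode(Kind::kOptimizedFunction) |
                            IsTurbofannedField::encode(true) |
                            StackSlotsField::encode(12);
static_assert(KindField::decode(kFlags) == Kind::kOptimizedFunction, "");
static_assert(StackSlotsField::decode(kFlags) == 12, "");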
@@ -505,13 +496,30 @@ class AbstractCode : public HeapObject {
int SourceStatementPosition(int offset);
// Returns the address of the first instruction.
- inline Address instruction_start();
+ inline Address raw_instruction_start();
+
+ // Returns the address of the first instruction. For off-heap code objects
+ // this differs from instruction_start (which would point to the off-heap
+ // trampoline instead).
+ inline Address InstructionStart();
// Returns the address right after the last instruction.
- inline Address instruction_end();
+ inline Address raw_instruction_end();
+
+ // Returns the address right after the last instruction. For off-heap code
+ // objects this differs from instruction_end (which would point to the
+ // off-heap trampoline instead).
+ inline Address InstructionEnd();
// Returns the size of the code instructions.
- inline int instruction_size();
+ inline int raw_instruction_size();
+
+ // Returns the size of the native instructions, including embedded
+ // data such as the safepoints table. For off-heap code objects
+ // this may differ from instruction_size in that this will return the size of the
+ // off-heap instruction stream rather than the on-heap trampoline located
+ // at instruction_start.
+ inline int InstructionSize();
// Return the source position table.
inline ByteArray* source_position_table();
@@ -565,9 +573,6 @@ class AbstractCode : public HeapObject {
class DependentCode : public FixedArray {
public:
enum DependencyGroup {
- // Group of code that weakly embed this map and depend on being
- // deoptimized when the map is garbage collected.
- kWeakCodeGroup,
// Group of code that embed a transition to this map, and depend on being
// deoptimized when the transition is replaced by a new version.
kTransitionGroup,
@@ -636,7 +641,6 @@ class DependentCode : public FixedArray {
DECL_CAST(DependentCode)
static const char* DependencyGroupName(DependencyGroup group);
- static void SetMarkedForDeoptimization(Code* code, DependencyGroup group);
private:
static Handle<DependentCode> Insert(Handle<DependentCode> entries,
@@ -735,7 +739,7 @@ class BytecodeArray : public FixedArrayBase {
// Dispatched behavior.
inline int BytecodeArraySize();
- inline int instruction_size();
+ inline int raw_instruction_size();
// Returns the size of bytecode and its metadata. This includes the size of
// bytecode, constant pool, source position table, and handler table.
diff --git a/deps/v8/src/objects/debug-objects-inl.h b/deps/v8/src/objects/debug-objects-inl.h
index 084ea7b15c..db651d9f4e 100644
--- a/deps/v8/src/objects/debug-objects-inl.h
+++ b/deps/v8/src/objects/debug-objects-inl.h
@@ -39,7 +39,7 @@ bool DebugInfo::HasDebugBytecodeArray() {
BytecodeArray* DebugInfo::OriginalBytecodeArray() {
DCHECK(HasDebugBytecodeArray());
- return shared()->bytecode_array();
+ return shared()->GetBytecodeArray();
}
BytecodeArray* DebugInfo::DebugBytecodeArray() {
diff --git a/deps/v8/src/objects/debug-objects.cc b/deps/v8/src/objects/debug-objects.cc
index 6505ca6e7f..f79aa5cea5 100644
--- a/deps/v8/src/objects/debug-objects.cc
+++ b/deps/v8/src/objects/debug-objects.cc
@@ -12,9 +12,13 @@ bool DebugInfo::IsEmpty() const { return flags() == kNone; }
bool DebugInfo::HasBreakInfo() const { return (flags() & kHasBreakInfo) != 0; }
-bool DebugInfo::IsPreparedForBreakpoints() const {
- DCHECK(HasBreakInfo());
- return (flags() & kPreparedForBreakpoints) != 0;
+DebugInfo::ExecutionMode DebugInfo::DebugExecutionMode() const {
+ return (flags() & kDebugExecutionMode) != 0 ? kSideEffects : kBreakpoints;
+}
+
+void DebugInfo::SetDebugExecutionMode(ExecutionMode value) {
+ set_flags(value == kSideEffects ? (flags() | kDebugExecutionMode)
+ : (flags() & ~kDebugExecutionMode));
}
bool DebugInfo::ClearBreakInfo() {
@@ -24,8 +28,9 @@ bool DebugInfo::ClearBreakInfo() {
set_break_points(isolate->heap()->empty_fixed_array());
int new_flags = flags();
- new_flags &= ~kHasBreakInfo & ~kPreparedForBreakpoints;
+ new_flags &= ~kHasBreakInfo & ~kPreparedForDebugExecution;
new_flags &= ~kBreakAtEntry & ~kCanBreakAtEntry;
+ new_flags &= ~kDebugExecutionMode;
set_flags(new_flags);
return new_flags == kNone;
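ClearBreakInfo above clears several flag bits in one expression by ANDing the complements of the individual masks, and DebugExecutionMode is a simple predicate on one of those bits. A minimal standalone sketch of that arithmetic (not V8 code; only the bit values are taken from the enum above):

// Standalone sketch (not V8 code): the flag arithmetic used above.
#include <cstdint>

constexpr uint32_t kHasBreakInfo              = 1u << 0;
constexpr uint32_t kPreparedForDebugExecution = 1u << 1;
constexpr uint32_t kDebugExecutionMode        = 1u << 5;

constexpr uint32_t kAllSet =
    kHasBreakInfo | kPreparedForDebugExecution | kDebugExecutionMode;
// ANDing the complements clears all three bits in a single step.
constexpr uint32_t kCleared =
    kAllSet & ~kHasBreakInfo & ~kPreparedForDebugExecution & ~kDebugExecutionMode;
static_assert(kCleared == 0, "all break-related bits are cleared");

// DebugExecutionMode() above is just a predicate on one of those bits:
enum ExecutionMode { kBreakpoints = 0, kSideEffects = kDebugExecutionMode };
constexpr ExecutionMode ModeOf(uint32_t flags) {
  return (flags & kDebugExecutionMode) != 0 ? kSideEffects : kBreakpoints;
}
static_assert(ModeOf(kDebugExecutionMode) == kSideEffects, "");
static_assert(ModeOf(kHasBreakInfo) == kBreakpoints, "");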
@@ -346,15 +351,14 @@ void CoverageInfo::ResetBlockCount(int slot_index) {
set(slot_start + kSlotBlockCountIndex, Smi::kZero);
}
-void CoverageInfo::Print(String* function_name) {
+void CoverageInfo::Print(std::unique_ptr<char[]> function_name) {
DCHECK(FLAG_trace_block_coverage);
DisallowHeapAllocation no_gc;
OFStream os(stdout);
os << "Coverage info (";
- if (function_name->length() > 0) {
- auto function_name_cstr = function_name->ToCString();
- os << function_name_cstr.get();
+ if (strlen(function_name.get()) > 0) {
+ os << function_name.get();
} else {
os << "{anonymous}";
}
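The Print signature change above moves string flattening to the caller: the function now receives an owned C string rather than a heap String*. A minimal standalone sketch of that calling convention (not V8 code; the stream and helper names are illustrative):

// Standalone sketch (not V8 code): passing ownership of a flattened name.
#include <cstring>
#include <iostream>
#include <memory>

void PrintCoverageHeader(std::unique_ptr<char[]> function_name) {
  std::cout << "Coverage info (";
  if (std::strlen(function_name.get()) > 0) {
    std::cout << function_name.get();
  } else {
    std::cout << "{anonymous}";
  }
  std::cout << ")\n";
}

int main() {
  auto name = std::make_unique<char[]>(4);
  std::memcpy(name.get(), "foo", 4);     // include the terminating NUL
  PrintCoverageHeader(std::move(name));  // prints: Coverage info (foo)
}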
diff --git a/deps/v8/src/objects/debug-objects.h b/deps/v8/src/objects/debug-objects.h
index 767cd7e81b..f3e0256ae3 100644
--- a/deps/v8/src/objects/debug-objects.h
+++ b/deps/v8/src/objects/debug-objects.h
@@ -24,11 +24,13 @@ class DebugInfo : public Struct {
enum Flag {
kNone = 0,
kHasBreakInfo = 1 << 0,
- kPreparedForBreakpoints = 1 << 1,
+ kPreparedForDebugExecution = 1 << 1,
kHasCoverageInfo = 1 << 2,
kBreakAtEntry = 1 << 3,
- kCanBreakAtEntry = 1 << 4
+ kCanBreakAtEntry = 1 << 4,
+ kDebugExecutionMode = 1 << 5
};
+
typedef base::Flags<Flag> Flags;
// A bitfield that lists uses of the current instance.
@@ -43,13 +45,27 @@ class DebugInfo : public Struct {
// DebugInfo can be detached from the SharedFunctionInfo iff it is empty.
bool IsEmpty() const;
+ // --- Debug execution ---
+ // -----------------------
+
+ enum ExecutionMode { kBreakpoints = 0, kSideEffects = kDebugExecutionMode };
+
+ // Returns the current debug execution mode. The mode determines the kind of
+ // bytecode patching that is applied: kBreakpoints for breakpoints,
+ // kSideEffects for side-effect checks.
+ ExecutionMode DebugExecutionMode() const;
+ void SetDebugExecutionMode(ExecutionMode value);
+
+ inline bool HasDebugBytecodeArray();
+
+ inline BytecodeArray* OriginalBytecodeArray();
+ inline BytecodeArray* DebugBytecodeArray();
+
// --- Break points ---
// --------------------
bool HasBreakInfo() const;
- bool IsPreparedForBreakpoints() const;
-
// Clears all fields related to break points. Returns true iff the
// DebugInfo is now empty.
bool ClearBreakInfo();
@@ -82,11 +98,6 @@ class DebugInfo : public Struct {
// Get the number of break points for this function.
int GetBreakPointCount();
- inline bool HasDebugBytecodeArray();
-
- inline BytecodeArray* OriginalBytecodeArray();
- inline BytecodeArray* DebugBytecodeArray();
-
// Returns whether we should be able to break before entering the function.
// This is true for functions with no source, e.g. builtins.
bool CanBreakAtEntry() const;
@@ -180,7 +191,7 @@ class CoverageInfo : public FixedArray {
DECL_CAST(CoverageInfo)
// Print debug info.
- void Print(String* function_name);
+ void Print(std::unique_ptr<char[]> function_name);
private:
static int FirstIndexForSlot(int slot_index) {
diff --git a/deps/v8/src/objects/descriptor-array.h b/deps/v8/src/objects/descriptor-array.h
index a89a31fcd5..f520cfb2e0 100644
--- a/deps/v8/src/objects/descriptor-array.h
+++ b/deps/v8/src/objects/descriptor-array.h
@@ -164,20 +164,20 @@ class DescriptorArray : public FixedArray {
// Returns the fixed array length required to hold number_of_descriptors
// descriptors.
- static int LengthFor(int number_of_descriptors) {
+ static constexpr int LengthFor(int number_of_descriptors) {
return ToKeyIndex(number_of_descriptors);
}
- static int ToDetailsIndex(int descriptor_number) {
+ static constexpr int ToDetailsIndex(int descriptor_number) {
return kFirstIndex + (descriptor_number * kEntrySize) + kEntryDetailsIndex;
}
// Conversion from descriptor number to array indices.
- static int ToKeyIndex(int descriptor_number) {
+ static constexpr int ToKeyIndex(int descriptor_number) {
return kFirstIndex + (descriptor_number * kEntrySize) + kEntryKeyIndex;
}
- static int ToValueIndex(int descriptor_number) {
+ static constexpr int ToValueIndex(int descriptor_number) {
return kFirstIndex + (descriptor_number * kEntrySize) + kEntryValueIndex;
}
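Making these index helpers constexpr lets the same arithmetic feed compile-time contexts such as static_assert or array bounds. A minimal standalone sketch (not V8 code; the prefix and entry-size constants are illustrative stand-ins):

// Standalone sketch (not V8 code): descriptor-number to array-index arithmetic.
constexpr int kFirstIndex = 3;     // illustrative fixed prefix
constexpr int kEntrySize = 3;      // key, details, value per descriptor
constexpr int kEntryKeyIndex = 0;
constexpr int kEntryValueIndex = 2;

constexpr int ToKeyIndex(int n) {
  return kFirstIndex + n * kEntrySize + kEntryKeyIndex;
}
constexpr int ToValueIndex(int n) {
  return kFirstIndex + n * kEntrySize + kEntryValueIndex;
}
constexpr int LengthFor(int number_of_descriptors) {
  return ToKeyIndex(number_of_descriptors);
}

// Now usable at compile time:
static_assert(LengthFor(0) == kFirstIndex, "an empty array only holds the prefix");
static_assert(ToValueIndex(1) == ToKeyIndex(1) + 2, "value slot follows key and details");
int backing_store[LengthFor(4)];   // a constant expression, so this is legal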
diff --git a/deps/v8/src/objects/dictionary.h b/deps/v8/src/objects/dictionary.h
index 7cc0e5f5b3..4aca71d563 100644
--- a/deps/v8/src/objects/dictionary.h
+++ b/deps/v8/src/objects/dictionary.h
@@ -48,11 +48,11 @@ class Dictionary : public HashTable<Derived, Shape> {
}
// Delete a property from the dictionary.
- MUST_USE_RESULT static Handle<Derived> DeleteEntry(Handle<Derived> dictionary,
- int entry);
+ V8_WARN_UNUSED_RESULT static Handle<Derived> DeleteEntry(
+ Handle<Derived> dictionary, int entry);
// Attempt to shrink the dictionary after deletion of key.
- MUST_USE_RESULT static inline Handle<Derived> Shrink(
+ V8_WARN_UNUSED_RESULT static inline Handle<Derived> Shrink(
Handle<Derived> dictionary) {
return DerivedHashTable::Shrink(dictionary);
}
@@ -73,16 +73,18 @@ class Dictionary : public HashTable<Derived, Shape> {
inline void SetEntry(int entry, Object* key, Object* value,
PropertyDetails details);
- MUST_USE_RESULT static Handle<Derived> Add(Handle<Derived> dictionary,
- Key key, Handle<Object> value,
- PropertyDetails details,
- int* entry_out = nullptr);
+ V8_WARN_UNUSED_RESULT static Handle<Derived> Add(Handle<Derived> dictionary,
+ Key key,
+ Handle<Object> value,
+ PropertyDetails details,
+ int* entry_out = nullptr);
protected:
// Generic at put operation.
- MUST_USE_RESULT static Handle<Derived> AtPut(Handle<Derived> dictionary,
- Key key, Handle<Object> value,
- PropertyDetails details);
+ V8_WARN_UNUSED_RESULT static Handle<Derived> AtPut(Handle<Derived> dictionary,
+ Key key,
+ Handle<Object> value,
+ PropertyDetails details);
};
template <typename Key>
@@ -152,7 +154,7 @@ class BaseNameDictionary : public Dictionary<Derived, Shape> {
}
// Creates a new dictionary.
- MUST_USE_RESULT static Handle<Derived> New(
+ V8_WARN_UNUSED_RESULT static Handle<Derived> New(
Isolate* isolate, int at_least_space_for,
PretenureFlag pretenure = NOT_TENURED,
MinimumCapacity capacity_option = USE_DEFAULT_MINIMUM_CAPACITY);
@@ -165,6 +167,8 @@ class BaseNameDictionary : public Dictionary<Derived, Shape> {
static Handle<FixedArray> IterationIndices(Handle<Derived> dictionary);
// Copies enumerable keys to preallocated fixed array.
+ // Does not throw for uninitialized exports in module namespace objects, so
+ // this has to be checked separately.
static void CopyEnumKeysTo(Handle<Derived> dictionary,
Handle<FixedArray> storage, KeyCollectionMode mode,
KeyAccumulator* accumulator);
@@ -172,14 +176,15 @@ class BaseNameDictionary : public Dictionary<Derived, Shape> {
// Ensure enough space for n additional elements.
static Handle<Derived> EnsureCapacity(Handle<Derived> dictionary, int n);
- MUST_USE_RESULT static Handle<Derived> AddNoUpdateNextEnumerationIndex(
+ V8_WARN_UNUSED_RESULT static Handle<Derived> AddNoUpdateNextEnumerationIndex(
Handle<Derived> dictionary, Key key, Handle<Object> value,
PropertyDetails details, int* entry_out = nullptr);
- MUST_USE_RESULT static Handle<Derived> Add(Handle<Derived> dictionary,
- Key key, Handle<Object> value,
- PropertyDetails details,
- int* entry_out = nullptr);
+ V8_WARN_UNUSED_RESULT static Handle<Derived> Add(Handle<Derived> dictionary,
+ Key key,
+ Handle<Object> value,
+ PropertyDetails details,
+ int* entry_out = nullptr);
};
class NameDictionary
@@ -277,7 +282,7 @@ class SimpleNumberDictionary
public:
DECL_CAST(SimpleNumberDictionary)
// Type specific at put (default NONE attributes is used when adding).
- MUST_USE_RESULT static Handle<SimpleNumberDictionary> Set(
+ V8_WARN_UNUSED_RESULT static Handle<SimpleNumberDictionary> Set(
Handle<SimpleNumberDictionary> dictionary, uint32_t key,
Handle<Object> value);
@@ -298,7 +303,7 @@ class NumberDictionary
DECL_CAST(NumberDictionary)
// Type specific at put (default NONE attributes is used when adding).
- MUST_USE_RESULT static Handle<NumberDictionary> Set(
+ V8_WARN_UNUSED_RESULT static Handle<NumberDictionary> Set(
Handle<NumberDictionary> dictionary, uint32_t key, Handle<Object> value,
Handle<JSObject> dictionary_holder = Handle<JSObject>::null(),
PropertyDetails details = PropertyDetails::Empty());
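The V8_WARN_UNUSED_RESULT annotations above matter because these static operations may reallocate the table and return a different object; a caller that drops the result keeps working on the stale one. A minimal standalone sketch of the pattern (not V8 code; the Table type and growth policy are invented, and [[nodiscard]] stands in for V8's macro):

// Standalone sketch (not V8 code): why handle-returning table operations should
// not have their result ignored.
#include <memory>
#include <vector>

struct Table {
  std::vector<int> entries;
};

// May replace the table with a larger copy; callers must use the returned one.
[[nodiscard]] std::shared_ptr<Table> Add(std::shared_ptr<Table> table, int key) {
  if (table->entries.size() == table->entries.capacity()) {
    auto grown = std::make_shared<Table>(*table);  // "reallocation": a fresh object
    grown->entries.reserve(2 * grown->entries.size() + 1);
    table = std::move(grown);
  }
  table->entries.push_back(key);
  return table;
}

void Use(std::shared_ptr<Table> table) {
  table = Add(std::move(table), 42);  // correct: keep the possibly-new table
  // Add(table, 43);                  // would warn: result ignored, insertion lost
}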
diff --git a/deps/v8/src/objects/fixed-array-inl.h b/deps/v8/src/objects/fixed-array-inl.h
index bee28d93e2..8c708f48cf 100644
--- a/deps/v8/src/objects/fixed-array-inl.h
+++ b/deps/v8/src/objects/fixed-array-inl.h
@@ -16,7 +16,8 @@ namespace internal {
TYPE_CHECKER(ByteArray, BYTE_ARRAY_TYPE)
TYPE_CHECKER(FixedArrayExact, FIXED_ARRAY_TYPE)
TYPE_CHECKER(FixedDoubleArray, FIXED_DOUBLE_ARRAY_TYPE)
-TYPE_CHECKER(WeakFixedArray, FIXED_ARRAY_TYPE)
+TYPE_CHECKER(FixedArrayOfWeakCells, FIXED_ARRAY_TYPE)
+TYPE_CHECKER(WeakFixedArray, WEAK_FIXED_ARRAY_TYPE)
CAST_ACCESSOR(ArrayList)
CAST_ACCESSOR(ByteArray)
@@ -25,10 +26,13 @@ CAST_ACCESSOR(FixedArrayBase)
CAST_ACCESSOR(FixedDoubleArray)
CAST_ACCESSOR(FixedTypedArrayBase)
CAST_ACCESSOR(TemplateList)
+CAST_ACCESSOR(FixedArrayOfWeakCells)
CAST_ACCESSOR(WeakFixedArray)
SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
SYNCHRONIZED_SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
+SMI_ACCESSORS(WeakFixedArray, length, kLengthOffset)
+SYNCHRONIZED_SMI_ACCESSORS(WeakFixedArray, length, kLengthOffset)
Object* FixedArrayBase::unchecked_synchronized_length() const {
return ACQUIRE_READ_FIELD(this, kLengthOffset);
@@ -142,7 +146,7 @@ void FixedArray::FillWithHoles(int from, int to) {
}
Object** FixedArray::data_start() {
- return HeapObject::RawField(this, kHeaderSize);
+ return HeapObject::RawField(this, OffsetOfElementAt(0));
}
Object** FixedArray::RawFieldOfElementAt(int index) {
@@ -215,36 +219,53 @@ void FixedDoubleArray::FillWithHoles(int from, int to) {
}
}
-Object* WeakFixedArray::Get(int index) const {
+MaybeObject* WeakFixedArray::Get(int index) const {
+ SLOW_DCHECK(index >= 0 && index < this->length());
+ return RELAXED_READ_WEAK_FIELD(this, OffsetOfElementAt(index));
+}
+
+void WeakFixedArray::Set(int index, MaybeObject* value) {
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, length());
+ int offset = OffsetOfElementAt(index);
+ RELAXED_WRITE_FIELD(this, offset, value);
+ WEAK_WRITE_BARRIER(GetHeap(), this, offset, value);
+}
+
+MaybeObject** WeakFixedArray::data_start() {
+ return HeapObject::RawMaybeWeakField(this, kHeaderSize);
+}
+
+Object* FixedArrayOfWeakCells::Get(int index) const {
Object* raw = FixedArray::cast(this)->get(index + kFirstIndex);
if (raw->IsSmi()) return raw;
DCHECK(raw->IsWeakCell());
return WeakCell::cast(raw)->value();
}
-bool WeakFixedArray::IsEmptySlot(int index) const {
+bool FixedArrayOfWeakCells::IsEmptySlot(int index) const {
DCHECK(index < Length());
return Get(index)->IsSmi();
}
-void WeakFixedArray::Clear(int index) {
+void FixedArrayOfWeakCells::Clear(int index) {
FixedArray::cast(this)->set(index + kFirstIndex, Smi::kZero);
}
-int WeakFixedArray::Length() const {
+int FixedArrayOfWeakCells::Length() const {
return FixedArray::cast(this)->length() - kFirstIndex;
}
-int WeakFixedArray::last_used_index() const {
+int FixedArrayOfWeakCells::last_used_index() const {
return Smi::ToInt(FixedArray::cast(this)->get(kLastUsedIndexIndex));
}
-void WeakFixedArray::set_last_used_index(int index) {
+void FixedArrayOfWeakCells::set_last_used_index(int index) {
FixedArray::cast(this)->set(kLastUsedIndexIndex, Smi::FromInt(index));
}
template <class T>
-T* WeakFixedArray::Iterator::Next() {
+T* FixedArrayOfWeakCells::Iterator::Next() {
if (list_ != nullptr) {
// Assert that list did not change during iteration.
DCHECK_EQ(last_used_index_, list_->last_used_index());
diff --git a/deps/v8/src/objects/fixed-array.h b/deps/v8/src/objects/fixed-array.h
index 1861f0c735..022821244f 100644
--- a/deps/v8/src/objects/fixed-array.h
+++ b/deps/v8/src/objects/fixed-array.h
@@ -243,14 +243,63 @@ class FixedDoubleArray : public FixedArrayBase {
DISALLOW_IMPLICIT_CONSTRUCTORS(FixedDoubleArray);
};
-class WeakFixedArray : public FixedArray {
+// WeakFixedArray describes fixed-sized arrays with element type
+// MaybeObject*.
+class WeakFixedArray : public HeapObject {
public:
- // If |maybe_array| is not a WeakFixedArray, a fresh one will be allocated.
- // This function does not check if the value exists already, callers must
- // ensure this themselves if necessary.
- static Handle<WeakFixedArray> Add(Handle<Object> maybe_array,
- Handle<HeapObject> value,
- int* assigned_index = nullptr);
+ DECL_CAST(WeakFixedArray)
+
+ inline MaybeObject* Get(int index) const;
+
+ // Setter that uses write barrier.
+ inline void Set(int index, MaybeObject* value);
+
+ static constexpr int SizeFor(int length) {
+ return kHeaderSize + length * kPointerSize;
+ }
+
+ DECL_INT_ACCESSORS(length)
+
+ // Get and set the length using acquire loads and release stores.
+ inline int synchronized_length() const;
+ inline void synchronized_set_length(int value);
+
+ // Gives access to raw memory which stores the array's data.
+ inline MaybeObject** data_start();
+
+ DECL_PRINTER(WeakFixedArray)
+ DECL_VERIFIER(WeakFixedArray)
+
+ class BodyDescriptor;
+ typedef BodyDescriptor BodyDescriptorWeak;
+
+ static const int kLengthOffset = HeapObject::kHeaderSize;
+ static const int kHeaderSize = kLengthOffset + kPointerSize;
+
+ static const int kMaxLength =
+ (FixedArray::kMaxSize - kHeaderSize) / kPointerSize;
+
+ private:
+ static int OffsetOfElementAt(int index) {
+ return kHeaderSize + index * kPointerSize;
+ }
+
+ friend class Heap;
+
+ static const int kFirstIndex = 1;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(WeakFixedArray);
+};
+
+// Deprecated. Use WeakFixedArray instead.
+class FixedArrayOfWeakCells : public FixedArray {
+ public:
+ // If |maybe_array| is not a FixedArrayOfWeakCells, a fresh one will be
+ // allocated. This function does not check if the value exists already,
+ // callers must ensure this themselves if necessary.
+ static Handle<FixedArrayOfWeakCells> Add(Handle<Object> maybe_array,
+ Handle<HeapObject> value,
+ int* assigned_index = nullptr);
// Returns true if an entry was found and removed.
bool Remove(Handle<HeapObject> value);
@@ -282,7 +331,7 @@ class WeakFixedArray : public FixedArray {
private:
int index_;
- WeakFixedArray* list_;
+ FixedArrayOfWeakCells* list_;
#ifdef DEBUG
int last_used_index_;
DisallowHeapAllocation no_gc_;
@@ -290,16 +339,17 @@ class WeakFixedArray : public FixedArray {
DISALLOW_COPY_AND_ASSIGN(Iterator);
};
- DECL_CAST(WeakFixedArray)
+ DECL_CAST(FixedArrayOfWeakCells)
private:
static const int kLastUsedIndexIndex = 0;
static const int kFirstIndex = 1;
- static Handle<WeakFixedArray> Allocate(
- Isolate* isolate, int size, Handle<WeakFixedArray> initialize_from);
+ static Handle<FixedArrayOfWeakCells> Allocate(
+ Isolate* isolate, int size,
+ Handle<FixedArrayOfWeakCells> initialize_from);
- static void Set(Handle<WeakFixedArray> array, int index,
+ static void Set(Handle<FixedArrayOfWeakCells> array, int index,
Handle<HeapObject> value);
inline void clear(int index);
@@ -310,7 +360,7 @@ class WeakFixedArray : public FixedArray {
void set(int index, Smi* value);
void set(int index, Object* value);
void set(int index, Object* value, WriteBarrierMode mode);
- DISALLOW_IMPLICIT_CONSTRUCTORS(WeakFixedArray);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FixedArrayOfWeakCells);
};
// Generic array grows dynamically with O(1) amortized insertion.
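In the new WeakFixedArray declaration above, SizeFor() and OffsetOfElementAt() share the same header-plus-slots arithmetic. A minimal standalone sketch of that relationship, assuming a 64-bit layout (the header constants are illustrative):

// Standalone sketch (not V8 code): WeakFixedArray size/offset arithmetic,
// assuming 8-byte pointers.
constexpr int kPointerSize = 8;
constexpr int kLengthOffset = kPointerSize;  // stand-in for HeapObject::kHeaderSize
constexpr int kHeaderSize = kLengthOffset + kPointerSize;

constexpr int SizeFor(int length) { return kHeaderSize + length * kPointerSize; }
constexpr int OffsetOfElementAt(int index) { return kHeaderSize + index * kPointerSize; }

static_assert(SizeFor(0) == kHeaderSize, "an empty array is just the header");
static_assert(OffsetOfElementAt(3) == kHeaderSize + 3 * kPointerSize, "");
static_assert(SizeFor(4) == OffsetOfElementAt(4),
              "the total size is the offset one past the last slot");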
@@ -322,15 +372,9 @@ class WeakFixedArray : public FixedArray {
// underlying FixedArray starting at kFirstIndex.
class ArrayList : public FixedArray {
public:
- enum AddMode {
- kNone,
- // Use this if GC can delete elements from the array.
- kReloadLengthAfterAllocation,
- };
- static Handle<ArrayList> Add(Handle<ArrayList> array, Handle<Object> obj,
- AddMode mode = kNone);
+ static Handle<ArrayList> Add(Handle<ArrayList> array, Handle<Object> obj);
static Handle<ArrayList> Add(Handle<ArrayList> array, Handle<Object> obj1,
- Handle<Object> obj2, AddMode = kNone);
+ Handle<Object> obj2);
static Handle<ArrayList> New(Isolate* isolate, int size);
// Returns the number of elements in the list, not the allocated size, which
diff --git a/deps/v8/src/objects/frame-array.h b/deps/v8/src/objects/frame-array.h
index 59bbfc2b63..55b7838484 100644
--- a/deps/v8/src/objects/frame-array.h
+++ b/deps/v8/src/objects/frame-array.h
@@ -60,7 +60,7 @@ class FrameArray : public FixedArray {
int flags);
static Handle<FrameArray> AppendWasmFrame(
Handle<FrameArray> in, Handle<WasmInstanceObject> wasm_instance,
- int wasm_function_index, WasmCodeWrapper code, int offset, int flags);
+ int wasm_function_index, wasm::WasmCode* code, int offset, int flags);
DECL_CAST(FrameArray)
diff --git a/deps/v8/src/objects/hash-table.h b/deps/v8/src/objects/hash-table.h
index a058b7df39..a9ce37991b 100644
--- a/deps/v8/src/objects/hash-table.h
+++ b/deps/v8/src/objects/hash-table.h
@@ -134,7 +134,7 @@ class HashTable : public HashTableBase {
typedef typename Shape::Key Key;
// Returns a new HashTable object.
- MUST_USE_RESULT static Handle<Derived> New(
+ V8_WARN_UNUSED_RESULT static Handle<Derived> New(
Isolate* isolate, int at_least_space_for,
PretenureFlag pretenure = NOT_TENURED,
MinimumCapacity capacity_option = USE_DEFAULT_MINIMUM_CAPACITY);
@@ -181,6 +181,9 @@ class HashTable : public HashTableBase {
static const int kMaxCapacity =
(FixedArray::kMaxLength - kElementsStartIndex) / kEntrySize;
+ // Don't shrink a HashTable below this capacity.
+ static const int kMinShrinkCapacity = 16;
+
// Maximum length to create a regular HashTable (aka. non large object).
static const int kMaxRegularCapacity = 16384;
@@ -190,7 +193,7 @@ class HashTable : public HashTableBase {
}
// Ensure enough space for n additional elements.
- MUST_USE_RESULT static Handle<Derived> EnsureCapacity(
+ V8_WARN_UNUSED_RESULT static Handle<Derived> EnsureCapacity(
Handle<Derived> table, int n, PretenureFlag pretenure = NOT_TENURED);
// Returns true if this table has sufficient capacity for adding n elements.
@@ -199,16 +202,16 @@ class HashTable : public HashTableBase {
protected:
friend class ObjectHashTable;
- MUST_USE_RESULT static Handle<Derived> NewInternal(Isolate* isolate,
- int capacity,
- PretenureFlag pretenure);
+ V8_WARN_UNUSED_RESULT static Handle<Derived> NewInternal(
+ Isolate* isolate, int capacity, PretenureFlag pretenure);
// Find the entry at which to insert element with the given key that
// has the given hash value.
uint32_t FindInsertionEntry(uint32_t hash);
// Attempt to shrink hash table after removal of key.
- MUST_USE_RESULT static Handle<Derived> Shrink(Handle<Derived> table);
+ V8_WARN_UNUSED_RESULT static Handle<Derived> Shrink(
+ Handle<Derived> table, int additionalCapacity = 0);
private:
// Ensure that kMaxRegularCapacity yields a non-large object dictionary.
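kMinShrinkCapacity above puts a floor under how small Shrink() will make a table, which avoids churn for small tables that repeatedly grow and shrink. A minimal standalone sketch of one plausible shrink policy (not V8's exact thresholds; the heuristic below is illustrative only):

// Standalone sketch (not V8 code): a shrink policy for a power-of-two capacity
// hash table with a floor like kMinShrinkCapacity.
#include <cassert>

constexpr int kMinShrinkCapacity = 16;

// Capacity to use after deletions, given the live element count plus any extra
// headroom the caller asks for (cf. the additionalCapacity parameter above).
int CapacityAfterShrink(int capacity, int live_elements, int additional_capacity = 0) {
  int needed = live_elements + additional_capacity;
  if (capacity <= kMinShrinkCapacity) return capacity;  // never go below the floor
  if (needed > capacity / 4) return capacity;           // still reasonably full
  int new_capacity = capacity / 2;
  while (new_capacity / 2 >= needed && new_capacity / 2 >= kMinShrinkCapacity) {
    new_capacity /= 2;
  }
  return new_capacity;
}

int main() {
  assert(CapacityAfterShrink(8, 1) == 8);        // at or below the floor: untouched
  assert(CapacityAfterShrink(128, 100) == 128);  // well used: untouched
  assert(CapacityAfterShrink(128, 10) == 16);    // sparse: shrunk toward the floor
}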
@@ -285,7 +288,7 @@ class ObjectHashTable
DECL_CAST(ObjectHashTable)
// Attempt to shrink hash table after removal of key.
- MUST_USE_RESULT static inline Handle<ObjectHashTable> Shrink(
+ V8_WARN_UNUSED_RESULT static inline Handle<ObjectHashTable> Shrink(
Handle<ObjectHashTable> table);
// Looks up the value associated with the given key. The hole value is
@@ -574,48 +577,6 @@ class OrderedHashMap : public OrderedHashTable<OrderedHashMap, 2> {
static const int kValueOffset = 1;
};
-class WeakHashTableShape : public BaseShape<Handle<Object>> {
- public:
- static inline bool IsMatch(Handle<Object> key, Object* other);
- static inline uint32_t Hash(Isolate* isolate, Handle<Object> key);
- static inline uint32_t HashForObject(Isolate* isolate, Object* object);
- static inline Handle<Object> AsHandle(Isolate* isolate, Handle<Object> key);
- static inline int GetMapRootIndex();
- static const int kPrefixSize = 0;
- static const int kEntrySize = 2;
- static const bool kNeedsHoleCheck = false;
-};
-
-// WeakHashTable maps keys that are arbitrary heap objects to heap object
-// values. The table wraps the keys in weak cells and store values directly.
-// Thus it references keys weakly and values strongly.
-class WeakHashTable : public HashTable<WeakHashTable, WeakHashTableShape> {
- typedef HashTable<WeakHashTable, WeakHashTableShape> DerivedHashTable;
-
- public:
- DECL_CAST(WeakHashTable)
-
- // Looks up the value associated with the given key. The hole value is
- // returned in case the key is not present.
- Object* Lookup(Handle<HeapObject> key);
-
- // Adds (or overwrites) the value associated with the given key. Mapping a
- // key to the hole value causes removal of the whole entry.
- MUST_USE_RESULT static Handle<WeakHashTable> Put(Handle<WeakHashTable> table,
- Handle<HeapObject> key,
- Handle<HeapObject> value);
-
- private:
- friend class MarkCompactCollector;
-
- void AddEntry(int entry, Handle<WeakCell> key, Handle<HeapObject> value);
-
- // Returns the index to the value of an entry.
- static inline int EntryToValueIndex(int entry) {
- return EntryToIndex(entry) + 1;
- }
-};
-
// This is similar to the OrderedHashTable, except for the memory
// layout where we use byte instead of Smi. The max capacity of this
// is only 254, we transition to an OrderedHashTable beyond that
diff --git a/deps/v8/src/objects/intl-objects.cc b/deps/v8/src/objects/intl-objects.cc
index 6b8e18014a..3047642111 100644
--- a/deps/v8/src/objects/intl-objects.cc
+++ b/deps/v8/src/objects/intl-objects.cc
@@ -11,9 +11,10 @@
#include <memory>
#include "src/api.h"
-#include "src/factory.h"
#include "src/global-handles.h"
+#include "src/heap/factory.h"
#include "src/isolate.h"
+#include "src/managed.h"
#include "src/objects-inl.h"
#include "src/property-descriptor.h"
#include "unicode/brkiter.h"
@@ -122,8 +123,20 @@ icu::SimpleDateFormat* CreateICUDateFormat(Isolate* isolate,
icu::SimpleDateFormat* date_format = nullptr;
icu::UnicodeString skeleton;
if (ExtractStringSetting(isolate, options, "skeleton", &skeleton)) {
+ // See https://github.com/tc39/ecma402/issues/225 . The best pattern
+ // generation needs to be done in the base locale according to the
+ // current spec however odd it may be. See also crbug.com/826549 .
+ // This is a temporary work-around to get v8's external behavior to match
+ // the current spec, but does not follow the spec provisions mentioned
+ // in the above Ecma 402 issue.
+ // TODO(jshin): The spec may need to be revised because using the base
+ // locale for the pattern match is not quite right. Moreover, what to
+ // do with 'related year' part when 'chinese/dangi' calendar is specified
+ // has to be discussed. Revisit once the spec is clarified/revised.
+ icu::Locale no_extension_locale(icu_locale.getBaseName());
std::unique_ptr<icu::DateTimePatternGenerator> generator(
- icu::DateTimePatternGenerator::createInstance(icu_locale, status));
+ icu::DateTimePatternGenerator::createInstance(no_extension_locale,
+ status));
icu::UnicodeString pattern;
if (U_SUCCESS(status))
pattern = generator->getBestPattern(skeleton, status);
@@ -901,10 +914,11 @@ void NumberFormat::DeleteNumberFormat(const v8::WeakCallbackInfo<void>& data) {
GlobalHandles::Destroy(reinterpret_cast<Object**>(data.GetParameter()));
}
-icu::Collator* Collator::InitializeCollator(Isolate* isolate,
- Handle<String> locale,
- Handle<JSObject> options,
- Handle<JSObject> resolved) {
+bool Collator::InitializeCollator(Isolate* isolate,
+ Handle<JSObject> collator_holder,
+ Handle<String> locale,
+ Handle<JSObject> options,
+ Handle<JSObject> resolved) {
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
// Convert BCP47 into ICU locale format.
UErrorCode status = U_ZERO_ERROR;
@@ -916,7 +930,7 @@ icu::Collator* Collator::InitializeCollator(Isolate* isolate,
uloc_forLanguageTag(*bcp47_locale, icu_result, ULOC_FULLNAME_CAPACITY,
&icu_length, &status);
if (U_FAILURE(status) || icu_length == 0) {
- return nullptr;
+ return false;
}
icu_locale = icu::Locale(icu_result);
}
@@ -938,17 +952,16 @@ icu::Collator* Collator::InitializeCollator(Isolate* isolate,
SetResolvedCollatorSettings(isolate, icu_locale, collator, resolved);
}
- return collator;
+ Handle<Managed<icu::Collator>> managed =
+ Managed<icu::Collator>::From(isolate, collator);
+ collator_holder->SetEmbedderField(0, *managed);
+
+ return true;
}
icu::Collator* Collator::UnpackCollator(Isolate* isolate,
Handle<JSObject> obj) {
- return reinterpret_cast<icu::Collator*>(obj->GetEmbedderField(0));
-}
-
-void Collator::DeleteCollator(const v8::WeakCallbackInfo<void>& data) {
- delete reinterpret_cast<icu::Collator*>(data.GetInternalField(0));
- GlobalHandles::Destroy(reinterpret_cast<Object**>(data.GetParameter()));
+ return Managed<icu::Collator>::cast(obj->GetEmbedderField(0))->get();
}
bool PluralRules::InitializePluralRules(Isolate* isolate, Handle<String> locale,
diff --git a/deps/v8/src/objects/intl-objects.h b/deps/v8/src/objects/intl-objects.h
index d4bdb1e067..84cf85c6da 100644
--- a/deps/v8/src/objects/intl-objects.h
+++ b/deps/v8/src/objects/intl-objects.h
@@ -77,20 +77,17 @@ class NumberFormat {
class Collator {
public:
- // Create a collator for the specificied locale and options. Returns the
- // resolved settings for the locale / options.
- static icu::Collator* InitializeCollator(Isolate* isolate,
- Handle<String> locale,
- Handle<JSObject> options,
- Handle<JSObject> resolved);
+ // Create a collator for the specified locale and options. Stores the
+ // collator in the provided collator_holder.
+ static bool InitializeCollator(Isolate* isolate,
+ Handle<JSObject> collator_holder,
+ Handle<String> locale,
+ Handle<JSObject> options,
+ Handle<JSObject> resolved);
// Unpacks collator object from corresponding JavaScript object.
static icu::Collator* UnpackCollator(Isolate* isolate, Handle<JSObject> obj);
- // Release memory we allocated for the Collator once the JS object that holds
- // the pointer gets garbage collected.
- static void DeleteCollator(const v8::WeakCallbackInfo<void>& data);
-
// Layout description.
static const int kCollator = JSObject::kHeaderSize;
static const int kSize = kCollator + kPointerSize;
diff --git a/deps/v8/src/objects/js-array-inl.h b/deps/v8/src/objects/js-array-inl.h
index 6bba2f0054..2500acfe98 100644
--- a/deps/v8/src/objects/js-array-inl.h
+++ b/deps/v8/src/objects/js-array-inl.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_JS_ARRAY_INL_H_
#include "src/objects/js-array.h"
+#include "src/wasm/wasm-engine.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -76,30 +77,48 @@ void JSArrayBuffer::set_backing_store(void* value, WriteBarrierMode mode) {
ACCESSORS(JSArrayBuffer, byte_length, Object, kByteLengthOffset)
-void* JSArrayBuffer::allocation_base() const {
- intptr_t ptr = READ_INTPTR_FIELD(this, kAllocationBaseOffset);
- return reinterpret_cast<void*>(ptr);
-}
-
-void JSArrayBuffer::set_allocation_base(void* value, WriteBarrierMode mode) {
- intptr_t ptr = reinterpret_cast<intptr_t>(value);
- WRITE_INTPTR_FIELD(this, kAllocationBaseOffset, ptr);
-}
-
size_t JSArrayBuffer::allocation_length() const {
- return *reinterpret_cast<const size_t*>(
- FIELD_ADDR_CONST(this, kAllocationLengthOffset));
+ if (backing_store() == nullptr) {
+ return 0;
+ }
+ // If this buffer is managed by the WasmMemoryTracker
+ if (is_wasm_memory()) {
+ const auto* data =
+ GetIsolate()->wasm_engine()->memory_tracker()->FindAllocationData(
+ backing_store());
+ DCHECK_NOT_NULL(data);
+ return data->allocation_length;
+ }
+ return byte_length()->Number();
}
-void JSArrayBuffer::set_allocation_length(size_t value) {
- (*reinterpret_cast<size_t*>(FIELD_ADDR(this, kAllocationLengthOffset))) =
- value;
+void* JSArrayBuffer::allocation_base() const {
+ if (backing_store() == nullptr) {
+ return nullptr;
+ }
+ // If this buffer is managed by the WasmMemoryTracker
+ if (is_wasm_memory()) {
+ const auto* data =
+ GetIsolate()->wasm_engine()->memory_tracker()->FindAllocationData(
+ backing_store());
+ DCHECK_NOT_NULL(data);
+ return data->allocation_base;
+ }
+ return backing_store();
}
ArrayBuffer::Allocator::AllocationMode JSArrayBuffer::allocation_mode() const {
using AllocationMode = ArrayBuffer::Allocator::AllocationMode;
- return has_guard_region() ? AllocationMode::kReservation
- : AllocationMode::kNormal;
+ return is_wasm_memory() ? AllocationMode::kReservation
+ : AllocationMode::kNormal;
+}
+
+bool JSArrayBuffer::is_wasm_memory() const {
+ bool const is_wasm_memory = IsWasmMemory::decode(bit_field());
+ DCHECK_EQ(is_wasm_memory,
+ GetIsolate()->wasm_engine()->memory_tracker()->IsWasmMemory(
+ backing_store()));
+ return is_wasm_memory;
}
void JSArrayBuffer::set_bit_field(uint32_t bits) {
@@ -143,14 +162,6 @@ void JSArrayBuffer::set_is_shared(bool value) {
set_bit_field(IsShared::update(bit_field(), value));
}
-bool JSArrayBuffer::has_guard_region() const {
- return HasGuardRegion::decode(bit_field());
-}
-
-void JSArrayBuffer::set_has_guard_region(bool value) {
- set_bit_field(HasGuardRegion::update(bit_field(), value));
-}
-
bool JSArrayBuffer::is_growable() { return IsGrowable::decode(bit_field()); }
void JSArrayBuffer::set_is_growable(bool value) {
@@ -204,6 +215,14 @@ void JSTypedArray::set_length(Object* value, WriteBarrierMode mode) {
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kLengthOffset, value, mode);
}
+bool JSTypedArray::is_on_heap() const {
+ DisallowHeapAllocation no_gc;
+ // Checking that buffer()->backing_store() is not nullptr is not sufficient;
+ // it will be nullptr when byte_length is 0 as well.
+ FixedTypedArrayBase* fta(FixedTypedArrayBase::cast(elements()));
+ return fta->base_pointer() == fta;
+}
+
// static
MaybeHandle<JSTypedArray> JSTypedArray::Validate(Isolate* isolate,
Handle<Object> receiver,
@@ -231,9 +250,17 @@ MaybeHandle<JSTypedArray> JSTypedArray::Validate(Isolate* isolate,
ACCESSORS(JSTypedArray, raw_length, Object, kLengthOffset)
#endif
-ACCESSORS(JSArrayIterator, object, Object, kIteratedObjectOffset)
-ACCESSORS(JSArrayIterator, index, Object, kNextIndexOffset)
-ACCESSORS(JSArrayIterator, object_map, Object, kIteratedObjectMapOffset)
+ACCESSORS(JSArrayIterator, iterated_object, Object, kIteratedObjectOffset)
+ACCESSORS(JSArrayIterator, next_index, Object, kNextIndexOffset)
+
+IterationKind JSArrayIterator::kind() const {
+ return static_cast<IterationKind>(
+ Smi::cast(READ_FIELD(this, kKindOffset))->value());
+}
+
+void JSArrayIterator::set_kind(IterationKind kind) {
+ WRITE_FIELD(this, kKindOffset, Smi::FromInt(static_cast<int>(kind)));
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-array.h b/deps/v8/src/objects/js-array.h
index 09a54b38c1..6df0af17cb 100644
--- a/deps/v8/src/objects/js-array.h
+++ b/deps/v8/src/objects/js-array.h
@@ -51,17 +51,16 @@ class JSArray : public JSObject {
Handle<FixedArrayBase> storage);
// ES6 9.4.2.1
- MUST_USE_RESULT static Maybe<bool> DefineOwnProperty(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> DefineOwnProperty(
Isolate* isolate, Handle<JSArray> o, Handle<Object> name,
PropertyDescriptor* desc, ShouldThrow should_throw);
static bool AnythingToArrayLength(Isolate* isolate,
Handle<Object> length_object,
uint32_t* output);
- MUST_USE_RESULT static Maybe<bool> ArraySetLength(Isolate* isolate,
- Handle<JSArray> a,
- PropertyDescriptor* desc,
- ShouldThrow should_throw);
+ V8_WARN_UNUSED_RESULT static Maybe<bool> ArraySetLength(
+ Isolate* isolate, Handle<JSArray> a, PropertyDescriptor* desc,
+ ShouldThrow should_throw);
// Checks whether the Array has the current realm's Array.prototype as its
// prototype. This function is best-effort and only gives a conservative
@@ -82,6 +81,8 @@ class JSArray : public JSObject {
static const int kLengthOffset = JSObject::kHeaderSize;
static const int kSize = kLengthOffset + kPointerSize;
+ static const int kLengthDescriptorIndex = 0;
+
// Max. number of elements being copied in Array builtins.
static const int kMaxCopyElements = 100;
@@ -100,6 +101,8 @@ class JSArray : public JSObject {
Handle<Object> CacheInitialJSArrayMaps(Handle<Context> native_context,
Handle<Map> initial_map);
+// The JSArrayIterator describes JavaScript Array Iterators Objects, as
+// defined in ES section #sec-array-iterator-objects.
class JSArrayIterator : public JSObject {
public:
DECL_PRINTER(JSArrayIterator)
@@ -107,24 +110,20 @@ class JSArrayIterator : public JSObject {
DECL_CAST(JSArrayIterator)
- // [object]: the [[IteratedObject]] inobject property.
- DECL_ACCESSORS(object, Object)
-
- // [index]: The [[ArrayIteratorNextIndex]] inobject property.
- DECL_ACCESSORS(index, Object)
+ // [iterated_object]: the [[IteratedObject]] inobject property.
+ DECL_ACCESSORS(iterated_object, Object)
- // [map]: The Map of the [[IteratedObject]] field at the time the iterator is
- // allocated.
- DECL_ACCESSORS(object_map, Object)
+ // [next_index]: The [[ArrayIteratorNextIndex]] inobject property.
+ DECL_ACCESSORS(next_index, Object)
- // Return the ElementsKind that a JSArrayIterator's [[IteratedObject]] is
- // expected to have, based on its instance type.
- static ElementsKind ElementsKindForInstanceType(InstanceType instance_type);
+ // [kind]: the [[ArrayIterationKind]] inobject property.
+ inline IterationKind kind() const;
+ inline void set_kind(IterationKind kind);
static const int kIteratedObjectOffset = JSObject::kHeaderSize;
static const int kNextIndexOffset = kIteratedObjectOffset + kPointerSize;
- static const int kIteratedObjectMapOffset = kNextIndexOffset + kPointerSize;
- static const int kSize = kIteratedObjectMapOffset + kPointerSize;
+ static const int kKindOffset = kNextIndexOffset + kPointerSize;
+ static const int kSize = kKindOffset + kPointerSize;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSArrayIterator);
@@ -141,14 +140,10 @@ class JSArrayBuffer : public JSObject {
// [backing_store]: backing memory for this array
DECL_ACCESSORS(backing_store, void)
- // [allocation_base]: the start of the memory allocation for this array,
- // normally equal to backing_store
- DECL_ACCESSORS(allocation_base, void)
-
- // [allocation_length]: the size of the memory allocation for this array,
- // normally equal to byte_length
+ // For non-wasm, allocation_length and allocation_base are byte_length and
+ // backing_store, respectively.
inline size_t allocation_length() const;
- inline void set_allocation_length(size_t value);
+ inline void* allocation_base() const;
inline uint32_t bit_field() const;
inline void set_bit_field(uint32_t bits);
@@ -168,9 +163,6 @@ class JSArrayBuffer : public JSObject {
inline bool is_shared();
inline void set_is_shared(bool value);
- inline bool has_guard_region() const;
- inline void set_has_guard_region(bool value);
-
inline bool is_growable();
inline void set_is_growable(bool value);
@@ -183,33 +175,41 @@ class JSArrayBuffer : public JSObject {
struct Allocation {
using AllocationMode = ArrayBuffer::Allocator::AllocationMode;
- Allocation(void* allocation_base, size_t length, AllocationMode mode)
- : allocation_base(allocation_base), length(length), mode(mode) {}
+ Allocation(void* allocation_base, size_t length, void* backing_store,
+ AllocationMode mode, bool is_wasm_memory)
+ : allocation_base(allocation_base),
+ length(length),
+ backing_store(backing_store),
+ mode(mode),
+ is_wasm_memory(is_wasm_memory) {}
void* allocation_base;
size_t length;
+ void* backing_store;
AllocationMode mode;
+ bool is_wasm_memory;
};
- void FreeBackingStore();
+ // Returns whether the buffer is tracked by the WasmMemoryTracker.
+ inline bool is_wasm_memory() const;
+
+ // Sets whether the buffer is tracked by the WasmMemoryTracker.
+ void set_is_wasm_memory(bool is_wasm_memory);
+
+ void FreeBackingStoreFromMainThread();
static void FreeBackingStore(Isolate* isolate, Allocation allocation);
V8_EXPORT_PRIVATE static void Setup(
Handle<JSArrayBuffer> array_buffer, Isolate* isolate, bool is_external,
void* data, size_t allocated_length,
- SharedFlag shared = SharedFlag::kNotShared);
-
- V8_EXPORT_PRIVATE static void Setup(
- Handle<JSArrayBuffer> array_buffer, Isolate* isolate, bool is_external,
- void* allocation_base, size_t allocation_length, void* data,
- size_t byte_length, SharedFlag shared = SharedFlag::kNotShared);
+ SharedFlag shared = SharedFlag::kNotShared, bool is_wasm_memory = false);
// Returns false if array buffer contents could not be allocated.
// In this case, |array_buffer| will not be set up.
static bool SetupAllocatingData(
Handle<JSArrayBuffer> array_buffer, Isolate* isolate,
size_t allocated_length, bool initialize = true,
- SharedFlag shared = SharedFlag::kNotShared) WARN_UNUSED_RESULT;
+ SharedFlag shared = SharedFlag::kNotShared) V8_WARN_UNUSED_RESULT;
// Dispatched behavior.
DECL_PRINTER(JSArrayBuffer)
@@ -219,10 +219,7 @@ class JSArrayBuffer : public JSObject {
// The rest of the fields are not JSObjects, so they are not iterated over in
// objects-body-descriptors-inl.h.
static const int kBackingStoreOffset = kByteLengthOffset + kPointerSize;
- static const int kAllocationBaseOffset = kBackingStoreOffset + kPointerSize;
- static const int kAllocationLengthOffset =
- kAllocationBaseOffset + kPointerSize;
- static const int kBitFieldSlot = kAllocationLengthOffset + kSizetSize;
+ static const int kBitFieldSlot = kBackingStoreOffset + kPointerSize;
#if V8_TARGET_LITTLE_ENDIAN || !V8_HOST_ARCH_64_BIT
static const int kBitFieldOffset = kBitFieldSlot;
#else
@@ -243,8 +240,8 @@ class JSArrayBuffer : public JSObject {
class IsNeuterable : public BitField<bool, 2, 1> {};
class WasNeutered : public BitField<bool, 3, 1> {};
class IsShared : public BitField<bool, 4, 1> {};
- class HasGuardRegion : public BitField<bool, 5, 1> {};
- class IsGrowable : public BitField<bool, 6, 1> {};
+ class IsGrowable : public BitField<bool, 5, 1> {};
+ class IsWasmMemory : public BitField<bool, 6, 1> {};
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSArrayBuffer);
@@ -288,7 +285,7 @@ class JSTypedArray : public JSArrayBufferView {
inline uint32_t length_value() const;
// ES6 9.4.5.3
- MUST_USE_RESULT static Maybe<bool> DefineOwnProperty(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> DefineOwnProperty(
Isolate* isolate, Handle<JSTypedArray> o, Handle<Object> key,
PropertyDescriptor* desc, ShouldThrow should_throw);
@@ -299,6 +296,9 @@ class JSTypedArray : public JSArrayBufferView {
Handle<JSArrayBuffer> GetBuffer();
+ // Whether the buffer's backing store is on-heap or off-heap.
+ inline bool is_on_heap() const;
+
static inline MaybeHandle<JSTypedArray> Validate(Isolate* isolate,
Handle<Object> receiver,
const char* method_name);
diff --git a/deps/v8/src/objects/js-promise.h b/deps/v8/src/objects/js-promise.h
index b454084b8e..20a0a90131 100644
--- a/deps/v8/src/objects/js-promise.h
+++ b/deps/v8/src/objects/js-promise.h
@@ -56,13 +56,14 @@ class JSPromise : public JSObject {
static Handle<Object> Reject(Handle<JSPromise> promise, Handle<Object> reason,
bool debug_event = true);
// ES section #sec-promise-resolve-functions
- MUST_USE_RESULT static MaybeHandle<Object> Resolve(Handle<JSPromise> promise,
- Handle<Object> resolution);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> Resolve(
+ Handle<JSPromise> promise, Handle<Object> resolution);
// This is a helper that extracts the JSPromise from the input
// {object}, which is used as a payload for PromiseReaction and
// PromiseReactionJobTask.
- MUST_USE_RESULT static MaybeHandle<JSPromise> From(Handle<HeapObject> object);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSPromise> From(
+ Handle<HeapObject> object);
DECL_CAST(JSPromise)
diff --git a/deps/v8/src/objects/js-regexp-string-iterator-inl.h b/deps/v8/src/objects/js-regexp-string-iterator-inl.h
new file mode 100644
index 0000000000..1d6a64ec0c
--- /dev/null
+++ b/deps/v8/src/objects/js-regexp-string-iterator-inl.h
@@ -0,0 +1,35 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_JS_REGEXP_STRING_ITERATOR_INL_H_
+#define V8_OBJECTS_JS_REGEXP_STRING_ITERATOR_INL_H_
+
+#include "src/objects/js-regexp-string-iterator.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+TYPE_CHECKER(JSRegExpStringIterator, JS_REGEXP_STRING_ITERATOR_TYPE)
+
+ACCESSORS(JSRegExpStringIterator, iterating_regexp, Object,
+ kIteratingRegExpOffset)
+ACCESSORS(JSRegExpStringIterator, iterating_string, String,
+ kIteratedStringOffset)
+
+SMI_ACCESSORS(JSRegExpStringIterator, flags, kFlagsOffset)
+BOOL_ACCESSORS(JSRegExpStringIterator, flags, done, kDoneBit)
+BOOL_ACCESSORS(JSRegExpStringIterator, flags, global, kGlobalBit)
+BOOL_ACCESSORS(JSRegExpStringIterator, flags, unicode, kUnicodeBit)
+
+CAST_ACCESSOR(JSRegExpStringIterator)
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_REGEXP_STRING_ITERATOR_INL_H_
diff --git a/deps/v8/src/objects/js-regexp-string-iterator.h b/deps/v8/src/objects/js-regexp-string-iterator.h
new file mode 100644
index 0000000000..9821e33efb
--- /dev/null
+++ b/deps/v8/src/objects/js-regexp-string-iterator.h
@@ -0,0 +1,59 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_JS_REGEXP_STRING_ITERATOR_H_
+#define V8_OBJECTS_JS_REGEXP_STRING_ITERATOR_H_
+
+#include "src/objects.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+class JSRegExpStringIterator : public JSObject {
+ public:
+ // [regexp]: the [[IteratingRegExp]] internal property.
+ DECL_ACCESSORS(iterating_regexp, Object)
+
+ // [string]: The [[IteratedString]] internal property.
+ DECL_ACCESSORS(iterating_string, String)
+
+ DECL_INT_ACCESSORS(flags)
+
+ // [boolean]: The [[Done]] internal property.
+ DECL_BOOLEAN_ACCESSORS(done)
+
+ // [boolean]: The [[Global]] internal property.
+ DECL_BOOLEAN_ACCESSORS(global)
+
+ // [boolean]: The [[Unicode]] internal property.
+ DECL_BOOLEAN_ACCESSORS(unicode)
+
+ DECL_CAST(JSRegExpStringIterator)
+ DECL_PRINTER(JSRegExpStringIterator)
+ DECL_VERIFIER(JSRegExpStringIterator)
+
+ static const int kIteratingRegExpOffset = JSObject::kHeaderSize;
+ static const int kIteratedStringOffset =
+ kIteratingRegExpOffset + kPointerSize;
+ static const int kFlagsOffset = kIteratedStringOffset + kPointerSize;
+
+ static const int kSize = kFlagsOffset + kPointerSize;
+
+ static const int kDoneBit = 0;
+ static const int kGlobalBit = 1;
+ static const int kUnicodeBit = 2;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSRegExpStringIterator);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_JS_REGEXP_STRING_ITERATOR_H_
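The new iterator keeps its [[Done]], [[Global]] and [[Unicode]] slots as three bits of a single Smi-backed flags field. A minimal standalone sketch of what the BOOL_ACCESSORS declarations above amount to (not V8 code; the struct is illustrative):

// Standalone sketch (not V8 code): three booleans packed into one flags field.
#include <cassert>

struct RegExpStringIteratorFlags {
  int raw = 0;  // stored as a Smi in the real object

  static constexpr int kDoneBit = 0;
  static constexpr int kGlobalBit = 1;
  static constexpr int kUnicodeBit = 2;

  bool get(int bit) const { return (raw >> bit) & 1; }
  void set(int bit, bool value) {
    raw = value ? (raw | (1 << bit)) : (raw & ~(1 << bit));
  }

  bool done() const { return get(kDoneBit); }
  void set_done(bool v) { set(kDoneBit, v); }
  bool global() const { return get(kGlobalBit); }
  void set_global(bool v) { set(kGlobalBit, v); }
  bool unicode() const { return get(kUnicodeBit); }
  void set_unicode(bool v) { set(kUnicodeBit, v); }
};

int main() {
  RegExpStringIteratorFlags flags;
  flags.set_global(true);
  flags.set_unicode(true);
  assert(flags.raw == 0b110);
  assert(!flags.done() && flags.global() && flags.unicode());
}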
diff --git a/deps/v8/src/objects/literal-objects.cc b/deps/v8/src/objects/literal-objects.cc
index ab673aad80..9e0405c8cb 100644
--- a/deps/v8/src/objects/literal-objects.cc
+++ b/deps/v8/src/objects/literal-objects.cc
@@ -6,7 +6,7 @@
#include "src/accessors.h"
#include "src/ast/ast.h"
-#include "src/factory.h"
+#include "src/heap/factory.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/objects/literal-objects-inl.h"
diff --git a/deps/v8/src/objects/literal-objects.h b/deps/v8/src/objects/literal-objects.h
index 7fb0c712f2..c5c2c765c9 100644
--- a/deps/v8/src/objects/literal-objects.h
+++ b/deps/v8/src/objects/literal-objects.h
@@ -35,6 +35,7 @@ class BoilerplateDescription : public FixedArray {
void set_backing_store_size(Isolate* isolate, int backing_store_size);
DECL_CAST(BoilerplateDescription)
+ DECL_PRINTER(BoilerplateDescription)
private:
bool has_number_of_properties() const;
diff --git a/deps/v8/src/objects/map-inl.h b/deps/v8/src/objects/map-inl.h
index 250a998f61..5916e6b690 100644
--- a/deps/v8/src/objects/map-inl.h
+++ b/deps/v8/src/objects/map-inl.h
@@ -29,7 +29,7 @@ CAST_ACCESSOR(Map)
ACCESSORS(Map, instance_descriptors, DescriptorArray, kDescriptorsOffset)
ACCESSORS_CHECKED(Map, layout_descriptor, LayoutDescriptor,
kLayoutDescriptorOffset, FLAG_unbox_double_fields)
-ACCESSORS(Map, raw_transitions, Object, kTransitionsOrPrototypeInfoOffset)
+WEAK_ACCESSORS(Map, raw_transitions, kTransitionsOrPrototypeInfoOffset)
// |bit_field| fields.
BIT_FIELD_ACCESSORS(Map, bit_field, has_non_instance_prototype,
@@ -49,6 +49,8 @@ BIT_FIELD_ACCESSORS(Map, bit_field, has_prototype_slot,
// |bit_field2| fields.
BIT_FIELD_ACCESSORS(Map, bit_field2, is_extensible, Map::IsExtensibleBit)
BIT_FIELD_ACCESSORS(Map, bit_field2, is_prototype_map, Map::IsPrototypeMapBit)
+BIT_FIELD_ACCESSORS(Map, bit_field2, is_in_retained_map_list,
+ Map::IsInRetainedMapListBit)
// |bit_field3| fields.
BIT_FIELD_ACCESSORS(Map, bit_field3, owns_descriptors, Map::OwnsDescriptorsBit)
@@ -660,9 +662,17 @@ void Map::SetBackPointer(Object* value, WriteBarrierMode mode) {
ACCESSORS(Map, dependent_code, DependentCode, kDependentCodeOffset)
ACCESSORS(Map, weak_cell_cache, Object, kWeakCellCacheOffset)
+ACCESSORS(Map, prototype_validity_cell, Object, kPrototypeValidityCellOffset)
ACCESSORS(Map, constructor_or_backpointer, Object,
kConstructorOrBackPointerOffset)
+bool Map::IsPrototypeValidityCellValid() const {
+ Object* validity_cell = prototype_validity_cell();
+ Object* value = validity_cell->IsSmi() ? Smi::cast(validity_cell)
+ : Cell::cast(validity_cell)->value();
+ return value == Smi::FromInt(Map::kPrototypeChainValid);
+}
+
Object* Map::GetConstructor() const {
Object* maybe_constructor = constructor_or_backpointer();
// Follow any back pointers.
diff --git a/deps/v8/src/objects/map.h b/deps/v8/src/objects/map.h
index 3bc9dd17ff..4775521643 100644
--- a/deps/v8/src/objects/map.h
+++ b/deps/v8/src/objects/map.h
@@ -38,7 +38,6 @@ namespace internal {
V(JSFunction) \
V(JSObject) \
V(JSObjectFast) \
- V(JSRegExp) \
V(JSWeakCollection) \
V(Map) \
V(NativeContext) \
@@ -56,7 +55,9 @@ namespace internal {
V(Symbol) \
V(ThinString) \
V(TransitionArray) \
- V(WeakCell)
+ V(WasmInstanceObject) \
+ V(WeakCell) \
+ V(WeakFixedArray)
// For data objects, JS objects and structs along with generic visitor which
// can visit object of any size we provide visitors specialized by
@@ -116,11 +117,12 @@ typedef std::vector<Handle<Map>> MapHandles;
// | | - is_undetectable (bit 4) |
// | | - is_access_check_needed (bit 5) |
// | | - is_constructor (bit 6) |
-// | | - unused (bit 7) |
+// | | - has_prototype_slot (bit 7) |
// +----------+---------------------------------------------+
// | Byte | [bit_field2] |
// | | - is_extensible (bit 0) |
-// | | - is_prototype_map (bit 2) |
+// | | - is_prototype_map (bit 1) |
+// | | - is_in_retained_map_list (bit 2) |
// | | - elements_kind (bits 3..7) |
// +----+----------+---------------------------------------------+
// | Int | [bit_field3] |
@@ -239,10 +241,10 @@ class Map : public HeapObject {
DECL_PRIMITIVE_ACCESSORS(bit_field2, byte)
// Bit positions for |bit_field2|.
-#define MAP_BIT_FIELD2_FIELDS(V, _) \
- /* One bit is still free here. */ \
- V(IsExtensibleBit, bool, 1, _) \
- V(IsPrototypeMapBit, bool, 1, _) \
+#define MAP_BIT_FIELD2_FIELDS(V, _) \
+ V(IsExtensibleBit, bool, 1, _) \
+ V(IsPrototypeMapBit, bool, 1, _) \
+ V(IsInRetainedMapListBit, bool, 1, _) \
V(ElementsKindBits, ElementsKind, 5, _)
DEFINE_BIT_FIELDS(MAP_BIT_FIELD2_FIELDS)
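With IsInRetainedMapListBit added, bit_field2 now uses 1 + 1 + 1 + 5 bits. A minimal standalone check (not V8 code) that this still fits the single byte it occupies:

// Standalone sketch (not V8 code): the bit_field2 byte budget.
constexpr int kIsExtensibleBits = 1;
constexpr int kIsPrototypeMapBits = 1;
constexpr int kIsInRetainedMapListBits = 1;
constexpr int kElementsKindBits = 5;
static_assert(kIsExtensibleBits + kIsPrototypeMapBits + kIsInRetainedMapListBits +
                      kElementsKindBits <= 8,
              "bit_field2 must fit in one byte");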
@@ -369,6 +371,10 @@ class Map : public HeapObject {
DECL_BOOLEAN_ACCESSORS(is_prototype_map)
inline bool is_abandoned_prototype_map() const;
+ // Whether the instance has been added to the retained map list by
+ // Heap::AddRetainedMap.
+ DECL_BOOLEAN_ACCESSORS(is_in_retained_map_list)
+
DECL_PRIMITIVE_ACCESSORS(elements_kind, ElementsKind)
// Tells whether the instance has fast elements that are only Smis.
@@ -399,7 +405,7 @@ class Map : public HeapObject {
// [raw_transitions]: Provides access to the transitions storage field.
// Don't call set_raw_transitions() directly to overwrite transitions, use
// the TransitionArray::ReplaceTransitions() wrapper instead!
- DECL_ACCESSORS(raw_transitions, Object)
+ DECL_ACCESSORS(raw_transitions, MaybeObject)
// [prototype_info]: Per-prototype metadata. Aliased with transitions
// (which prototype maps don't have).
DECL_ACCESSORS(prototype_info, Object)
@@ -414,11 +420,13 @@ class Map : public HeapObject {
Isolate* isolate);
// [prototype chain validity cell]: Associated with a prototype object,
- // stored in that object's map's PrototypeInfo, indicates that prototype
- // chains through this object are currently valid. The cell will be
- // invalidated and replaced when the prototype chain changes.
- static Handle<Cell> GetOrCreatePrototypeChainValidityCell(Handle<Map> map,
- Isolate* isolate);
+ // stored in that object's map, indicates that prototype chains through this
+ // object are currently valid. The cell will be invalidated and replaced when
+ // the prototype chain changes. When there is nothing to guard (for example,
+ // when the direct prototype is null or a Proxy), this function returns a Smi
+ // with the |kPrototypeChainValid| sentinel value.
+ static Handle<Object> GetOrCreatePrototypeChainValidityCell(Handle<Map> map,
+ Isolate* isolate);
static const int kPrototypeChainValid = 0;
static const int kPrototypeChainInvalid = 1;
@@ -458,7 +466,7 @@ class Map : public HeapObject {
int* old_number_of_fields) const;
// TODO(ishell): moveit!
static Handle<Map> GeneralizeAllFields(Handle<Map> map);
- MUST_USE_RESULT static Handle<FieldType> GeneralizeFieldType(
+ V8_WARN_UNUSED_RESULT static Handle<FieldType> GeneralizeFieldType(
Representation rep1, Handle<FieldType> type1, Representation rep2,
Handle<FieldType> type2, Isolate* isolate);
static void GeneralizeField(Handle<Map> map, int modify_index,
@@ -559,6 +567,24 @@ class Map : public HeapObject {
// [weak cell cache]: cache that stores a weak cell pointing to this map.
DECL_ACCESSORS(weak_cell_cache, Object)
+ // [prototype_validity_cell]: Cell containing the validity bit for prototype
+ // chains or Smi(0) if uninitialized.
+ // The meaning of this validity cell is different for prototype maps and
+ // non-prototype maps.
+ // For prototype maps the validity bit "guards" modifications of prototype
+ // chains going through this object. When a prototype object changes, both its
+ // own validity cell and those of all "downstream" prototypes are invalidated;
+ // handlers for a given receiver embed the currently valid cell for that
+ // receiver's prototype during their creation and check it on execution.
+ // For non-prototype maps which are used as transitioning store handlers this
+ // field contains the validity cell which guards modifications of this map's
+ // prototype.
+ DECL_ACCESSORS(prototype_validity_cell, Object)
+
+ // Returns true if prototype validity cell value represents "valid" prototype
+ // chain state.
+ inline bool IsPrototypeValidityCellValid() const;
+
inline PropertyDetails GetLastDescriptorDetails() const;
inline int LastAdded() const;
@@ -600,7 +626,7 @@ class Map : public HeapObject {
// is found by re-transitioning from the root of the transition tree using the
// descriptor array of the map. Returns MaybeHandle<Map>() if no updated map
// is found.
- static MaybeHandle<Map> TryUpdate(Handle<Map> map) WARN_UNUSED_RESULT;
+ static MaybeHandle<Map> TryUpdate(Handle<Map> map) V8_WARN_UNUSED_RESULT;
// Returns a non-deprecated version of the input. This method may deprecate
// existing maps along the way if encodings conflict. Not for use while
@@ -622,12 +648,12 @@ class Map : public HeapObject {
static Handle<Object> WrapFieldType(Handle<FieldType> type);
static FieldType* UnwrapFieldType(Object* wrapped_type);
- MUST_USE_RESULT static MaybeHandle<Map> CopyWithField(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Map> CopyWithField(
Handle<Map> map, Handle<Name> name, Handle<FieldType> type,
PropertyAttributes attributes, PropertyConstness constness,
Representation representation, TransitionFlag flag);
- MUST_USE_RESULT static MaybeHandle<Map> CopyWithConstant(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Map> CopyWithConstant(
Handle<Map> map, Handle<Name> name, Handle<Object> constant,
PropertyAttributes attributes, TransitionFlag flag);
@@ -655,10 +681,12 @@ class Map : public HeapObject {
// transitions to avoid an explosion in the number of maps for objects used as
// dictionaries.
inline bool TooManyFastProperties(StoreFromKeyed store_mode) const;
- static Handle<Map> TransitionToDataProperty(
- Handle<Map> map, Handle<Name> name, Handle<Object> value,
- PropertyAttributes attributes, PropertyConstness constness,
- StoreFromKeyed store_mode, bool* created_new_map);
+ static Handle<Map> TransitionToDataProperty(Handle<Map> map,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ PropertyConstness constness,
+ StoreFromKeyed store_mode);
static Handle<Map> TransitionToAccessorProperty(
Isolate* isolate, Handle<Map> map, Handle<Name> name, int descriptor,
Handle<Object> getter, Handle<Object> setter,
@@ -773,6 +801,7 @@ class Map : public HeapObject {
V(kLayoutDescriptorOffset, FLAG_unbox_double_fields ? kPointerSize : 0) \
V(kDependentCodeOffset, kPointerSize) \
V(kWeakCellCacheOffset, kPointerSize) \
+ V(kPrototypeValidityCellOffset, kPointerSize) \
V(kPointerFieldsEndOffset, 0) \
/* Total size. */ \
V(kSize, 0)
@@ -782,9 +811,7 @@ class Map : public HeapObject {
STATIC_ASSERT(kInstanceTypeOffset == Internals::kMapInstanceTypeOffset);
- typedef FixedBodyDescriptor<kPointerFieldsBeginOffset,
- kPointerFieldsEndOffset, kSize>
- BodyDescriptor;
+ class BodyDescriptor;
// Compares this map to another to see if they describe equivalent objects.
// If |mode| is set to CLEAR_INOBJECT_PROPERTIES, |other| is treated as if
@@ -873,7 +900,7 @@ class Map : public HeapObject {
Handle<DescriptorArray> descriptors,
Descriptor* descriptor, int index,
TransitionFlag flag);
- static MUST_USE_RESULT MaybeHandle<Map> TryReconfigureExistingProperty(
+ static V8_WARN_UNUSED_RESULT MaybeHandle<Map> TryReconfigureExistingProperty(
Handle<Map> map, int descriptor, PropertyKind kind,
PropertyAttributes attributes, const char** reason);
@@ -925,8 +952,8 @@ class NormalizedMapCache : public FixedArray {
public:
static Handle<NormalizedMapCache> New(Isolate* isolate);
- MUST_USE_RESULT MaybeHandle<Map> Get(Handle<Map> fast_map,
- PropertyNormalizationMode mode);
+ V8_WARN_UNUSED_RESULT MaybeHandle<Map> Get(Handle<Map> fast_map,
+ PropertyNormalizationMode mode);
void Set(Handle<Map> fast_map, Handle<Map> normalized_map,
Handle<WeakCell> normalized_map_weak_cell);
diff --git a/deps/v8/src/objects/maybe-object-inl.h b/deps/v8/src/objects/maybe-object-inl.h
new file mode 100644
index 0000000000..088a8c831e
--- /dev/null
+++ b/deps/v8/src/objects/maybe-object-inl.h
@@ -0,0 +1,92 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_MAYBE_OBJECT_INL_H_
+#define V8_OBJECTS_MAYBE_OBJECT_INL_H_
+
+#include "include/v8.h"
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+bool MaybeObject::ToSmi(Smi** value) {
+ if (HAS_SMI_TAG(this)) {
+ *value = Smi::cast(reinterpret_cast<Object*>(this));
+ return true;
+ }
+ return false;
+}
+
+bool MaybeObject::IsStrongOrWeakHeapObject() {
+ if (IsSmi() || IsClearedWeakHeapObject()) {
+ return false;
+ }
+ return true;
+}
+
+bool MaybeObject::ToStrongOrWeakHeapObject(HeapObject** result) {
+ if (IsSmi() || IsClearedWeakHeapObject()) {
+ return false;
+ }
+ *result = GetHeapObject();
+ return true;
+}
+
+bool MaybeObject::ToStrongOrWeakHeapObject(
+ HeapObject** result, HeapObjectReferenceType* reference_type) {
+ if (IsSmi() || IsClearedWeakHeapObject()) {
+ return false;
+ }
+ *reference_type = HasWeakHeapObjectTag(this)
+ ? HeapObjectReferenceType::WEAK
+ : HeapObjectReferenceType::STRONG;
+ *result = GetHeapObject();
+ return true;
+}
+
+bool MaybeObject::IsStrongHeapObject() {
+ return !HasWeakHeapObjectTag(this) && !IsSmi();
+}
+
+bool MaybeObject::ToStrongHeapObject(HeapObject** result) {
+ if (!HasWeakHeapObjectTag(this) && !IsSmi()) {
+ *result = reinterpret_cast<HeapObject*>(this);
+ return true;
+ }
+ return false;
+}
+
+HeapObject* MaybeObject::ToStrongHeapObject() {
+ DCHECK(IsStrongHeapObject());
+ return reinterpret_cast<HeapObject*>(this);
+}
+
+bool MaybeObject::IsWeakHeapObject() {
+ return HasWeakHeapObjectTag(this) && !IsClearedWeakHeapObject();
+}
+
+bool MaybeObject::ToWeakHeapObject(HeapObject** result) {
+ if (HasWeakHeapObjectTag(this) && !IsClearedWeakHeapObject()) {
+ *result = GetHeapObject();
+ return true;
+ }
+ return false;
+}
+
+HeapObject* MaybeObject::ToWeakHeapObject() {
+ DCHECK(IsWeakHeapObject());
+ return GetHeapObject();
+}
+
+HeapObject* MaybeObject::GetHeapObject() {
+ DCHECK(!IsSmi());
+ DCHECK(!IsClearedWeakHeapObject());
+ return RemoveWeakHeapObjectMask(reinterpret_cast<HeapObjectReference*>(this));
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_MAYBE_OBJECT_INL_H_
diff --git a/deps/v8/src/objects/maybe-object.h b/deps/v8/src/objects/maybe-object.h
new file mode 100644
index 0000000000..f9f080ba31
--- /dev/null
+++ b/deps/v8/src/objects/maybe-object.h
@@ -0,0 +1,108 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_MAYBE_OBJECT_H_
+#define V8_OBJECTS_MAYBE_OBJECT_H_
+
+#include "include/v8.h"
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+class HeapObject;
+class Smi;
+
+// A MaybeObject is either a Smi, a strong reference to a HeapObject, a weak
+// reference to a HeapObject, or a cleared weak reference. It's used for
+// implementing in-place weak references (see design doc: goo.gl/j6SdcK).
+class MaybeObject {
+ public:
+ bool IsSmi() const { return HAS_SMI_TAG(this); }
+ inline bool ToSmi(Smi** value);
+
+ bool IsClearedWeakHeapObject() {
+ return ::v8::internal::IsClearedWeakHeapObject(this);
+ }
+
+ inline bool IsStrongOrWeakHeapObject();
+ inline bool ToStrongOrWeakHeapObject(HeapObject** result);
+ inline bool ToStrongOrWeakHeapObject(HeapObject** result,
+ HeapObjectReferenceType* reference_type);
+ inline bool IsStrongHeapObject();
+ inline bool ToStrongHeapObject(HeapObject** result);
+ inline HeapObject* ToStrongHeapObject();
+ inline bool IsWeakHeapObject();
+ inline bool ToWeakHeapObject(HeapObject** result);
+ inline HeapObject* ToWeakHeapObject();
+
+ inline HeapObject* GetHeapObject();
+
+ static MaybeObject* FromSmi(Smi* smi) {
+ DCHECK(HAS_SMI_TAG(smi));
+ return reinterpret_cast<MaybeObject*>(smi);
+ }
+
+ static MaybeObject* FromObject(Object* object) {
+ DCHECK(!HasWeakHeapObjectTag(object));
+ return reinterpret_cast<MaybeObject*>(object);
+ }
+
+ static MaybeObject* MakeWeak(MaybeObject* object) {
+ DCHECK(object->IsStrongOrWeakHeapObject());
+ return AddWeakHeapObjectMask(object);
+ }
+
+#ifdef VERIFY_HEAP
+ static void VerifyMaybeObjectPointer(MaybeObject* p);
+#endif
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(MaybeObject);
+};
+
+// A HeapObjectReference is either a strong reference to a HeapObject, a weak
+// reference to a HeapObject, or a cleared weak reference.
+class HeapObjectReference : public MaybeObject {
+ public:
+ static HeapObjectReference* Strong(HeapObject* object) {
+ DCHECK(!HasWeakHeapObjectTag(object));
+ return reinterpret_cast<HeapObjectReference*>(object);
+ }
+
+ static HeapObjectReference* Weak(HeapObject* object) {
+ DCHECK(!HasWeakHeapObjectTag(object));
+ return AddWeakHeapObjectMask(object);
+ }
+
+ static HeapObjectReference* ClearedValue() {
+ return reinterpret_cast<HeapObjectReference*>(kClearedWeakHeapObject);
+ }
+
+ static void Update(HeapObjectReference** slot, HeapObject* value) {
+ DCHECK(!HAS_SMI_TAG(*slot));
+ DCHECK(Internals::HasHeapObjectTag(value));
+
+#ifdef DEBUG
+ bool weak_before = HasWeakHeapObjectTag(*slot);
+#endif
+
+ *slot = reinterpret_cast<HeapObjectReference*>(
+ reinterpret_cast<intptr_t>(value) |
+ (reinterpret_cast<intptr_t>(*slot) & kWeakHeapObjectMask));
+
+#ifdef DEBUG
+ bool weak_after = HasWeakHeapObjectTag(*slot);
+ DCHECK_EQ(weak_before, weak_after);
+#endif
+ }
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(HeapObjectReference);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_MAYBE_OBJECT_H_
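
As a rough mental model of the tagging these declarations imply — assuming one low pointer bit marks a weak reference and a dedicated sentinel marks a cleared reference; the real encoding also has to coexist with Smi tagging and lives elsewhere in the object model — here is a standalone sketch with invented names:

    #include <cassert>
    #include <cstdint>
    #include <iostream>

    constexpr uintptr_t kWeakMask = 1;  // assumed low tag bit for "weak"
    constexpr uintptr_t kCleared = 3;   // assumed sentinel for a cleared weak ref

    struct Obj { int payload; };

    uintptr_t MakeStrong(Obj* o) { return reinterpret_cast<uintptr_t>(o); }
    uintptr_t MakeWeak(Obj* o)   { return reinterpret_cast<uintptr_t>(o) | kWeakMask; }

    bool IsCleared(uintptr_t ref) { return ref == kCleared; }
    bool IsWeak(uintptr_t ref)    { return (ref & kWeakMask) != 0 && !IsCleared(ref); }

    // Analogue of ToStrongOrWeakHeapObject(): strip the tag and report weakness.
    bool ToObject(uintptr_t ref, Obj** out, bool* is_weak) {
      if (IsCleared(ref)) return false;
      *is_weak = IsWeak(ref);
      *out = reinterpret_cast<Obj*>(ref & ~kWeakMask);
      return true;
    }

    int main() {
      Obj o{42};
      uintptr_t weak_ref = MakeWeak(&o);
      Obj* out = nullptr;
      bool weak = false;
      if (ToObject(weak_ref, &out, &weak)) {
        std::cout << out->payload << (weak ? " (weak)\n" : " (strong)\n");
      }
      // After the referent dies, a GC would overwrite the slot with the cleared value.
      assert(!ToObject(kCleared, &out, &weak));
    }
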
diff --git a/deps/v8/src/objects/module.cc b/deps/v8/src/objects/module.cc
index b9d7697fb5..e2472616f6 100644
--- a/deps/v8/src/objects/module.cc
+++ b/deps/v8/src/objects/module.cc
@@ -900,5 +900,28 @@ MaybeHandle<Object> JSModuleNamespace::GetExport(Handle<String> name) {
return value;
}
+Maybe<PropertyAttributes> JSModuleNamespace::GetPropertyAttributes(
+ LookupIterator* it) {
+ Handle<JSModuleNamespace> object = it->GetHolder<JSModuleNamespace>();
+ Handle<String> name = Handle<String>::cast(it->GetName());
+ DCHECK_EQ(it->state(), LookupIterator::ACCESSOR);
+
+ Isolate* isolate = name->GetIsolate();
+
+ Handle<Object> lookup(object->module()->exports()->Lookup(name), isolate);
+ if (lookup->IsTheHole(isolate)) {
+ return Just(ABSENT);
+ }
+
+ Handle<Object> value(Handle<Cell>::cast(lookup)->value(), isolate);
+ if (value->IsTheHole(isolate)) {
+ isolate->Throw(*isolate->factory()->NewReferenceError(
+ MessageTemplate::kNotDefined, name));
+ return Nothing<PropertyAttributes>();
+ }
+
+ return Just(it->property_attributes());
+}
+
} // namespace internal
} // namespace v8
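
The two-step check in GetPropertyAttributes above — a missing entry in the exports table means ABSENT, while a present entry whose cell still holds the hole means an uninitialized binding that must throw — can be sketched standalone as follows; the names and types here are invented:

    #include <iostream>
    #include <map>
    #include <optional>
    #include <stdexcept>
    #include <string>

    struct Cell { std::optional<int> value; };  // empty models "the hole"

    // Returns nullopt for an absent export, throws for an uninitialized (TDZ)
    // binding, and otherwise returns the constant attribute value.
    std::optional<int> GetExportAttributes(
        const std::map<std::string, Cell>& exports, const std::string& name) {
      auto it = exports.find(name);
      if (it == exports.end()) return std::nullopt;           // ABSENT
      if (!it->second.value.has_value())
        throw std::runtime_error(name + " is not defined");   // ReferenceError analogue
      return 0;                                               // property attributes
    }

    int main() {
      std::map<std::string, Cell> exports{{"initialized", Cell{1}}, {"tdz", Cell{}}};
      std::cout << GetExportAttributes(exports, "missing").has_value() << "\n";      // 0
      std::cout << GetExportAttributes(exports, "initialized").has_value() << "\n";  // 1
      try {
        GetExportAttributes(exports, "tdz");
      } catch (const std::exception& e) {
        std::cout << e.what() << "\n";
      }
    }
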
diff --git a/deps/v8/src/objects/module.h b/deps/v8/src/objects/module.h
index 9cf3bc4d2a..613304ae78 100644
--- a/deps/v8/src/objects/module.h
+++ b/deps/v8/src/objects/module.h
@@ -90,12 +90,13 @@ class Module : public Struct {
// Returns false if an exception occurred during instantiation, true
// otherwise. (In the case where the callback throws an exception, that
// exception is propagated.)
- static MUST_USE_RESULT bool Instantiate(Handle<Module> module,
- v8::Local<v8::Context> context,
- v8::Module::ResolveCallback callback);
+ static V8_WARN_UNUSED_RESULT bool Instantiate(
+ Handle<Module> module, v8::Local<v8::Context> context,
+ v8::Module::ResolveCallback callback);
// Implementation of spec operation ModuleEvaluation.
- static MUST_USE_RESULT MaybeHandle<Object> Evaluate(Handle<Module> module);
+ static V8_WARN_UNUSED_RESULT MaybeHandle<Object> Evaluate(
+ Handle<Module> module);
Cell* GetCell(int cell_index);
static Handle<Object> LoadVariable(Handle<Module> module, int cell_index);
@@ -152,28 +153,28 @@ class Module : public Struct {
// [must_resolve] is false, a null result may or may not indicate an
// exception (so check manually!).
class ResolveSet;
- static MUST_USE_RESULT MaybeHandle<Cell> ResolveExport(
+ static V8_WARN_UNUSED_RESULT MaybeHandle<Cell> ResolveExport(
Handle<Module> module, Handle<String> module_specifier,
Handle<String> export_name, MessageLocation loc, bool must_resolve,
ResolveSet* resolve_set);
- static MUST_USE_RESULT MaybeHandle<Cell> ResolveImport(
+ static V8_WARN_UNUSED_RESULT MaybeHandle<Cell> ResolveImport(
Handle<Module> module, Handle<String> name, int module_request,
MessageLocation loc, bool must_resolve, ResolveSet* resolve_set);
- static MUST_USE_RESULT MaybeHandle<Cell> ResolveExportUsingStarExports(
+ static V8_WARN_UNUSED_RESULT MaybeHandle<Cell> ResolveExportUsingStarExports(
Handle<Module> module, Handle<String> module_specifier,
Handle<String> export_name, MessageLocation loc, bool must_resolve,
ResolveSet* resolve_set);
- static MUST_USE_RESULT bool PrepareInstantiate(
+ static V8_WARN_UNUSED_RESULT bool PrepareInstantiate(
Handle<Module> module, v8::Local<v8::Context> context,
v8::Module::ResolveCallback callback);
- static MUST_USE_RESULT bool FinishInstantiate(
+ static V8_WARN_UNUSED_RESULT bool FinishInstantiate(
Handle<Module> module, ZoneForwardList<Handle<Module>>* stack,
unsigned* dfs_index, Zone* zone);
static void RunInitializationCode(Handle<Module> module);
- static MUST_USE_RESULT MaybeHandle<Object> Evaluate(
+ static V8_WARN_UNUSED_RESULT MaybeHandle<Object> Evaluate(
Handle<Module> module, ZoneForwardList<Handle<Module>>* stack,
unsigned* dfs_index);
@@ -213,7 +214,13 @@ class JSModuleNamespace : public JSObject {
// Retrieve the value exported by [module] under the given [name]. If there is
// no such export, return Just(undefined). If the export is uninitialized,
// schedule an exception and return Nothing.
- MUST_USE_RESULT MaybeHandle<Object> GetExport(Handle<String> name);
+ V8_WARN_UNUSED_RESULT MaybeHandle<Object> GetExport(Handle<String> name);
+
+ // Return the (constant) property attributes for the referenced property,
+ // which is assumed to correspond to an export. If the export is
+ // uninitialized, schedule an exception and return Nothing.
+ static V8_WARN_UNUSED_RESULT Maybe<PropertyAttributes> GetPropertyAttributes(
+ LookupIterator* it);
// In-object fields.
enum {
diff --git a/deps/v8/src/objects/name.h b/deps/v8/src/objects/name.h
index e5cfe7733b..091eb4c641 100644
--- a/deps/v8/src/objects/name.h
+++ b/deps/v8/src/objects/name.h
@@ -54,8 +54,9 @@ class Name : public HeapObject {
// Return a string version of this name that is converted according to the
// rules described in ES6 section 9.2.11.
- MUST_USE_RESULT static MaybeHandle<String> ToFunctionName(Handle<Name> name);
- MUST_USE_RESULT static MaybeHandle<String> ToFunctionName(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<String> ToFunctionName(
+ Handle<Name> name);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<String> ToFunctionName(
Handle<Name> name, Handle<String> prefix);
DECL_CAST(Name)
diff --git a/deps/v8/src/objects/object-macros-undef.h b/deps/v8/src/objects/object-macros-undef.h
index d8ca9355ad..ea515b61ed 100644
--- a/deps/v8/src/objects/object-macros-undef.h
+++ b/deps/v8/src/objects/object-macros-undef.h
@@ -14,6 +14,9 @@
#undef ACCESSORS_CHECKED2
#undef ACCESSORS_CHECKED
#undef ACCESSORS
+#undef WEAK_ACCESSORS_CHECKED
+#undef WEAK_ACCESSORS_CHECKED2
+#undef WEAK_ACCESSORS
#undef SMI_ACCESSORS_CHECKED
#undef SMI_ACCESSORS
#undef SYNCHRONIZED_SMI_ACCESSORS
@@ -25,13 +28,16 @@
#undef FIELD_ADDR
#undef FIELD_ADDR_CONST
#undef READ_FIELD
+#undef READ_WEAK_FIELD
#undef ACQUIRE_READ_FIELD
#undef RELAXED_READ_FIELD
#undef WRITE_FIELD
+#undef WRITE_WEAK_FIELD
#undef RELEASE_WRITE_FIELD
#undef RELAXED_WRITE_FIELD
#undef WRITE_BARRIER
#undef CONDITIONAL_WRITE_BARRIER
+#undef CONDITIONAL_WEAK_WRITE_BARRIER
#undef READ_DOUBLE_FIELD
#undef WRITE_DOUBLE_FIELD
#undef READ_INT_FIELD
diff --git a/deps/v8/src/objects/object-macros.h b/deps/v8/src/objects/object-macros.h
index 52835bce9b..03a1196703 100644
--- a/deps/v8/src/objects/object-macros.h
+++ b/deps/v8/src/objects/object-macros.h
@@ -71,6 +71,25 @@
#define ACCESSORS(holder, name, type, offset) \
ACCESSORS_CHECKED(holder, name, type, offset, true)
+#define WEAK_ACCESSORS_CHECKED2(holder, name, offset, get_condition, \
+ set_condition) \
+ MaybeObject* holder::name() const { \
+ MaybeObject* value = READ_WEAK_FIELD(this, offset); \
+ DCHECK(get_condition); \
+ return value; \
+ } \
+ void holder::set_##name(MaybeObject* value, WriteBarrierMode mode) { \
+ DCHECK(set_condition); \
+ WRITE_WEAK_FIELD(this, offset, value); \
+ CONDITIONAL_WEAK_WRITE_BARRIER(GetHeap(), this, offset, value, mode); \
+ }
+
+#define WEAK_ACCESSORS_CHECKED(holder, name, offset, condition) \
+ WEAK_ACCESSORS_CHECKED2(holder, name, offset, condition, condition)
+
+#define WEAK_ACCESSORS(holder, name, offset) \
+ WEAK_ACCESSORS_CHECKED(holder, name, offset, true)
+
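
For orientation, a use such as WEAK_ACCESSORS(Map, raw_transitions, kSomeWeakFieldOffset) — the offset name here is hypothetical — would expand along these lines, matching the switch of raw_transitions to DECL_ACCESSORS(raw_transitions, MaybeObject) earlier in this patch:

    MaybeObject* Map::raw_transitions() const {
      MaybeObject* value = READ_WEAK_FIELD(this, kSomeWeakFieldOffset);
      DCHECK(true);  // the unchecked variant passes "true" as both conditions
      return value;
    }
    void Map::set_raw_transitions(MaybeObject* value, WriteBarrierMode mode) {
      DCHECK(true);
      WRITE_WEAK_FIELD(this, kSomeWeakFieldOffset, value);
      CONDITIONAL_WEAK_WRITE_BARRIER(GetHeap(), this, kSomeWeakFieldOffset, value, mode);
    }
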
// Getter that returns a Smi as an int and writes an int as a Smi.
#define SMI_ACCESSORS_CHECKED(holder, name, offset, condition) \
int holder::name() const { \
@@ -135,6 +154,9 @@
#define READ_FIELD(p, offset) \
(*reinterpret_cast<Object* const*>(FIELD_ADDR_CONST(p, offset)))
+#define READ_WEAK_FIELD(p, offset) \
+ (*reinterpret_cast<MaybeObject* const*>(FIELD_ADDR_CONST(p, offset)))
+
#define ACQUIRE_READ_FIELD(p, offset) \
reinterpret_cast<Object*>(base::Acquire_Load( \
reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR_CONST(p, offset))))
@@ -143,14 +165,24 @@
reinterpret_cast<Object*>(base::Relaxed_Load( \
reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR_CONST(p, offset))))
+#define RELAXED_READ_WEAK_FIELD(p, offset) \
+ reinterpret_cast<MaybeObject*>(base::Relaxed_Load( \
+ reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR_CONST(p, offset))))
+
#ifdef V8_CONCURRENT_MARKING
#define WRITE_FIELD(p, offset, value) \
base::Relaxed_Store( \
reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
reinterpret_cast<base::AtomicWord>(value));
+#define WRITE_WEAK_FIELD(p, offset, value) \
+ base::Relaxed_Store( \
+ reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
+ reinterpret_cast<base::AtomicWord>(value));
#else
#define WRITE_FIELD(p, offset, value) \
(*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value)
+#define WRITE_WEAK_FIELD(p, offset, value) \
+ (*reinterpret_cast<MaybeObject**>(FIELD_ADDR(p, offset)) = value)
#endif
#define RELEASE_WRITE_FIELD(p, offset, value) \
@@ -168,6 +200,12 @@
object, HeapObject::RawField(object, offset), value); \
heap->RecordWrite(object, HeapObject::RawField(object, offset), value);
+#define WEAK_WRITE_BARRIER(heap, object, offset, value) \
+ heap->incremental_marking()->RecordMaybeWeakWrite( \
+ object, HeapObject::RawMaybeWeakField(object, offset), value); \
+ heap->RecordWrite(object, HeapObject::RawMaybeWeakField(object, offset), \
+ value);
+
#define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode) \
if (mode != SKIP_WRITE_BARRIER) { \
if (mode == UPDATE_WRITE_BARRIER) { \
@@ -177,6 +215,16 @@
heap->RecordWrite(object, HeapObject::RawField(object, offset), value); \
}
+#define CONDITIONAL_WEAK_WRITE_BARRIER(heap, object, offset, value, mode) \
+ if (mode != SKIP_WRITE_BARRIER) { \
+ if (mode == UPDATE_WRITE_BARRIER) { \
+ heap->incremental_marking()->RecordMaybeWeakWrite( \
+ object, HeapObject::RawMaybeWeakField(object, offset), value); \
+ } \
+ heap->RecordWrite(object, HeapObject::RawMaybeWeakField(object, offset), \
+ value); \
+ }
+
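
The mode dispatch inside CONDITIONAL_WEAK_WRITE_BARRIER reads as a small decision tree: skip everything for SKIP_WRITE_BARRIER, record the write for everything else, and additionally notify incremental marking only for UPDATE_WRITE_BARRIER. A standalone sketch of just that branching (helper names invented; the real hooks are the incremental-marking and heap record calls above):

    #include <iostream>

    enum WriteBarrierMode { SKIP_WRITE_BARRIER, UPDATE_WRITE_BARRIER };

    void RecordMaybeWeakWriteStub() { std::cout << "incremental-marking record\n"; }
    void RecordWriteStub()          { std::cout << "heap record\n"; }

    // Mirrors the branch structure of the macro above.
    void ConditionalWeakWriteBarrier(WriteBarrierMode mode) {
      if (mode != SKIP_WRITE_BARRIER) {
        if (mode == UPDATE_WRITE_BARRIER) RecordMaybeWeakWriteStub();
        RecordWriteStub();
      }
    }

    int main() {
      ConditionalWeakWriteBarrier(SKIP_WRITE_BARRIER);    // no output
      ConditionalWeakWriteBarrier(UPDATE_WRITE_BARRIER);  // both records
    }
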
#define READ_DOUBLE_FIELD(p, offset) \
ReadDoubleValue(FIELD_ADDR_CONST(p, offset))
diff --git a/deps/v8/src/objects/scope-info.cc b/deps/v8/src/objects/scope-info.cc
index d199d7f6ec..0b517d7038 100644
--- a/deps/v8/src/objects/scope-info.cc
+++ b/deps/v8/src/objects/scope-info.cc
@@ -106,34 +106,48 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
receiver_info = NONE;
}
- bool has_new_target =
+ const bool has_new_target =
scope->is_declaration_scope() &&
scope->AsDeclarationScope()->new_target_var() != nullptr;
+ // TODO(cbruni): Don't always waste a field for the inferred name.
+ const bool has_inferred_function_name = scope->is_function_scope();
// Determine use and location of the function variable if it is present.
VariableAllocationInfo function_name_info;
- if (scope->is_function_scope() &&
- scope->AsDeclarationScope()->function_var() != nullptr) {
- Variable* var = scope->AsDeclarationScope()->function_var();
- if (!var->is_used()) {
- function_name_info = UNUSED;
- } else if (var->IsContextSlot()) {
- function_name_info = CONTEXT;
+ if (scope->is_function_scope()) {
+ if (scope->AsDeclarationScope()->function_var() != nullptr) {
+ Variable* var = scope->AsDeclarationScope()->function_var();
+ if (!var->is_used()) {
+ function_name_info = UNUSED;
+ } else if (var->IsContextSlot()) {
+ function_name_info = CONTEXT;
+ } else {
+ DCHECK(var->IsStackLocal());
+ function_name_info = STACK;
+ }
} else {
- DCHECK(var->IsStackLocal());
- function_name_info = STACK;
+ // Always reserve space for the debug name in the scope info.
+ function_name_info = UNUSED;
}
+ } else if (scope->is_module_scope() || scope->is_script_scope() ||
+ scope->is_eval_scope()) {
+ // Always reserve space for the debug name in the scope info.
+ function_name_info = UNUSED;
} else {
function_name_info = NONE;
}
const bool has_function_name = function_name_info != NONE;
+ const bool has_position_info = NeedsPositionInfo(scope->scope_type());
const bool has_receiver = receiver_info == STACK || receiver_info == CONTEXT;
const int parameter_count = scope->num_parameters();
const bool has_outer_scope_info = !outer_scope.is_null();
const int length = kVariablePartIndex + parameter_count +
(1 + stack_local_count) + 2 * context_local_count +
- (has_receiver ? 1 : 0) + (has_function_name ? 2 : 0) +
+ (has_receiver ? 1 : 0) +
+ (has_function_name ? kFunctionNameEntries : 0) +
+ (has_inferred_function_name ? 1 : 0) +
+ (has_position_info ? kPositionInfoEntries : 0) +
(has_outer_scope_info ? 1 : 0) +
(scope->is_module_scope()
? 2 + kModuleVariableEntryLength * module_vars_count
@@ -165,6 +179,7 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
ReceiverVariableField::encode(receiver_info) |
HasNewTargetField::encode(has_new_target) |
FunctionVariableField::encode(function_name_info) |
+ HasInferredFunctionNameField::encode(has_inferred_function_name) |
AsmModuleField::encode(asm_module) |
HasSimpleParametersField::encode(has_simple_parameters) |
FunctionKindField::encode(function_kind) |
@@ -255,14 +270,32 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
// If present, add the function variable name and its index.
DCHECK_EQ(index, scope_info->FunctionNameInfoIndex());
if (has_function_name) {
- int var_index = scope->AsDeclarationScope()->function_var()->index();
- scope_info->set(index++,
- *scope->AsDeclarationScope()->function_var()->name());
+ DisallowHeapAllocation no_gc;
+ Variable* var = scope->AsDeclarationScope()->function_var();
+ int var_index = -1;
+ Object* name = Smi::kZero;
+ if (var != nullptr) {
+ var_index = var->index();
+ name = *var->name();
+ }
+ scope_info->set(index++, name);
scope_info->set(index++, Smi::FromInt(var_index));
DCHECK(function_name_info != CONTEXT ||
var_index == scope_info->ContextLength() - 1);
}
+ DCHECK_EQ(index, scope_info->InferredFunctionNameIndex());
+ if (has_inferred_function_name) {
+ // The inferred function name is taken from the SFI.
+ index++;
+ }
+
+ DCHECK_EQ(index, scope_info->PositionInfoIndex());
+ if (has_position_info) {
+ scope_info->set(index++, Smi::FromInt(scope->start_position()));
+ scope_info->set(index++, Smi::FromInt(scope->end_position()));
+ }
+
// If present, add the outer scope info.
DCHECK(index == scope_info->OuterScopeInfoIndex());
if (has_outer_scope_info) {
@@ -320,6 +353,8 @@ Handle<ScopeInfo> ScopeInfo::CreateForWithScope(
DCHECK_EQ(index, scope_info->StackLocalNamesIndex());
DCHECK_EQ(index, scope_info->ReceiverInfoIndex());
DCHECK_EQ(index, scope_info->FunctionNameInfoIndex());
+ DCHECK_EQ(index, scope_info->InferredFunctionNameIndex());
+ DCHECK_EQ(index, scope_info->PositionInfoIndex());
DCHECK(index == scope_info->OuterScopeInfoIndex());
if (has_outer_scope_info) {
scope_info->set(index++, *outer_scope.ToHandleChecked());
@@ -335,17 +370,15 @@ Handle<ScopeInfo> ScopeInfo::CreateGlobalThisBinding(Isolate* isolate) {
const int stack_local_count = 0;
const int context_local_count = 1;
- const bool has_simple_parameters = true;
const VariableAllocationInfo receiver_info = CONTEXT;
const VariableAllocationInfo function_name_info = NONE;
- const bool has_function_name = false;
const bool has_receiver = true;
- const bool has_outer_scope_info = false;
+ const bool has_position_info = true;
const int parameter_count = 0;
const int length = kVariablePartIndex + parameter_count +
(1 + stack_local_count) + 2 * context_local_count +
- (has_receiver ? 1 : 0) + (has_function_name ? 2 : 0) +
- (has_outer_scope_info ? 1 : 0);
+ (has_receiver ? 1 : 0) +
+ (has_position_info ? kPositionInfoEntries : 0);
Factory* factory = isolate->factory();
Handle<ScopeInfo> scope_info = factory->NewScopeInfo(length);
@@ -358,9 +391,9 @@ Handle<ScopeInfo> ScopeInfo::CreateGlobalThisBinding(Isolate* isolate) {
ReceiverVariableField::encode(receiver_info) |
FunctionVariableField::encode(function_name_info) |
AsmModuleField::encode(false) |
- HasSimpleParametersField::encode(has_simple_parameters) |
+ HasSimpleParametersField::encode(true) |
FunctionKindField::encode(FunctionKind::kNormalFunction) |
- HasOuterScopeInfoField::encode(has_outer_scope_info) |
+ HasOuterScopeInfoField::encode(false) |
IsDebugEvaluateScopeField::encode(false);
scope_info->SetFlags(flags);
scope_info->SetParameterCount(parameter_count);
@@ -388,6 +421,11 @@ Handle<ScopeInfo> ScopeInfo::CreateGlobalThisBinding(Isolate* isolate) {
scope_info->set(index++, Smi::FromInt(receiver_index));
DCHECK_EQ(index, scope_info->FunctionNameInfoIndex());
+ DCHECK_EQ(index, scope_info->InferredFunctionNameIndex());
+ DCHECK_EQ(index, scope_info->PositionInfoIndex());
+ // Store dummy position to be in sync with the {scope_type}.
+ scope_info->set(index++, Smi::kZero);
+ scope_info->set(index++, Smi::kZero);
DCHECK_EQ(index, scope_info->OuterScopeInfoIndex());
DCHECK_EQ(index, scope_info->length());
DCHECK_EQ(scope_info->ParameterCount(), 0);
@@ -400,12 +438,12 @@ ScopeInfo* ScopeInfo::Empty(Isolate* isolate) {
return isolate->heap()->empty_scope_info();
}
-ScopeType ScopeInfo::scope_type() {
+ScopeType ScopeInfo::scope_type() const {
DCHECK_LT(0, length());
return ScopeTypeField::decode(Flags());
}
-bool ScopeInfo::CallsSloppyEval() {
+bool ScopeInfo::CallsSloppyEval() const {
bool calls_sloppy_eval =
length() > 0 && CallsSloppyEvalField::decode(Flags());
DCHECK_IMPLIES(calls_sloppy_eval, is_sloppy(language_mode()));
@@ -413,18 +451,20 @@ bool ScopeInfo::CallsSloppyEval() {
return calls_sloppy_eval;
}
-LanguageMode ScopeInfo::language_mode() {
+LanguageMode ScopeInfo::language_mode() const {
return length() > 0 ? LanguageModeField::decode(Flags())
: LanguageMode::kSloppy;
}
-bool ScopeInfo::is_declaration_scope() {
+bool ScopeInfo::is_declaration_scope() const {
return DeclarationScopeField::decode(Flags());
}
-int ScopeInfo::LocalCount() { return StackLocalCount() + ContextLocalCount(); }
+int ScopeInfo::LocalCount() const {
+ return StackLocalCount() + ContextLocalCount();
+}
-int ScopeInfo::StackSlotCount() {
+int ScopeInfo::StackSlotCount() const {
if (length() > 0) {
bool function_name_stack_slot =
FunctionVariableField::decode(Flags()) == STACK;
@@ -433,7 +473,7 @@ int ScopeInfo::StackSlotCount() {
return 0;
}
-int ScopeInfo::ContextLength() {
+int ScopeInfo::ContextLength() const {
if (length() > 0) {
int context_locals = ContextLocalCount();
bool function_name_context_slot =
@@ -454,47 +494,65 @@ int ScopeInfo::ContextLength() {
return 0;
}
-bool ScopeInfo::HasReceiver() {
- if (length() > 0) {
- return NONE != ReceiverVariableField::decode(Flags());
- } else {
- return false;
- }
+bool ScopeInfo::HasReceiver() const {
+ if (length() == 0) return false;
+ return NONE != ReceiverVariableField::decode(Flags());
}
-bool ScopeInfo::HasAllocatedReceiver() {
- if (length() > 0) {
- VariableAllocationInfo allocation = ReceiverVariableField::decode(Flags());
- return allocation == STACK || allocation == CONTEXT;
- } else {
- return false;
- }
+bool ScopeInfo::HasAllocatedReceiver() const {
+ if (length() == 0) return false;
+ VariableAllocationInfo allocation = ReceiverVariableField::decode(Flags());
+ return allocation == STACK || allocation == CONTEXT;
}
-bool ScopeInfo::HasNewTarget() { return HasNewTargetField::decode(Flags()); }
+bool ScopeInfo::HasNewTarget() const {
+ return HasNewTargetField::decode(Flags());
+}
-bool ScopeInfo::HasFunctionName() {
- if (length() > 0) {
- return NONE != FunctionVariableField::decode(Flags());
- } else {
- return false;
- }
+bool ScopeInfo::HasFunctionName() const {
+ if (length() == 0) return false;
+ return NONE != FunctionVariableField::decode(Flags());
}
-bool ScopeInfo::HasOuterScopeInfo() {
- if (length() > 0) {
- return HasOuterScopeInfoField::decode(Flags());
- } else {
- return false;
- }
+bool ScopeInfo::HasInferredFunctionName() const {
+ if (length() == 0) return false;
+ return HasInferredFunctionNameField::decode(Flags());
}
-bool ScopeInfo::IsDebugEvaluateScope() {
- if (length() > 0) {
- return IsDebugEvaluateScopeField::decode(Flags());
- } else {
- return false;
- }
+bool ScopeInfo::HasPositionInfo() const {
+ if (length() == 0) return false;
+ return NeedsPositionInfo(scope_type());
+}
+
+// static
+bool ScopeInfo::NeedsPositionInfo(ScopeType type) {
+ return type == FUNCTION_SCOPE || type == SCRIPT_SCOPE || type == EVAL_SCOPE ||
+ type == MODULE_SCOPE;
+}
+
+bool ScopeInfo::HasSharedFunctionName() const {
+ return FunctionName() != SharedFunctionInfo::kNoSharedNameSentinel;
+}
+
+void ScopeInfo::SetFunctionName(Object* name) {
+ DCHECK(HasFunctionName());
+ DCHECK(name->IsString() || name == SharedFunctionInfo::kNoSharedNameSentinel);
+ set(FunctionNameInfoIndex(), name);
+}
+
+void ScopeInfo::SetInferredFunctionName(String* name) {
+ DCHECK(HasInferredFunctionName());
+ set(InferredFunctionNameIndex(), name);
+}
+
+bool ScopeInfo::HasOuterScopeInfo() const {
+ if (length() == 0) return false;
+ return HasOuterScopeInfoField::decode(Flags());
+}
+
+bool ScopeInfo::IsDebugEvaluateScope() const {
+ if (length() == 0) return false;
+ return IsDebugEvaluateScopeField::decode(Flags());
}
void ScopeInfo::SetIsDebugEvaluateScope() {
@@ -506,31 +564,53 @@ void ScopeInfo::SetIsDebugEvaluateScope() {
}
}
-bool ScopeInfo::HasContext() { return ContextLength() > 0; }
+bool ScopeInfo::HasContext() const { return ContextLength() > 0; }
-String* ScopeInfo::FunctionName() {
+Object* ScopeInfo::FunctionName() const {
DCHECK(HasFunctionName());
- return String::cast(get(FunctionNameInfoIndex()));
+ return get(FunctionNameInfoIndex());
}
-ScopeInfo* ScopeInfo::OuterScopeInfo() {
+Object* ScopeInfo::InferredFunctionName() const {
+ DCHECK(HasInferredFunctionName());
+ return get(InferredFunctionNameIndex());
+}
+
+int ScopeInfo::StartPosition() const {
+ DCHECK(HasPositionInfo());
+ return Smi::cast(get(PositionInfoIndex()))->value();
+}
+
+int ScopeInfo::EndPosition() const {
+ DCHECK(HasPositionInfo());
+ return Smi::cast(get(PositionInfoIndex() + 1))->value();
+}
+
+void ScopeInfo::SetPositionInfo(int start, int end) {
+ DCHECK(HasPositionInfo());
+ DCHECK_LE(start, end);
+ set(PositionInfoIndex(), Smi::FromInt(start));
+ set(PositionInfoIndex() + 1, Smi::FromInt(end));
+}
+
+ScopeInfo* ScopeInfo::OuterScopeInfo() const {
DCHECK(HasOuterScopeInfo());
return ScopeInfo::cast(get(OuterScopeInfoIndex()));
}
-ModuleInfo* ScopeInfo::ModuleDescriptorInfo() {
+ModuleInfo* ScopeInfo::ModuleDescriptorInfo() const {
DCHECK(scope_type() == MODULE_SCOPE);
return ModuleInfo::cast(get(ModuleInfoIndex()));
}
-String* ScopeInfo::ParameterName(int var) {
+String* ScopeInfo::ParameterName(int var) const {
DCHECK_LE(0, var);
DCHECK_LT(var, ParameterCount());
int info_index = ParameterNamesIndex() + var;
return String::cast(get(info_index));
}
-String* ScopeInfo::LocalName(int var) {
+String* ScopeInfo::LocalName(int var) const {
DCHECK_LE(0, var);
DCHECK_LT(var, LocalCount());
DCHECK(StackLocalNamesIndex() + StackLocalCount() ==
@@ -539,28 +619,28 @@ String* ScopeInfo::LocalName(int var) {
return String::cast(get(info_index));
}
-String* ScopeInfo::StackLocalName(int var) {
+String* ScopeInfo::StackLocalName(int var) const {
DCHECK_LE(0, var);
DCHECK_LT(var, StackLocalCount());
int info_index = StackLocalNamesIndex() + var;
return String::cast(get(info_index));
}
-int ScopeInfo::StackLocalIndex(int var) {
+int ScopeInfo::StackLocalIndex(int var) const {
DCHECK_LE(0, var);
DCHECK_LT(var, StackLocalCount());
int first_slot_index = Smi::ToInt(get(StackLocalFirstSlotIndex()));
return first_slot_index + var;
}
-String* ScopeInfo::ContextLocalName(int var) {
+String* ScopeInfo::ContextLocalName(int var) const {
DCHECK_LE(0, var);
DCHECK_LT(var, ContextLocalCount());
int info_index = ContextLocalNamesIndex() + var;
return String::cast(get(info_index));
}
-VariableMode ScopeInfo::ContextLocalMode(int var) {
+VariableMode ScopeInfo::ContextLocalMode(int var) const {
DCHECK_LE(0, var);
DCHECK_LT(var, ContextLocalCount());
int info_index = ContextLocalInfosIndex() + var;
@@ -568,7 +648,7 @@ VariableMode ScopeInfo::ContextLocalMode(int var) {
return VariableModeField::decode(value);
}
-InitializationFlag ScopeInfo::ContextLocalInitFlag(int var) {
+InitializationFlag ScopeInfo::ContextLocalInitFlag(int var) const {
DCHECK_LE(0, var);
DCHECK_LT(var, ContextLocalCount());
int info_index = ContextLocalInfosIndex() + var;
@@ -576,7 +656,7 @@ InitializationFlag ScopeInfo::ContextLocalInitFlag(int var) {
return InitFlagField::decode(value);
}
-MaybeAssignedFlag ScopeInfo::ContextLocalMaybeAssignedFlag(int var) {
+MaybeAssignedFlag ScopeInfo::ContextLocalMaybeAssignedFlag(int var) const {
DCHECK_LE(0, var);
DCHECK_LT(var, ContextLocalCount());
int info_index = ContextLocalInfosIndex() + var;
@@ -584,6 +664,7 @@ MaybeAssignedFlag ScopeInfo::ContextLocalMaybeAssignedFlag(int var) {
return MaybeAssignedFlagField::decode(value);
}
+// static
bool ScopeInfo::VariableIsSynthetic(String* name) {
// There's currently no flag stored on the ScopeInfo to indicate that a
// variable is a compiler-introduced temporary. However, to avoid conflict
@@ -593,16 +674,15 @@ bool ScopeInfo::VariableIsSynthetic(String* name) {
name->Equals(name->GetHeap()->this_string());
}
-int ScopeInfo::StackSlotIndex(String* name) {
+int ScopeInfo::StackSlotIndex(String* name) const {
DCHECK(name->IsInternalizedString());
- if (length() > 0) {
- int first_slot_index = Smi::ToInt(get(StackLocalFirstSlotIndex()));
- int start = StackLocalNamesIndex();
- int end = start + StackLocalCount();
- for (int i = start; i < end; ++i) {
- if (name == get(i)) {
- return i - start + first_slot_index;
- }
+ if (length() == 0) return -1;
+ int first_slot_index = Smi::ToInt(get(StackLocalFirstSlotIndex()));
+ int start = StackLocalNamesIndex();
+ int end = start + StackLocalCount();
+ for (int i = start; i < end; ++i) {
+ if (name == get(i)) {
+ return i - start + first_slot_index;
}
}
return -1;
@@ -631,6 +711,7 @@ int ScopeInfo::ModuleIndex(Handle<String> name, VariableMode* mode,
return 0;
}
+// static
int ScopeInfo::ContextSlotIndex(Handle<ScopeInfo> scope_info,
Handle<String> name, VariableMode* mode,
InitializationFlag* init_flag,
@@ -640,66 +721,66 @@ int ScopeInfo::ContextSlotIndex(Handle<ScopeInfo> scope_info,
DCHECK_NOT_NULL(init_flag);
DCHECK_NOT_NULL(maybe_assigned_flag);
- if (scope_info->length() > 0) {
- ContextSlotCache* context_slot_cache =
- scope_info->GetIsolate()->context_slot_cache();
- int result = context_slot_cache->Lookup(*scope_info, *name, mode, init_flag,
- maybe_assigned_flag);
- if (result != ContextSlotCache::kNotFound) {
+ if (scope_info->length() == 0) return -1;
+
+ ContextSlotCache* context_slot_cache =
+ scope_info->GetIsolate()->context_slot_cache();
+ int result = context_slot_cache->Lookup(*scope_info, *name, mode, init_flag,
+ maybe_assigned_flag);
+ if (result != ContextSlotCache::kNotFound) {
+ DCHECK_LT(result, scope_info->ContextLength());
+ return result;
+ }
+
+ int start = scope_info->ContextLocalNamesIndex();
+ int end = start + scope_info->ContextLocalCount();
+ for (int i = start; i < end; ++i) {
+ if (*name == scope_info->get(i)) {
+ int var = i - start;
+ *mode = scope_info->ContextLocalMode(var);
+ *init_flag = scope_info->ContextLocalInitFlag(var);
+ *maybe_assigned_flag = scope_info->ContextLocalMaybeAssignedFlag(var);
+ result = Context::MIN_CONTEXT_SLOTS + var;
+
+ context_slot_cache->Update(scope_info, name, *mode, *init_flag,
+ *maybe_assigned_flag, result);
DCHECK_LT(result, scope_info->ContextLength());
return result;
}
-
- int start = scope_info->ContextLocalNamesIndex();
- int end = start + scope_info->ContextLocalCount();
- for (int i = start; i < end; ++i) {
- if (*name == scope_info->get(i)) {
- int var = i - start;
- *mode = scope_info->ContextLocalMode(var);
- *init_flag = scope_info->ContextLocalInitFlag(var);
- *maybe_assigned_flag = scope_info->ContextLocalMaybeAssignedFlag(var);
- result = Context::MIN_CONTEXT_SLOTS + var;
-
- context_slot_cache->Update(scope_info, name, *mode, *init_flag,
- *maybe_assigned_flag, result);
- DCHECK_LT(result, scope_info->ContextLength());
- return result;
- }
- }
- // Cache as not found. Mode, init flag and maybe assigned flag don't matter.
- context_slot_cache->Update(scope_info, name, TEMPORARY,
- kNeedsInitialization, kNotAssigned, -1);
}
+ // Cache as not found. Mode, init flag and maybe assigned flag don't matter.
+ context_slot_cache->Update(scope_info, name, TEMPORARY, kNeedsInitialization,
+ kNotAssigned, -1);
return -1;
}
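
The restructured ContextSlotIndex above keeps the original strategy: consult the per-isolate slot cache first, fall back to a linear scan over the serialized names, and cache whatever was found — including the not-found case, so repeated misses stay cheap. A standalone sketch of that pattern (invented names; the real code also offsets hits by Context::MIN_CONTEXT_SLOTS and records mode/flags alongside the index):

    #include <iostream>
    #include <string>
    #include <unordered_map>
    #include <vector>

    constexpr int kNotFound = -1;

    int ContextSlotIndex(const std::vector<std::string>& names,
                         std::unordered_map<std::string, int>& cache,
                         const std::string& name) {
      auto hit = cache.find(name);
      if (hit != cache.end()) return hit->second;  // cache hit, possibly -1
      int result = kNotFound;
      for (size_t i = 0; i < names.size(); ++i) {
        if (names[i] == name) { result = static_cast<int>(i); break; }
      }
      cache[name] = result;  // negative results are cached too
      return result;
    }

    int main() {
      std::vector<std::string> names{"a", "b", "c"};
      std::unordered_map<std::string, int> cache;
      std::cout << ContextSlotIndex(names, cache, "b") << "\n";   // 1
      std::cout << ContextSlotIndex(names, cache, "zz") << "\n";  // -1, now cached
      std::cout << ContextSlotIndex(names, cache, "zz") << "\n";  // -1 from cache
    }
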
-int ScopeInfo::ParameterIndex(String* name) {
+int ScopeInfo::ParameterIndex(String* name) const {
DCHECK(name->IsInternalizedString());
- if (length() > 0) {
- // We must read parameters from the end since for
- // multiply declared parameters the value of the
- // last declaration of that parameter is used
- // inside a function (and thus we need to look
- // at the last index). Was bug# 1110337.
- int start = ParameterNamesIndex();
- int end = start + ParameterCount();
- for (int i = end - 1; i >= start; --i) {
- if (name == get(i)) {
- return i - start;
- }
+ if (length() == 0) return -1;
+ // We must read parameters from the end since for
+ // multiply declared parameters the value of the
+ // last declaration of that parameter is used
+ // inside a function (and thus we need to look
+ // at the last index). Was bug# 1110337.
+ int start = ParameterNamesIndex();
+ int end = start + ParameterCount();
+ for (int i = end - 1; i >= start; --i) {
+ if (name == get(i)) {
+ return i - start;
}
}
return -1;
}
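
The backwards scan kept in ParameterIndex matters for sloppy-mode functions with duplicate parameter names, where the last declaration is the one live in the body; a minimal illustration of that reverse lookup (invented helper, not V8 code):

    #include <iostream>
    #include <string>
    #include <vector>

    // For parameter lists like function f(x, y, x) {}, the last "x" wins, so the
    // scan must start from the end.
    int ParameterIndex(const std::vector<std::string>& params, const std::string& name) {
      for (int i = static_cast<int>(params.size()) - 1; i >= 0; --i) {
        if (params[i] == name) return i;
      }
      return -1;
    }

    int main() {
      std::cout << ParameterIndex({"x", "y", "x"}, "x") << "\n";  // 2, not 0
    }
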
-int ScopeInfo::ReceiverContextSlotIndex() {
- if (length() > 0 && ReceiverVariableField::decode(Flags()) == CONTEXT)
+int ScopeInfo::ReceiverContextSlotIndex() const {
+ if (length() > 0 && ReceiverVariableField::decode(Flags()) == CONTEXT) {
return Smi::ToInt(get(ReceiverInfoIndex()));
+ }
return -1;
}
-int ScopeInfo::FunctionContextSlotIndex(String* name) {
+int ScopeInfo::FunctionContextSlotIndex(String* name) const {
DCHECK(name->IsInternalizedString());
if (length() > 0) {
if (FunctionVariableField::decode(Flags()) == CONTEXT &&
@@ -710,48 +791,63 @@ int ScopeInfo::FunctionContextSlotIndex(String* name) {
return -1;
}
-FunctionKind ScopeInfo::function_kind() {
+FunctionKind ScopeInfo::function_kind() const {
return FunctionKindField::decode(Flags());
}
-int ScopeInfo::ParameterNamesIndex() {
+int ScopeInfo::ParameterNamesIndex() const {
DCHECK_LT(0, length());
return kVariablePartIndex;
}
-int ScopeInfo::StackLocalFirstSlotIndex() {
+int ScopeInfo::StackLocalFirstSlotIndex() const {
return ParameterNamesIndex() + ParameterCount();
}
-int ScopeInfo::StackLocalNamesIndex() { return StackLocalFirstSlotIndex() + 1; }
+int ScopeInfo::StackLocalNamesIndex() const {
+ return StackLocalFirstSlotIndex() + 1;
+}
-int ScopeInfo::ContextLocalNamesIndex() {
+int ScopeInfo::ContextLocalNamesIndex() const {
return StackLocalNamesIndex() + StackLocalCount();
}
-int ScopeInfo::ContextLocalInfosIndex() {
+int ScopeInfo::ContextLocalInfosIndex() const {
return ContextLocalNamesIndex() + ContextLocalCount();
}
-int ScopeInfo::ReceiverInfoIndex() {
+int ScopeInfo::ReceiverInfoIndex() const {
return ContextLocalInfosIndex() + ContextLocalCount();
}
-int ScopeInfo::FunctionNameInfoIndex() {
+int ScopeInfo::FunctionNameInfoIndex() const {
return ReceiverInfoIndex() + (HasAllocatedReceiver() ? 1 : 0);
}
-int ScopeInfo::OuterScopeInfoIndex() {
- return FunctionNameInfoIndex() + (HasFunctionName() ? 2 : 0);
+int ScopeInfo::InferredFunctionNameIndex() const {
+ return FunctionNameInfoIndex() +
+ (HasFunctionName() ? kFunctionNameEntries : 0);
+}
+
+int ScopeInfo::PositionInfoIndex() const {
+ return InferredFunctionNameIndex() + (HasInferredFunctionName() ? 1 : 0);
}
-int ScopeInfo::ModuleInfoIndex() {
+int ScopeInfo::OuterScopeInfoIndex() const {
+ return PositionInfoIndex() + (HasPositionInfo() ? kPositionInfoEntries : 0);
+}
+
+int ScopeInfo::ModuleInfoIndex() const {
return OuterScopeInfoIndex() + (HasOuterScopeInfo() ? 1 : 0);
}
-int ScopeInfo::ModuleVariableCountIndex() { return ModuleInfoIndex() + 1; }
+int ScopeInfo::ModuleVariableCountIndex() const {
+ return ModuleInfoIndex() + 1;
+}
-int ScopeInfo::ModuleVariablesIndex() { return ModuleVariableCountIndex() + 1; }
+int ScopeInfo::ModuleVariablesIndex() const {
+ return ModuleVariableCountIndex() + 1;
+}
void ScopeInfo::ModuleVariable(int i, String** name, int* index,
VariableMode* mode,
diff --git a/deps/v8/src/objects/scope-info.h b/deps/v8/src/objects/scope-info.h
index 0532686ba0..c8efa475c1 100644
--- a/deps/v8/src/objects/scope-info.h
+++ b/deps/v8/src/objects/scope-info.h
@@ -38,26 +38,26 @@ class ScopeInfo : public FixedArray {
DECL_PRINTER(ScopeInfo)
// Return the type of this scope.
- ScopeType scope_type();
+ ScopeType scope_type() const;
// Return the language mode of this scope.
- LanguageMode language_mode();
+ LanguageMode language_mode() const;
// True if this scope is a (var) declaration scope.
- bool is_declaration_scope();
+ bool is_declaration_scope() const;
// Does this scope make a sloppy eval call?
- bool CallsSloppyEval();
+ bool CallsSloppyEval() const;
// Return the total number of locals allocated on the stack and in the
// context. This includes the parameters that are allocated in the context.
- int LocalCount();
+ int LocalCount() const;
// Return the number of stack slots for code. This number consists of two
// parts:
// 1. One stack slot per stack allocated local.
// 2. One stack slot for the function name if it is stack allocated.
- int StackSlotCount();
+ int StackSlotCount() const;
// Return the number of context slots for code if a context is allocated. This
// number consists of three parts:
@@ -66,57 +66,77 @@ class ScopeInfo : public FixedArray {
// 3. One context slot for the function name if it is context allocated.
// Parameters allocated in the context count as context allocated locals. If
// no contexts are allocated for this scope ContextLength returns 0.
- int ContextLength();
+ int ContextLength() const;
// Does this scope declare a "this" binding?
- bool HasReceiver();
+ bool HasReceiver() const;
// Does this scope declare a "this" binding, and the "this" binding is stack-
// or context-allocated?
- bool HasAllocatedReceiver();
+ bool HasAllocatedReceiver() const;
// Does this scope declare a "new.target" binding?
- bool HasNewTarget();
+ bool HasNewTarget() const;
// Is this scope the scope of a named function expression?
- bool HasFunctionName();
+ bool HasFunctionName() const;
+
+ // See SharedFunctionInfo::HasSharedName.
+ bool HasSharedFunctionName() const;
+
+ bool HasInferredFunctionName() const;
+
+ void SetFunctionName(Object* name);
+ void SetInferredFunctionName(String* name);
+
+  // Does this scope carry source position info? True for function, script,
+  // eval, and module scopes (see NeedsPositionInfo).
+ bool HasPositionInfo() const;
// Return if contexts are allocated for this scope.
- bool HasContext();
+ bool HasContext() const;
// Return if this is a function scope with "use asm".
- inline bool IsAsmModule();
+ inline bool IsAsmModule() const;
- inline bool HasSimpleParameters();
+ inline bool HasSimpleParameters() const;
// Return the function_name if present.
- String* FunctionName();
+ Object* FunctionName() const;
+
+ // Return the function's inferred name if present.
+ // See SharedFunctionInfo::function_identifier.
+ Object* InferredFunctionName() const;
- ModuleInfo* ModuleDescriptorInfo();
+ // Position information accessors.
+ int StartPosition() const;
+ int EndPosition() const;
+ void SetPositionInfo(int start, int end);
+
+ ModuleInfo* ModuleDescriptorInfo() const;
// Return the name of the given parameter.
- String* ParameterName(int var);
+ String* ParameterName(int var) const;
// Return the name of the given local.
- String* LocalName(int var);
+ String* LocalName(int var) const;
// Return the name of the given stack local.
- String* StackLocalName(int var);
+ String* StackLocalName(int var) const;
// Return the name of the given stack local.
- int StackLocalIndex(int var);
+ int StackLocalIndex(int var) const;
// Return the name of the given context local.
- String* ContextLocalName(int var);
+ String* ContextLocalName(int var) const;
// Return the mode of the given context local.
- VariableMode ContextLocalMode(int var);
+ VariableMode ContextLocalMode(int var) const;
// Return the initialization flag of the given context local.
- InitializationFlag ContextLocalInitFlag(int var);
+ InitializationFlag ContextLocalInitFlag(int var) const;
// Return the initialization flag of the given context local.
- MaybeAssignedFlag ContextLocalMaybeAssignedFlag(int var);
+ MaybeAssignedFlag ContextLocalMaybeAssignedFlag(int var) const;
// Return true if this local was introduced by the compiler, and should not be
// exposed to the user in a debugger.
@@ -126,7 +146,7 @@ class ScopeInfo : public FixedArray {
// the stack slot index for a given slot name if the slot is
// present; otherwise returns a value < 0. The name must be an internalized
// string.
- int StackSlotIndex(String* name);
+ int StackSlotIndex(String* name) const;
// Lookup support for serialized scope info. Returns the local context slot
// index for a given slot name if the slot is present; otherwise
@@ -147,33 +167,33 @@ class ScopeInfo : public FixedArray {
// Lookup support for serialized scope info. Returns the
// parameter index for a given parameter name if the parameter is present;
// otherwise returns a value < 0. The name must be an internalized string.
- int ParameterIndex(String* name);
+ int ParameterIndex(String* name) const;
// Lookup support for serialized scope info. Returns the function context
// slot index if the function name is present and context-allocated (named
// function expressions, only), otherwise returns a value < 0. The name
// must be an internalized string.
- int FunctionContextSlotIndex(String* name);
+ int FunctionContextSlotIndex(String* name) const;
// Lookup support for serialized scope info. Returns the receiver context
// slot index if scope has a "this" binding, and the binding is
// context-allocated. Otherwise returns a value < 0.
- int ReceiverContextSlotIndex();
+ int ReceiverContextSlotIndex() const;
- FunctionKind function_kind();
+ FunctionKind function_kind() const;
// Returns true if this ScopeInfo is linked to a outer ScopeInfo.
- bool HasOuterScopeInfo();
+ bool HasOuterScopeInfo() const;
// Returns true if this ScopeInfo was created for a debug-evaluate scope.
- bool IsDebugEvaluateScope();
+ bool IsDebugEvaluateScope() const;
// Can be used to mark a ScopeInfo that looks like a with-scope as actually
// being a debug-evaluate scope.
void SetIsDebugEvaluateScope();
// Return the outer ScopeInfo if present.
- ScopeInfo* OuterScopeInfo();
+ ScopeInfo* OuterScopeInfo() const;
#ifdef DEBUG
bool Equals(ScopeInfo* other) const;
@@ -203,7 +223,7 @@ class ScopeInfo : public FixedArray {
#define FIELD_ACCESSORS(name) \
inline void Set##name(int value); \
- inline int name();
+ inline int name() const;
FOR_EACH_SCOPE_INFO_NUMERIC_FIELD(FIELD_ACCESSORS)
#undef FIELD_ACCESSORS
@@ -249,23 +269,32 @@ class ScopeInfo : public FixedArray {
// information about the function variable. It always occupies two array
// slots: a. The name of the function variable.
// b. The context or stack slot index for the variable.
- // 8. OuterScopeInfoIndex:
+ // 8. InferredFunctionName:
+ // Contains the function's inferred name.
+ // 9. SourcePosition:
+ // Contains two slots with a) the startPosition and b) the endPosition if
+ // the scope belongs to a function or script.
+ // 10. OuterScopeInfoIndex:
// The outer scope's ScopeInfo or the hole if there's none.
- // 9. ModuleInfo, ModuleVariableCount, and ModuleVariables:
+ // 11. ModuleInfo, ModuleVariableCount, and ModuleVariables:
// For a module scope, this part contains the ModuleInfo, the number of
// MODULE-allocated variables, and the metadata of those variables. For
// non-module scopes it is empty.
- int ParameterNamesIndex();
- int StackLocalFirstSlotIndex();
- int StackLocalNamesIndex();
- int ContextLocalNamesIndex();
- int ContextLocalInfosIndex();
- int ReceiverInfoIndex();
- int FunctionNameInfoIndex();
- int OuterScopeInfoIndex();
- int ModuleInfoIndex();
- int ModuleVariableCountIndex();
- int ModuleVariablesIndex();
+ int ParameterNamesIndex() const;
+ int StackLocalFirstSlotIndex() const;
+ int StackLocalNamesIndex() const;
+ int ContextLocalNamesIndex() const;
+ int ContextLocalInfosIndex() const;
+ int ReceiverInfoIndex() const;
+ int FunctionNameInfoIndex() const;
+ int InferredFunctionNameIndex() const;
+ int PositionInfoIndex() const;
+ int OuterScopeInfoIndex() const;
+ int ModuleInfoIndex() const;
+ int ModuleVariableCountIndex() const;
+ int ModuleVariablesIndex() const;
+
+ static bool NeedsPositionInfo(ScopeType type);
int Lookup(Handle<String> name, int start, int end, VariableMode* mode,
VariableLocation* location, InitializationFlag* init_flag,
@@ -283,6 +312,9 @@ class ScopeInfo : public FixedArray {
// the receiver.
enum VariableAllocationInfo { NONE, STACK, CONTEXT, UNUSED };
+ static const int kFunctionNameEntries = 2;
+ static const int kPositionInfoEntries = 2;
+
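
Taken together with the layout list above and the length computation in ScopeInfo::Create earlier in this patch, a hypothetical (non-module) function scope shows how these constants add up; kVariablePartIndex is left symbolic here since its value comes from the numeric-field list, which is not shown in this hunk:

    // Assumed scope: 2 parameters, 1 stack local, 3 context locals, a
    // context-allocated receiver, a named function expression, an inferred-name
    // slot, position info, and an outer ScopeInfo.
    // length = kVariablePartIndex
    //        + 2          // parameter names
    //        + (1 + 1)    // first stack slot index + stack local names
    //        + 2 * 3      // context local names + their info slots
    //        + 1          // receiver slot index
    //        + 2          // kFunctionNameEntries: function name + variable index
    //        + 1          // inferred function name
    //        + 2          // kPositionInfoEntries: start + end position
    //        + 1          // outer scope info
    //        = kVariablePartIndex + 17
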
// Properties of scopes.
class ScopeTypeField : public BitField<ScopeType, 0, 4> {};
class CallsSloppyEvalField : public BitField<bool, ScopeTypeField::kNext, 1> {
@@ -299,8 +331,12 @@ class ScopeInfo : public FixedArray {
: public BitField<bool, ReceiverVariableField::kNext, 1> {};
class FunctionVariableField
: public BitField<VariableAllocationInfo, HasNewTargetField::kNext, 2> {};
- class AsmModuleField
+ // TODO(cbruni): Combine with function variable field when only storing the
+ // function name.
+ class HasInferredFunctionNameField
: public BitField<bool, FunctionVariableField::kNext, 1> {};
+ class AsmModuleField
+ : public BitField<bool, HasInferredFunctionNameField::kNext, 1> {};
class HasSimpleParametersField
: public BitField<bool, AsmModuleField::kNext, 1> {};
class FunctionKindField
diff --git a/deps/v8/src/objects/script-inl.h b/deps/v8/src/objects/script-inl.h
index c5bd407628..c8779cf9ee 100644
--- a/deps/v8/src/objects/script-inl.h
+++ b/deps/v8/src/objects/script-inl.h
@@ -31,7 +31,8 @@ ACCESSORS_CHECKED(Script, eval_from_shared_or_wrapped_arguments, Object,
this->type() != TYPE_WASM)
SMI_ACCESSORS_CHECKED(Script, eval_from_position, kEvalFromPositionOffset,
this->type() != TYPE_WASM)
-ACCESSORS(Script, shared_function_infos, FixedArray, kSharedFunctionInfosOffset)
+ACCESSORS(Script, shared_function_infos, WeakFixedArray,
+ kSharedFunctionInfosOffset)
SMI_ACCESSORS(Script, flags, kFlagsOffset)
ACCESSORS(Script, source_url, Object, kSourceUrlOffset)
ACCESSORS(Script, source_mapping_url, Object, kSourceMappingUrlOffset)
diff --git a/deps/v8/src/objects/script.h b/deps/v8/src/objects/script.h
index 4d84be2262..2e0f16d26d 100644
--- a/deps/v8/src/objects/script.h
+++ b/deps/v8/src/objects/script.h
@@ -86,7 +86,7 @@ class Script : public Struct {
// [shared_function_infos]: weak fixed array containing all shared
// function infos created from this script.
- DECL_ACCESSORS(shared_function_infos, FixedArray)
+ DECL_ACCESSORS(shared_function_infos, WeakFixedArray)
// [flags]: Holds an exciting bitfield.
DECL_INT_ACCESSORS(flags)
@@ -185,7 +185,7 @@ class Script : public Struct {
Script* Next();
private:
- WeakFixedArray::Iterator iterator_;
+ FixedArrayOfWeakCells::Iterator iterator_;
DISALLOW_COPY_AND_ASSIGN(Iterator);
};
diff --git a/deps/v8/src/objects/shared-function-info-inl.h b/deps/v8/src/objects/shared-function-info-inl.h
index 2f3b32f17c..de416fcdd0 100644
--- a/deps/v8/src/objects/shared-function-info-inl.h
+++ b/deps/v8/src/objects/shared-function-info-inl.h
@@ -19,25 +19,28 @@ CAST_ACCESSOR(PreParsedScopeData)
ACCESSORS(PreParsedScopeData, scope_data, PodArray<uint8_t>, kScopeDataOffset)
ACCESSORS(PreParsedScopeData, child_data, FixedArray, kChildDataOffset)
+CAST_ACCESSOR(InterpreterData)
+ACCESSORS(InterpreterData, bytecode_array, BytecodeArray, kBytecodeArrayOffset)
+ACCESSORS(InterpreterData, interpreter_trampoline, Code,
+ kInterpreterTrampolineOffset)
+
TYPE_CHECKER(SharedFunctionInfo, SHARED_FUNCTION_INFO_TYPE)
CAST_ACCESSOR(SharedFunctionInfo)
DEFINE_DEOPT_ELEMENT_ACCESSORS(SharedFunctionInfo, Object)
-ACCESSORS(SharedFunctionInfo, raw_name, Object, kNameOffset)
-ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
-ACCESSORS(SharedFunctionInfo, feedback_metadata, FeedbackMetadata,
- kFeedbackMetadataOffset)
+ACCESSORS(SharedFunctionInfo, name_or_scope_info, Object,
+ kNameOrScopeInfoOffset)
ACCESSORS(SharedFunctionInfo, function_data, Object, kFunctionDataOffset)
ACCESSORS(SharedFunctionInfo, script, Object, kScriptOffset)
ACCESSORS(SharedFunctionInfo, debug_info, Object, kDebugInfoOffset)
ACCESSORS(SharedFunctionInfo, function_identifier, Object,
kFunctionIdentifierOffset)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, start_position_and_type,
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, raw_start_position_and_type,
is_named_expression,
SharedFunctionInfo::IsNamedExpressionBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_toplevel,
- SharedFunctionInfo::IsTopLevelBit)
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, raw_start_position_and_type,
+ is_toplevel, SharedFunctionInfo::IsTopLevelBit)
INT_ACCESSORS(SharedFunctionInfo, function_literal_id, kFunctionLiteralIdOffset)
#if V8_SFI_HAS_UNIQUE_ID
@@ -48,51 +51,67 @@ INT_ACCESSORS(SharedFunctionInfo, internal_formal_parameter_count,
kFormalParameterCountOffset)
INT_ACCESSORS(SharedFunctionInfo, expected_nof_properties,
kExpectedNofPropertiesOffset)
-INT_ACCESSORS(SharedFunctionInfo, end_position, kEndPositionOffset)
-INT_ACCESSORS(SharedFunctionInfo, start_position_and_type,
+INT_ACCESSORS(SharedFunctionInfo, raw_end_position, kEndPositionOffset)
+INT_ACCESSORS(SharedFunctionInfo, raw_start_position_and_type,
kStartPositionAndTypeOffset)
INT_ACCESSORS(SharedFunctionInfo, function_token_position,
kFunctionTokenPositionOffset)
-INT_ACCESSORS(SharedFunctionInfo, compiler_hints, kCompilerHintsOffset)
+INT_ACCESSORS(SharedFunctionInfo, flags, kFlagsOffset)
-bool SharedFunctionInfo::has_shared_name() const {
- return raw_name() != kNoSharedNameSentinel;
+bool SharedFunctionInfo::HasSharedName() const {
+ Object* value = name_or_scope_info();
+ if (value->IsScopeInfo()) {
+ return ScopeInfo::cast(value)->HasSharedFunctionName();
+ }
+ return value != kNoSharedNameSentinel;
}
-String* SharedFunctionInfo::name() const {
- if (!has_shared_name()) return GetHeap()->empty_string();
- DCHECK(raw_name()->IsString());
- return String::cast(raw_name());
+String* SharedFunctionInfo::Name() const {
+ if (!HasSharedName()) return GetHeap()->empty_string();
+ Object* value = name_or_scope_info();
+ if (value->IsScopeInfo()) {
+ if (ScopeInfo::cast(value)->HasFunctionName()) {
+ return String::cast(ScopeInfo::cast(value)->FunctionName());
+ }
+ return GetHeap()->empty_string();
+ }
+ return String::cast(value);
}
-void SharedFunctionInfo::set_name(String* name) {
- set_raw_name(name);
+void SharedFunctionInfo::SetName(String* name) {
+ Object* maybe_scope_info = name_or_scope_info();
+ if (maybe_scope_info->IsScopeInfo()) {
+ ScopeInfo::cast(maybe_scope_info)->SetFunctionName(name);
+ } else {
+ DCHECK(maybe_scope_info->IsString() ||
+ maybe_scope_info == kNoSharedNameSentinel);
+ set_name_or_scope_info(name);
+ }
UpdateFunctionMapIndex();
}
AbstractCode* SharedFunctionInfo::abstract_code() {
if (HasBytecodeArray()) {
- return AbstractCode::cast(bytecode_array());
+ return AbstractCode::cast(GetBytecodeArray());
} else {
- return AbstractCode::cast(code());
+ return AbstractCode::cast(GetCode());
}
}
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints, is_wrapped,
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, is_wrapped,
SharedFunctionInfo::IsWrappedBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints, allows_lazy_compilation,
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, allows_lazy_compilation,
SharedFunctionInfo::AllowLazyCompilationBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints,
- has_duplicate_parameters,
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, has_duplicate_parameters,
SharedFunctionInfo::HasDuplicateParametersBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints, is_declaration,
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, is_declaration,
SharedFunctionInfo::IsDeclarationBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints, native,
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, native,
SharedFunctionInfo::IsNativeBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints, is_asm_wasm_broken,
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, is_asm_wasm_broken,
SharedFunctionInfo::IsAsmWasmBrokenBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, compiler_hints,
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags,
requires_instance_fields_initializer,
SharedFunctionInfo::RequiresInstanceFieldsInitializer)
@@ -101,12 +120,12 @@ bool SharedFunctionInfo::optimization_disabled() const {
}
BailoutReason SharedFunctionInfo::disable_optimization_reason() const {
- return DisabledOptimizationReasonBits::decode(compiler_hints());
+ return DisabledOptimizationReasonBits::decode(flags());
}
LanguageMode SharedFunctionInfo::language_mode() {
STATIC_ASSERT(LanguageModeSize == 2);
- return construct_language_mode(IsStrictBit::decode(compiler_hints()));
+ return construct_language_mode(IsStrictBit::decode(flags()));
}
void SharedFunctionInfo::set_language_mode(LanguageMode language_mode) {
@@ -114,40 +133,60 @@ void SharedFunctionInfo::set_language_mode(LanguageMode language_mode) {
// We only allow language mode transitions that set the same language mode
// again or go up in the chain:
DCHECK(is_sloppy(this->language_mode()) || is_strict(language_mode));
- int hints = compiler_hints();
+ int hints = flags();
hints = IsStrictBit::update(hints, is_strict(language_mode));
- set_compiler_hints(hints);
+ set_flags(hints);
UpdateFunctionMapIndex();
}
FunctionKind SharedFunctionInfo::kind() const {
- return FunctionKindBits::decode(compiler_hints());
+ return FunctionKindBits::decode(flags());
}
void SharedFunctionInfo::set_kind(FunctionKind kind) {
- int hints = compiler_hints();
+ int hints = flags();
hints = FunctionKindBits::update(hints, kind);
hints = IsClassConstructorBit::update(hints, IsClassConstructor(kind));
hints = IsDerivedConstructorBit::update(hints, IsDerivedConstructor(kind));
- set_compiler_hints(hints);
+ set_flags(hints);
UpdateFunctionMapIndex();
}
bool SharedFunctionInfo::needs_home_object() const {
- return NeedsHomeObjectBit::decode(compiler_hints());
+ return NeedsHomeObjectBit::decode(flags());
}
void SharedFunctionInfo::set_needs_home_object(bool value) {
- int hints = compiler_hints();
+ int hints = flags();
hints = NeedsHomeObjectBit::update(hints, value);
- set_compiler_hints(hints);
+ set_flags(hints);
UpdateFunctionMapIndex();
}
+bool SharedFunctionInfo::construct_as_builtin() const {
+ return ConstructAsBuiltinBit::decode(flags());
+}
+
+void SharedFunctionInfo::CalculateConstructAsBuiltin() {
+ bool uses_builtins_construct_stub = false;
+ if (HasBuiltinId()) {
+ int id = builtin_id();
+ if (id != Builtins::kCompileLazy && id != Builtins::kEmptyFunction) {
+ uses_builtins_construct_stub = true;
+ }
+ } else if (IsApiFunction()) {
+ uses_builtins_construct_stub = true;
+ }
+
+ int f = flags();
+ f = ConstructAsBuiltinBit::update(f, uses_builtins_construct_stub);
+ set_flags(f);
+}
+
int SharedFunctionInfo::function_map_index() const {
// Note: Must be kept in sync with the FastNewClosure builtin.
- int index = Context::FIRST_FUNCTION_MAP_INDEX +
- FunctionMapIndexBits::decode(compiler_hints());
+ int index =
+ Context::FIRST_FUNCTION_MAP_INDEX + FunctionMapIndexBits::decode(flags());
DCHECK_LE(index, Context::LAST_FUNCTION_MAP_INDEX);
return index;
}
@@ -158,7 +197,7 @@ void SharedFunctionInfo::set_function_map_index(int index) {
DCHECK_LE(Context::FIRST_FUNCTION_MAP_INDEX, index);
DCHECK_LE(index, Context::LAST_FUNCTION_MAP_INDEX);
index -= Context::FIRST_FUNCTION_MAP_INDEX;
- set_compiler_hints(FunctionMapIndexBits::update(compiler_hints(), index));
+ set_flags(FunctionMapIndexBits::update(flags(), index));
}
void SharedFunctionInfo::clear_padding() {
@@ -167,7 +206,7 @@ void SharedFunctionInfo::clear_padding() {
void SharedFunctionInfo::UpdateFunctionMapIndex() {
int map_index = Context::FunctionMapIndex(
- language_mode(), kind(), true, has_shared_name(), needs_home_object());
+ language_mode(), kind(), true, HasSharedName(), needs_home_object());
set_function_map_index(map_index);
}
@@ -181,6 +220,9 @@ BIT_FIELD_ACCESSORS(SharedFunctionInfo, debugger_hints, deserialized,
BIT_FIELD_ACCESSORS(SharedFunctionInfo, debugger_hints, has_no_side_effect,
SharedFunctionInfo::HasNoSideEffectBit)
BIT_FIELD_ACCESSORS(SharedFunctionInfo, debugger_hints,
+ requires_runtime_side_effect_checks,
+ SharedFunctionInfo::RequiresRuntimeSideEffectChecksBit)
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, debugger_hints,
computed_has_no_side_effect,
SharedFunctionInfo::ComputedHasNoSideEffectBit)
BIT_FIELD_ACCESSORS(SharedFunctionInfo, debugger_hints, debug_is_blackboxed,
@@ -191,51 +233,167 @@ BIT_FIELD_ACCESSORS(SharedFunctionInfo, debugger_hints,
BIT_FIELD_ACCESSORS(SharedFunctionInfo, debugger_hints,
has_reported_binary_coverage,
SharedFunctionInfo::HasReportedBinaryCoverageBit)
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, debugger_hints, debugging_id,
+ SharedFunctionInfo::DebuggingIdBits)
void SharedFunctionInfo::DontAdaptArguments() {
- DCHECK(code()->kind() == Code::BUILTIN || code()->kind() == Code::STUB);
+ // TODO(leszeks): Revise this DCHECK now that the code field is gone.
+ DCHECK(!HasCodeObject());
set_internal_formal_parameter_count(kDontAdaptArgumentsSentinel);
}
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, start_position_and_type, start_position,
- SharedFunctionInfo::StartPositionBits)
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, raw_start_position_and_type,
+ raw_start_position, SharedFunctionInfo::StartPositionBits)
-Code* SharedFunctionInfo::code() const {
- return Code::cast(READ_FIELD(this, kCodeOffset));
+int SharedFunctionInfo::StartPosition() const {
+ ScopeInfo* info = scope_info();
+ if (!info->HasPositionInfo()) {
+ // TODO(cbruni): use preparsed_scope_data
+ return raw_start_position();
+ }
+ return info->StartPosition();
}
-void SharedFunctionInfo::set_code(Code* value, WriteBarrierMode mode) {
- DCHECK(value->kind() != Code::OPTIMIZED_FUNCTION);
- // If the SharedFunctionInfo has bytecode we should never mark it for lazy
- // compile, since the bytecode is never flushed.
- DCHECK(value != GetIsolate()->builtins()->builtin(Builtins::kCompileLazy) ||
- !HasBytecodeArray());
- WRITE_FIELD(this, kCodeOffset, value);
- CONDITIONAL_WRITE_BARRIER(value->GetHeap(), this, kCodeOffset, value, mode);
+int SharedFunctionInfo::EndPosition() const {
+ ScopeInfo* info = scope_info();
+ if (!info->HasPositionInfo()) {
+ // TODO(cbruni): use preparsed_scope_data
+ return raw_end_position();
+ }
+ return info->EndPosition();
+}
+
+Code* SharedFunctionInfo::GetCode() const {
+ // ======
+ // NOTE: This chain of checks MUST be kept in sync with the equivalent CSA
+ // GetSharedFunctionInfoCode method in code-stub-assembler.cc, and the
+ // architecture-specific GetSharedFunctionInfoCode methods in builtins-*.cc.
+ // ======
+
+ Isolate* isolate = GetIsolate();
+ Object* data = function_data();
+ if (data->IsSmi()) {
+ // Holding a Smi means we are a builtin.
+ DCHECK(HasBuiltinId());
+ return isolate->builtins()->builtin(builtin_id());
+ } else if (data->IsBytecodeArray()) {
+ // Having a bytecode array means we are a compiled, interpreted function.
+ DCHECK(HasBytecodeArray());
+ return isolate->builtins()->builtin(Builtins::kInterpreterEntryTrampoline);
+ } else if (data->IsFixedArray()) {
+ // Having a fixed array means we are an asm.js/wasm function.
+ DCHECK(HasAsmWasmData());
+ return isolate->builtins()->builtin(Builtins::kInstantiateAsmJs);
+ } else if (data->IsPreParsedScopeData()) {
+ // Having pre-parsed scope data means we need to compile.
+ DCHECK(HasPreParsedScopeData());
+ return isolate->builtins()->builtin(Builtins::kCompileLazy);
+ } else if (data->IsFunctionTemplateInfo()) {
+ // Having a function template info means we are an API function.
+ DCHECK(IsApiFunction());
+ return isolate->builtins()->builtin(Builtins::kHandleApiCall);
+ } else if (data->IsCode()) {
+ // Having a code object means we should run it.
+ DCHECK(HasCodeObject());
+ return Code::cast(data);
+ } else if (data->IsInterpreterData()) {
+ Code* code = InterpreterTrampoline();
+ DCHECK(code->IsCode());
+ DCHECK(code->is_interpreter_trampoline_builtin());
+ return code;
+ }
+ UNREACHABLE();
}
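
GetCode() now derives the executable code purely from the type of the function_data payload instead of reading a dedicated code field, and the NOTE above stresses that the CSA and per-architecture variants of this dispatch must stay in sync with it. The sketch below is not V8 API; it is a self-contained illustration of the same "dispatch on the payload type" idea, with all types and the returned labels made up for the example.

// Illustrative only: mirrors GetCode(), where the dynamic type of the
// function_data payload decides which code object a function runs.
#include <iostream>
#include <string>
#include <variant>

struct BuiltinId { int id; };        // stored as a Smi in the real object
struct BytecodeArray {};             // compiled for the interpreter
struct AsmWasmData {};               // asm.js -> wasm conversion result
struct PreParsedScopeData {};        // parsed but not yet compiled
struct FunctionTemplateInfo {};      // an API function

using FunctionData = std::variant<BuiltinId, BytecodeArray, AsmWasmData,
                                  PreParsedScopeData, FunctionTemplateInfo>;

std::string GetCodeFor(const FunctionData& data) {
  if (auto* b = std::get_if<BuiltinId>(&data))
    return "builtin #" + std::to_string(b->id);
  if (std::holds_alternative<BytecodeArray>(data))
    return "InterpreterEntryTrampoline";
  if (std::holds_alternative<AsmWasmData>(data))
    return "InstantiateAsmJs";
  if (std::holds_alternative<PreParsedScopeData>(data))
    return "CompileLazy";
  return "HandleApiCall";  // FunctionTemplateInfo
}

int main() {
  std::cout << GetCodeFor(BytecodeArray{}) << "\n";  // InterpreterEntryTrampoline
  std::cout << GetCodeFor(BuiltinId{42}) << "\n";    // builtin #42
  return 0;
}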
-bool SharedFunctionInfo::IsInterpreted() const {
- return code()->is_interpreter_trampoline_builtin();
-}
+bool SharedFunctionInfo::IsInterpreted() const { return HasBytecodeArray(); }
ScopeInfo* SharedFunctionInfo::scope_info() const {
- return reinterpret_cast<ScopeInfo*>(READ_FIELD(this, kScopeInfoOffset));
+ Object* maybe_scope_info = name_or_scope_info();
+ if (maybe_scope_info->IsScopeInfo()) {
+ return ScopeInfo::cast(maybe_scope_info);
+ }
+ return ScopeInfo::Empty(GetIsolate());
}
-void SharedFunctionInfo::set_scope_info(ScopeInfo* value,
+void SharedFunctionInfo::set_scope_info(ScopeInfo* scope_info,
WriteBarrierMode mode) {
- WRITE_FIELD(this, kScopeInfoOffset, reinterpret_cast<Object*>(value));
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kScopeInfoOffset,
- reinterpret_cast<Object*>(value), mode);
+ // TODO(cbruni): this code is no longer necessary once we store the position
+ // only on the ScopeInfo.
+ if (scope_info->HasPositionInfo()) {
+ scope_info->SetPositionInfo(raw_start_position(), raw_end_position());
+ }
+ // Move the existing name onto the ScopeInfo.
+ Object* name = name_or_scope_info();
+ if (name->IsScopeInfo()) {
+ name = ScopeInfo::cast(name)->FunctionName();
+ }
+ DCHECK(name->IsString() || name == kNoSharedNameSentinel);
+ // Only set the function name for function scopes.
+ scope_info->SetFunctionName(name);
+ if (HasInferredName() && inferred_name()->length() != 0) {
+ scope_info->SetInferredFunctionName(inferred_name());
+ }
+ WRITE_FIELD(this, kNameOrScopeInfoOffset,
+ reinterpret_cast<Object*>(scope_info));
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kNameOrScopeInfoOffset,
+ reinterpret_cast<Object*>(scope_info), mode);
+}
+
+ACCESSORS(SharedFunctionInfo, raw_outer_scope_info_or_feedback_metadata,
+ HeapObject, kOuterScopeInfoOrFeedbackMetadataOffset)
+
+HeapObject* SharedFunctionInfo::outer_scope_info() const {
+ DCHECK(!is_compiled());
+ DCHECK(!HasFeedbackMetadata());
+ return raw_outer_scope_info_or_feedback_metadata();
+}
+
+bool SharedFunctionInfo::HasOuterScopeInfo() const {
+ ScopeInfo* outer_info = nullptr;
+ if (!is_compiled()) {
+ if (!outer_scope_info()->IsScopeInfo()) return false;
+ outer_info = ScopeInfo::cast(outer_scope_info());
+ } else {
+ if (!scope_info()->HasOuterScopeInfo()) return false;
+ outer_info = scope_info()->OuterScopeInfo();
+ }
+ return outer_info->length() > 0;
+}
+
+ScopeInfo* SharedFunctionInfo::GetOuterScopeInfo() const {
+ DCHECK(HasOuterScopeInfo());
+ if (!is_compiled()) return ScopeInfo::cast(outer_scope_info());
+ return scope_info()->OuterScopeInfo();
+}
+
+void SharedFunctionInfo::set_outer_scope_info(HeapObject* value,
+ WriteBarrierMode mode) {
+ DCHECK(!is_compiled());
+ DCHECK(raw_outer_scope_info_or_feedback_metadata()->IsTheHole(GetIsolate()));
+ DCHECK(value->IsScopeInfo() || value->IsTheHole(GetIsolate()));
+ return set_raw_outer_scope_info_or_feedback_metadata(value, mode);
}
-ACCESSORS(SharedFunctionInfo, outer_scope_info, HeapObject,
- kOuterScopeInfoOffset)
+bool SharedFunctionInfo::HasFeedbackMetadata() const {
+ return raw_outer_scope_info_or_feedback_metadata()->IsFeedbackMetadata();
+}
+
+FeedbackMetadata* SharedFunctionInfo::feedback_metadata() const {
+ DCHECK(HasFeedbackMetadata());
+ return FeedbackMetadata::cast(raw_outer_scope_info_or_feedback_metadata());
+}
+
+void SharedFunctionInfo::set_feedback_metadata(FeedbackMetadata* value,
+ WriteBarrierMode mode) {
+ DCHECK(!HasFeedbackMetadata());
+ DCHECK(value->IsFeedbackMetadata());
+ return set_raw_outer_scope_info_or_feedback_metadata(value, mode);
+}
bool SharedFunctionInfo::is_compiled() const {
- Builtins* builtins = GetIsolate()->builtins();
- DCHECK(code() != builtins->builtin(Builtins::kCheckOptimizationMarker));
- return code() != builtins->builtin(Builtins::kCompileLazy);
+ Object* data = function_data();
+ return data != Smi::FromEnum(Builtins::kCompileLazy) &&
+ !data->IsPreParsedScopeData();
}
int SharedFunctionInfo::GetLength() const {
@@ -259,7 +417,7 @@ bool SharedFunctionInfo::HasDebugInfo() const {
return has_debug_info;
}
-bool SharedFunctionInfo::IsApiFunction() {
+bool SharedFunctionInfo::IsApiFunction() const {
return function_data()->IsFunctionTemplateInfo();
}
@@ -268,28 +426,44 @@ FunctionTemplateInfo* SharedFunctionInfo::get_api_func_data() {
return FunctionTemplateInfo::cast(function_data());
}
-void SharedFunctionInfo::set_api_func_data(FunctionTemplateInfo* data) {
- DCHECK(function_data()->IsUndefined(GetIsolate()));
- set_function_data(data);
-}
-
bool SharedFunctionInfo::HasBytecodeArray() const {
- return function_data()->IsBytecodeArray();
+ return function_data()->IsBytecodeArray() ||
+ function_data()->IsInterpreterData();
}
-BytecodeArray* SharedFunctionInfo::bytecode_array() const {
+BytecodeArray* SharedFunctionInfo::GetBytecodeArray() const {
DCHECK(HasBytecodeArray());
- return BytecodeArray::cast(function_data());
+ if (function_data()->IsBytecodeArray()) {
+ return BytecodeArray::cast(function_data());
+ } else {
+ DCHECK(function_data()->IsInterpreterData());
+ return InterpreterData::cast(function_data())->bytecode_array();
+ }
}
-void SharedFunctionInfo::set_bytecode_array(BytecodeArray* bytecode) {
- DCHECK(function_data()->IsUndefined(GetIsolate()));
+void SharedFunctionInfo::set_bytecode_array(class BytecodeArray* bytecode) {
+ DCHECK(function_data() == Smi::FromEnum(Builtins::kCompileLazy));
set_function_data(bytecode);
}
-void SharedFunctionInfo::ClearBytecodeArray() {
- DCHECK(function_data()->IsUndefined(GetIsolate()) || HasBytecodeArray());
- set_function_data(GetHeap()->undefined_value());
+Code* SharedFunctionInfo::InterpreterTrampoline() const {
+ DCHECK(HasInterpreterData());
+ return interpreter_data()->interpreter_trampoline();
+}
+
+bool SharedFunctionInfo::HasInterpreterData() const {
+ return function_data()->IsInterpreterData();
+}
+
+InterpreterData* SharedFunctionInfo::interpreter_data() const {
+ DCHECK(HasInterpreterData());
+ return InterpreterData::cast(function_data());
+}
+
+void SharedFunctionInfo::set_interpreter_data(
+ InterpreterData* interpreter_data) {
+ DCHECK(FLAG_interpreted_frames_native_stack);
+ set_function_data(interpreter_data);
}
bool SharedFunctionInfo::HasAsmWasmData() const {
@@ -302,26 +476,28 @@ FixedArray* SharedFunctionInfo::asm_wasm_data() const {
}
void SharedFunctionInfo::set_asm_wasm_data(FixedArray* data) {
- DCHECK(function_data()->IsUndefined(GetIsolate()) || HasAsmWasmData());
+ DCHECK(function_data() == Smi::FromEnum(Builtins::kCompileLazy) ||
+ HasAsmWasmData());
set_function_data(data);
}
-void SharedFunctionInfo::ClearAsmWasmData() {
- DCHECK(function_data()->IsUndefined(GetIsolate()) || HasAsmWasmData());
- set_function_data(GetHeap()->undefined_value());
-}
-
-bool SharedFunctionInfo::HasLazyDeserializationBuiltinId() const {
+bool SharedFunctionInfo::HasBuiltinId() const {
return function_data()->IsSmi();
}
-int SharedFunctionInfo::lazy_deserialization_builtin_id() const {
- DCHECK(HasLazyDeserializationBuiltinId());
+int SharedFunctionInfo::builtin_id() const {
+ DCHECK(HasBuiltinId());
int id = Smi::ToInt(function_data());
DCHECK(Builtins::IsBuiltinId(id));
return id;
}
+void SharedFunctionInfo::set_builtin_id(int builtin_id) {
+ DCHECK(Builtins::IsBuiltinId(builtin_id));
+ DCHECK_NE(builtin_id, Builtins::kDeserializeLazy);
+ set_function_data(Smi::FromInt(builtin_id), SKIP_WRITE_BARRIER);
+}
+
bool SharedFunctionInfo::HasPreParsedScopeData() const {
return function_data()->IsPreParsedScopeData();
}
@@ -333,13 +509,18 @@ PreParsedScopeData* SharedFunctionInfo::preparsed_scope_data() const {
void SharedFunctionInfo::set_preparsed_scope_data(
PreParsedScopeData* preparsed_scope_data) {
- DCHECK(function_data()->IsUndefined(GetIsolate()));
+ DCHECK(function_data() == Smi::FromEnum(Builtins::kCompileLazy));
set_function_data(preparsed_scope_data);
}
void SharedFunctionInfo::ClearPreParsedScopeData() {
- DCHECK(function_data()->IsUndefined(GetIsolate()) || HasPreParsedScopeData());
- set_function_data(GetHeap()->undefined_value());
+ DCHECK(function_data() == Smi::FromEnum(Builtins::kCompileLazy) ||
+ HasPreParsedScopeData());
+ set_builtin_id(Builtins::kCompileLazy);
+}
+
+bool SharedFunctionInfo::HasCodeObject() const {
+ return function_data()->IsCode();
}
bool SharedFunctionInfo::HasBuiltinFunctionId() {
@@ -384,6 +565,36 @@ bool SharedFunctionInfo::IsSubjectToDebugging() {
return IsUserJavaScript() && !HasAsmWasmData();
}
+bool SharedFunctionInfo::CanFlushCompiled() const {
+ bool can_decompile =
+ (HasBytecodeArray() || HasAsmWasmData() || HasPreParsedScopeData());
+ return can_decompile;
+}
+
+void SharedFunctionInfo::FlushCompiled() {
+ DisallowHeapAllocation no_gc;
+
+ DCHECK(CanFlushCompiled());
+
+ Oddball* the_hole = GetIsolate()->heap()->the_hole_value();
+
+ if (is_compiled()) {
+ HeapObject* outer_scope_info = the_hole;
+ if (!is_toplevel()) {
+ if (scope_info()->HasOuterScopeInfo()) {
+ outer_scope_info = scope_info()->OuterScopeInfo();
+ }
+ }
+ // Raw setter to avoid validity checks, since we're performing the unusual
+ // task of decompiling.
+ set_raw_outer_scope_info_or_feedback_metadata(outer_scope_info);
+ } else {
+ DCHECK(outer_scope_info()->IsScopeInfo() || is_toplevel());
+ }
+
+ set_builtin_id(Builtins::kCompileLazy);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/shared-function-info.h b/deps/v8/src/objects/shared-function-info.h
index 077088dd28..06abe8a2c7 100644
--- a/deps/v8/src/objects/shared-function-info.h
+++ b/deps/v8/src/objects/shared-function-info.h
@@ -35,6 +35,24 @@ class PreParsedScopeData : public Struct {
DISALLOW_IMPLICIT_CONSTRUCTORS(PreParsedScopeData);
};
+class InterpreterData : public Struct {
+ public:
+ DECL_ACCESSORS(bytecode_array, BytecodeArray)
+ DECL_ACCESSORS(interpreter_trampoline, Code)
+
+ static const int kBytecodeArrayOffset = Struct::kHeaderSize;
+ static const int kInterpreterTrampolineOffset =
+ kBytecodeArrayOffset + kPointerSize;
+ static const int kSize = kInterpreterTrampolineOffset + kPointerSize;
+
+ DECL_CAST(InterpreterData)
+ DECL_PRINTER(InterpreterData)
+ DECL_VERIFIER(InterpreterData)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(InterpreterData);
+};
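
InterpreterData lays out its fields with hand-maintained offset constants, each field starting where the previous one ends and kSize marking the end of the object; the SHARED_FUNCTION_INFO_FIELDS list further down follows the same convention via a macro. The stand-alone sketch below only shows that offset arithmetic; the 8-byte pointer and header sizes are assumptions made for the printed numbers.

// Illustrative only: the offset-chaining convention used by InterpreterData
// (and, via a macro, by SharedFunctionInfo): each field begins at the end
// of the previous one, and kSize is the end of the last field.
#include <cstdio>

constexpr int kPointerSize = 8;  // assumption for this sketch (64-bit build)
constexpr int kHeaderSize = 8;   // stand-in for Struct::kHeaderSize

constexpr int kBytecodeArrayOffset = kHeaderSize;
constexpr int kInterpreterTrampolineOffset = kBytecodeArrayOffset + kPointerSize;
constexpr int kSize = kInterpreterTrampolineOffset + kPointerSize;

int main() {
  std::printf("bytecode_array at %d, trampoline at %d, total size %d\n",
              kBytecodeArrayOffset, kInterpreterTrampolineOffset, kSize);
  return 0;
}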
+
// SharedFunctionInfo describes the JSFunction information that can be
// shared by multiple instances of the function.
class SharedFunctionInfo : public HeapObject {
@@ -42,11 +60,11 @@ class SharedFunctionInfo : public HeapObject {
static constexpr Object* const kNoSharedNameSentinel = Smi::kZero;
// [name]: Returns shared name if it exists or an empty string otherwise.
- inline String* name() const;
- inline void set_name(String* name);
+ inline String* Name() const;
+ inline void SetName(String* name);
- // [code]: Function code.
- DECL_ACCESSORS(code, Code)
+ // Get the code object which represents the execution of this function.
+ inline Code* GetCode() const;
// Get the abstract code associated with the function, which will either be
// a Code object or a BytecodeArray.
@@ -86,16 +104,24 @@ class SharedFunctionInfo : public HeapObject {
// [scope_info]: Scope info.
DECL_ACCESSORS(scope_info, ScopeInfo)
- // The outer scope info for the purpose of parsing this function, or the hole
- // value if it isn't yet known.
- DECL_ACCESSORS(outer_scope_info, HeapObject)
+ // End position of this function in the script source.
+ inline int EndPosition() const;
- // [construct stub]: Code stub for constructing instances of this function.
- DECL_ACCESSORS(construct_stub, Code)
+ // Start position of this function in the script source.
+ inline int StartPosition() const;
- // Sets the given code as the construct stub, and marks builtin code objects
- // as a construct stub.
- void SetConstructStub(Code* code);
+ // [outer scope info | feedback metadata] Shared storage for outer scope info
+ // (on uncompiled functions) and feedback metadata (on compiled functions).
+ DECL_ACCESSORS(raw_outer_scope_info_or_feedback_metadata, HeapObject)
+
+ // Get the outer scope info whether this function is compiled or not.
+ inline bool HasOuterScopeInfo() const;
+ inline ScopeInfo* GetOuterScopeInfo() const;
+
+ // [feedback metadata] Metadata template for feedback vectors of instances of
+ // this function.
+ inline bool HasFeedbackMetadata() const;
+ DECL_ACCESSORS(feedback_metadata, FeedbackMetadata)
// Returns if this function has been compiled to native code yet.
inline bool is_compiled() const;
@@ -120,11 +146,6 @@ class SharedFunctionInfo : public HeapObject {
// function. The value is only reliable when the function has been compiled.
DECL_INT_ACCESSORS(expected_nof_properties)
- // [feedback_metadata] - describes ast node feedback from full-codegen and
- // (increasingly) from crankshafted code where sufficient feedback isn't
- // available.
- DECL_ACCESSORS(feedback_metadata, FeedbackMetadata)
-
// [function_literal_id] - uniquely identifies the FunctionLiteral this
// SharedFunctionInfo represents within its script, or -1 if this
// SharedFunctionInfo object doesn't correspond to a parsed FunctionLiteral.
@@ -140,33 +161,42 @@ class SharedFunctionInfo : public HeapObject {
// Currently it has one of:
// - a FunctionTemplateInfo to make benefit the API [IsApiFunction()].
// - a BytecodeArray for the interpreter [HasBytecodeArray()].
+ // - an InterpreterData with the BytecodeArray and a copy of the
+ // interpreter trampoline [HasInterpreterData()].
// - a FixedArray with Asm->Wasm conversion [HasAsmWasmData()].
- // - a Smi containing the builtin id [HasLazyDeserializationBuiltinId()]
+ // - a Smi containing the builtin id [HasBuiltinId()]
// - a PreParsedScopeData for the parser [HasPreParsedScopeData()]
+ // - a Code object otherwise [HasCodeObject()]
DECL_ACCESSORS(function_data, Object)
- inline bool IsApiFunction();
+ inline bool IsApiFunction() const;
inline FunctionTemplateInfo* get_api_func_data();
inline void set_api_func_data(FunctionTemplateInfo* data);
inline bool HasBytecodeArray() const;
- inline BytecodeArray* bytecode_array() const;
- inline void set_bytecode_array(BytecodeArray* bytecode);
- inline void ClearBytecodeArray();
+ inline BytecodeArray* GetBytecodeArray() const;
+ inline void set_bytecode_array(class BytecodeArray* bytecode);
+ inline Code* InterpreterTrampoline() const;
+ inline bool HasInterpreterData() const;
+ inline InterpreterData* interpreter_data() const;
+ inline void set_interpreter_data(InterpreterData* interpreter_data);
inline bool HasAsmWasmData() const;
inline FixedArray* asm_wasm_data() const;
inline void set_asm_wasm_data(FixedArray* data);
- inline void ClearAsmWasmData();
// A brief note to clear up possible confusion:
- // lazy_deserialization_builtin_id corresponds to the auto-generated
+ // builtin_id corresponds to the auto-generated
// Builtins::Name id, while builtin_function_id corresponds to
// BuiltinFunctionId (a manually maintained list of 'interesting' functions
// mainly used during optimization).
- inline bool HasLazyDeserializationBuiltinId() const;
- inline int lazy_deserialization_builtin_id() const;
+ inline bool HasBuiltinId() const;
+ inline int builtin_id() const;
+ inline void set_builtin_id(int builtin_id);
inline bool HasPreParsedScopeData() const;
inline PreParsedScopeData* preparsed_scope_data() const;
inline void set_preparsed_scope_data(PreParsedScopeData* data);
inline void ClearPreParsedScopeData();
+ inline bool HasCodeObject() const;
+ inline Code* code_object() const;
+ inline void set_code_object();
// [function identifier]: This field holds an additional identifier for the
// function.
@@ -189,13 +219,6 @@ class SharedFunctionInfo : public HeapObject {
// [script]: Script from which the function originates.
DECL_ACCESSORS(script, Object)
- // [start_position_and_type]: Field used to store both the source code
- // position, whether or not the function is a function expression,
- // and whether or not the function is a toplevel function. The two
- // least significants bit indicates whether the function is an
- // expression and the rest contains the source code position.
- DECL_INT_ACCESSORS(start_position_and_type)
-
// The function is subject to debugging if a debug info is attached.
inline bool HasDebugInfo() const;
DebugInfo* GetDebugInfo() const;
@@ -203,6 +226,7 @@ class SharedFunctionInfo : public HeapObject {
// Break infos are contained in DebugInfo, this is a convenience method
// to simplify access.
bool HasBreakInfo() const;
+ bool BreakAtEntry() const;
// Coverage infos are contained in DebugInfo, this is a convenience method
// to simplify access.
@@ -235,6 +259,9 @@ class SharedFunctionInfo : public HeapObject {
// Indicates that the function cannot cause side-effects.
DECL_BOOLEAN_ACCESSORS(has_no_side_effect)
+ // Indicates that the function requires runtime side-effect checks.
+ DECL_BOOLEAN_ACCESSORS(requires_runtime_side_effect_checks);
+
// Indicates that |has_no_side_effect| has been computed and set.
DECL_BOOLEAN_ACCESSORS(computed_has_no_side_effect)
@@ -247,6 +274,11 @@ class SharedFunctionInfo : public HeapObject {
// Indicates that the function has been reported for binary code coverage.
DECL_BOOLEAN_ACCESSORS(has_reported_binary_coverage)
+ // Id assigned to the function for debugging.
+ // This could also be implemented as a weak hash table.
+ inline int debugging_id() const;
+ inline void set_debugging_id(int value);
+
// The function's name if it is non-empty, otherwise the inferred name.
String* DebugName();
@@ -259,14 +291,24 @@ class SharedFunctionInfo : public HeapObject {
// Position of the 'function' token in the script source.
DECL_INT_ACCESSORS(function_token_position)
+ // [raw_start_position_and_type]: Field used to store both the source code
+ // position, whether or not the function is a function expression,
+ // and whether or not the function is a toplevel function. The two
+ // least significant bits indicate whether the function is an
+ // expression and the rest contains the source code position.
+ // TODO(cbruni): start_position should be removed from SFI.
+ DECL_INT_ACCESSORS(raw_start_position_and_type)
+
// Position of this function in the script source.
- DECL_INT_ACCESSORS(start_position)
+ // TODO(cbruni): start_position should be removed from SFI.
+ DECL_INT_ACCESSORS(raw_start_position)
// End position of this function in the script source.
- DECL_INT_ACCESSORS(end_position)
+ // TODO(cbruni): end_position should be removed from SFI.
+ DECL_INT_ACCESSORS(raw_end_position)
// Returns true if the function has shared name.
- inline bool has_shared_name() const;
+ inline bool HasSharedName() const;
// Is this function a named function expression in the source code.
DECL_BOOLEAN_ACCESSORS(is_named_expression)
@@ -274,9 +316,8 @@ class SharedFunctionInfo : public HeapObject {
// Is this function a top-level function (scripts, evals).
DECL_BOOLEAN_ACCESSORS(is_toplevel)
- // Bit field containing various information collected by the compiler to
- // drive optimization.
- DECL_INT_ACCESSORS(compiler_hints)
+ // [flags] Bit field containing various flags about the function.
+ DECL_INT_ACCESSORS(flags)
// Indicates if this function can be lazy compiled.
DECL_BOOLEAN_ACCESSORS(allows_lazy_compilation)
@@ -349,6 +390,14 @@ class SharedFunctionInfo : public HeapObject {
// Whether this function is defined in user-provided JavaScript code.
inline bool IsUserJavaScript();
+ // True if one can flush compiled code from this function, in such a way that
+ // it can later be re-compiled.
+ inline bool CanFlushCompiled() const;
+
+ // Flush compiled data from this function, setting it back to CompileLazy and
+ // clearing any feedback metadata.
+ inline void FlushCompiled();
+
// Check whether or not this function is inlineable.
bool IsInlineable();
@@ -362,11 +411,20 @@ class SharedFunctionInfo : public HeapObject {
// Initialize a SharedFunctionInfo from a parsed function literal.
static void InitFromFunctionLiteral(Handle<SharedFunctionInfo> shared_info,
- FunctionLiteral* lit);
+ FunctionLiteral* lit, bool is_toplevel);
// Sets the expected number of properties based on estimate from parser.
void SetExpectedNofPropertiesFromEstimate(FunctionLiteral* literal);
+ inline bool construct_as_builtin() const;
+
+ // Determines and sets the ConstructAsBuiltinBit in |flags|, based on the
+ // |function_data|. Must be called when creating the SFI after other fields
+ // are initialized. The ConstructAsBuiltinBit determines whether
+ // JSBuiltinsConstructStub or JSConstructStubGeneric should be called to
+ // construct this function.
+ inline void CalculateConstructAsBuiltin();
+
// Dispatched behavior.
DECL_PRINTER(SharedFunctionInfo)
DECL_VERIFIER(SharedFunctionInfo)
@@ -378,7 +436,8 @@ class SharedFunctionInfo : public HeapObject {
class ScriptIterator {
public:
explicit ScriptIterator(Handle<Script> script);
- ScriptIterator(Isolate* isolate, Handle<FixedArray> shared_function_infos);
+ ScriptIterator(Isolate* isolate,
+ Handle<WeakFixedArray> shared_function_infos);
SharedFunctionInfo* Next();
// Reset the iterator to run on |script|.
@@ -386,7 +445,7 @@ class SharedFunctionInfo : public HeapObject {
private:
Isolate* isolate_;
- Handle<FixedArray> shared_function_infos_;
+ Handle<WeakFixedArray> shared_function_infos_;
int index_;
DISALLOW_COPY_AND_ASSIGN(ScriptIterator);
};
@@ -399,7 +458,7 @@ class SharedFunctionInfo : public HeapObject {
private:
Script::Iterator script_iterator_;
- WeakFixedArray::Iterator noscript_sfi_iterator_;
+ FixedArrayOfWeakCells::Iterator noscript_sfi_iterator_;
SharedFunctionInfo::ScriptIterator sfi_iterator_;
DisallowHeapAllocation no_gc_;
DISALLOW_COPY_AND_ASSIGN(GlobalIterator);
@@ -418,30 +477,27 @@ class SharedFunctionInfo : public HeapObject {
#endif
// Layout description.
-#define SHARED_FUNCTION_INFO_FIELDS(V) \
- /* Pointer fields. */ \
- V(kCodeOffset, kPointerSize) \
- V(kNameOffset, kPointerSize) \
- V(kScopeInfoOffset, kPointerSize) \
- V(kOuterScopeInfoOffset, kPointerSize) \
- V(kConstructStubOffset, kPointerSize) \
- V(kFunctionDataOffset, kPointerSize) \
- V(kScriptOffset, kPointerSize) \
- V(kDebugInfoOffset, kPointerSize) \
- V(kFunctionIdentifierOffset, kPointerSize) \
- V(kFeedbackMetadataOffset, kPointerSize) \
- V(kEndOfPointerFieldsOffset, 0) \
- /* Raw data fields. */ \
- V(kFunctionLiteralIdOffset, kInt32Size) \
- V(kUniqueIdOffset, kUniqueIdFieldSize) \
- V(kLengthOffset, kInt32Size) \
- V(kFormalParameterCountOffset, kInt32Size) \
- V(kExpectedNofPropertiesOffset, kInt32Size) \
- V(kStartPositionAndTypeOffset, kInt32Size) \
- V(kEndPositionOffset, kInt32Size) \
- V(kFunctionTokenPositionOffset, kInt32Size) \
- V(kCompilerHintsOffset, kInt32Size) \
- /* Total size. */ \
+#define SHARED_FUNCTION_INFO_FIELDS(V) \
+ /* Pointer fields. */ \
+ V(kStartOfPointerFieldsOffset, 0) \
+ V(kFunctionDataOffset, kPointerSize) \
+ V(kNameOrScopeInfoOffset, kPointerSize) \
+ V(kOuterScopeInfoOrFeedbackMetadataOffset, kPointerSize) \
+ V(kScriptOffset, kPointerSize) \
+ V(kDebugInfoOffset, kPointerSize) \
+ V(kFunctionIdentifierOffset, kPointerSize) \
+ V(kEndOfPointerFieldsOffset, 0) \
+ /* Raw data fields. */ \
+ V(kFunctionLiteralIdOffset, kInt32Size) \
+ V(kUniqueIdOffset, kUniqueIdFieldSize) \
+ V(kLengthOffset, kInt32Size) \
+ V(kFormalParameterCountOffset, kInt32Size) \
+ V(kExpectedNofPropertiesOffset, kInt32Size) \
+ V(kStartPositionAndTypeOffset, kInt32Size) \
+ V(kEndPositionOffset, kInt32Size) \
+ V(kFunctionTokenPositionOffset, kInt32Size) \
+ V(kFlagsOffset, kInt32Size) \
+ /* Total size. */ \
V(kSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
@@ -450,12 +506,13 @@ class SharedFunctionInfo : public HeapObject {
static const int kAlignedSize = POINTER_SIZE_ALIGN(kSize);
- typedef FixedBodyDescriptor<kCodeOffset, kEndOfPointerFieldsOffset, kSize>
+ typedef FixedBodyDescriptor<kStartOfPointerFieldsOffset,
+ kEndOfPointerFieldsOffset, kSize>
BodyDescriptor;
// No weak fields.
typedef BodyDescriptor BodyDescriptorWeak;
-// Bit fields in |start_position_and_type|.
+// Bit fields in |raw_start_position_and_type|.
#define START_POSITION_AND_TYPE_BIT_FIELDS(V, _) \
V(IsNamedExpressionBit, bool, 1, _) \
V(IsTopLevelBit, bool, 1, _) \
@@ -464,8 +521,8 @@ class SharedFunctionInfo : public HeapObject {
DEFINE_BIT_FIELDS(START_POSITION_AND_TYPE_BIT_FIELDS)
#undef START_POSITION_AND_TYPE_BIT_FIELDS
-// Bit positions in |compiler_hints|.
-#define COMPILER_HINTS_BIT_FIELDS(V, _) \
+// Bit positions in |flags|.
+#define FLAGS_BIT_FIELDS(V, _) \
V(IsNativeBit, bool, 1, _) \
V(IsStrictBit, bool, 1, _) \
V(IsWrappedBit, bool, 1, _) \
@@ -479,10 +536,11 @@ class SharedFunctionInfo : public HeapObject {
V(IsAsmWasmBrokenBit, bool, 1, _) \
V(FunctionMapIndexBits, int, 5, _) \
V(DisabledOptimizationReasonBits, BailoutReason, 4, _) \
- V(RequiresInstanceFieldsInitializer, bool, 1, _)
+ V(RequiresInstanceFieldsInitializer, bool, 1, _) \
+ V(ConstructAsBuiltinBit, bool, 1, _)
- DEFINE_BIT_FIELDS(COMPILER_HINTS_BIT_FIELDS)
-#undef COMPILER_HINTS_BIT_FIELDS
+ DEFINE_BIT_FIELDS(FLAGS_BIT_FIELDS)
+#undef FLAGS_BIT_FIELDS
// Bailout reasons must fit in the DisabledOptimizationReason bitfield.
STATIC_ASSERT(BailoutReason::kLastErrorMessage <=
@@ -491,27 +549,36 @@ class SharedFunctionInfo : public HeapObject {
STATIC_ASSERT(kLastFunctionKind <= FunctionKindBits::kMax);
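
The renamed |flags| word packs several one-bit booleans together with multi-bit fields (a 5-bit function-map index, a 4-bit bailout reason, the function kind), and the STATIC_ASSERTs above guard that the widest values still fit; the BIT_FIELD_ACCESSORS lines earlier in this patch generate decode/update accessors for each field. The code below is not V8's BitField class, just a minimal self-contained equivalent of that pattern with invented field positions.

// Illustrative only: a minimal BitField<T, shift, size> in the style used for
// SharedFunctionInfo::flags, with decode() and update() like V8's accessors.
#include <cassert>
#include <cstdint>

template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr uint32_t kMask = ((uint32_t{1} << kSize) - 1) << kShift;
  static constexpr T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> kShift);
  }
  static constexpr uint32_t update(uint32_t word, T value) {
    return (word & ~kMask) | ((static_cast<uint32_t>(value) << kShift) & kMask);
  }
  static constexpr int kNext = kShift + kSize;  // where the next field starts
};

// A few fields in the spirit of FLAGS_BIT_FIELDS (positions are made up).
using IsNativeBit = BitField<bool, 0, 1>;
using IsStrictBit = BitField<bool, IsNativeBit::kNext, 1>;
using FunctionMapIndexBits = BitField<int, IsStrictBit::kNext, 5>;

int main() {
  uint32_t flags = 0;
  flags = IsStrictBit::update(flags, true);
  flags = FunctionMapIndexBits::update(flags, 19);
  assert(IsStrictBit::decode(flags));
  assert(!IsNativeBit::decode(flags));
  assert(FunctionMapIndexBits::decode(flags) == 19);
  return 0;
}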
// Bit positions in |debugger_hints|.
-#define DEBUGGER_HINTS_BIT_FIELDS(V, _) \
- V(IsAnonymousExpressionBit, bool, 1, _) \
- V(NameShouldPrintAsAnonymousBit, bool, 1, _) \
- V(IsDeserializedBit, bool, 1, _) \
- V(HasNoSideEffectBit, bool, 1, _) \
- V(ComputedHasNoSideEffectBit, bool, 1, _) \
- V(DebugIsBlackboxedBit, bool, 1, _) \
- V(ComputedDebugIsBlackboxedBit, bool, 1, _) \
- V(HasReportedBinaryCoverageBit, bool, 1, _)
+#define DEBUGGER_HINTS_BIT_FIELDS(V, _) \
+ V(IsAnonymousExpressionBit, bool, 1, _) \
+ V(NameShouldPrintAsAnonymousBit, bool, 1, _) \
+ V(IsDeserializedBit, bool, 1, _) \
+ V(HasNoSideEffectBit, bool, 1, _) \
+ V(RequiresRuntimeSideEffectChecksBit, bool, 1, _) \
+ V(ComputedHasNoSideEffectBit, bool, 1, _) \
+ V(DebugIsBlackboxedBit, bool, 1, _) \
+ V(ComputedDebugIsBlackboxedBit, bool, 1, _) \
+ V(HasReportedBinaryCoverageBit, bool, 1, _) \
+ V(DebuggingIdBits, int, 20, _)
DEFINE_BIT_FIELDS(DEBUGGER_HINTS_BIT_FIELDS)
#undef DEBUGGER_HINTS_BIT_FIELDS
+ static const int kNoDebuggingId = 0;
+
// Indicates that this function uses a super property (or an eval that may
// use a super property).
// This is needed to set up the [[HomeObject]] on the function instance.
inline bool needs_home_object() const;
private:
- // [raw_name]: Function name string or kNoSharedNameSentinel.
- DECL_ACCESSORS(raw_name, Object)
+ // [name_or_scope_info]: Function name string, kNoSharedNameSentinel or
+ // ScopeInfo.
+ DECL_ACCESSORS(name_or_scope_info, Object)
+
+ // [outer scope info] The outer scope info, needed to lazily parse this
+ // function.
+ DECL_ACCESSORS(outer_scope_info, HeapObject)
inline void set_kind(FunctionKind kind);
diff --git a/deps/v8/src/objects/string-inl.h b/deps/v8/src/objects/string-inl.h
index 9b64444de2..68a295f80f 100644
--- a/deps/v8/src/objects/string-inl.h
+++ b/deps/v8/src/objects/string-inl.h
@@ -8,7 +8,7 @@
#include "src/objects/string.h"
#include "src/conversions-inl.h"
-#include "src/factory.h"
+#include "src/heap/factory.h"
#include "src/objects/name-inl.h"
#include "src/string-hasher-inl.h"
diff --git a/deps/v8/src/objects/string-table.h b/deps/v8/src/objects/string-table.h
index 382fe06bf4..2e5be87b21 100644
--- a/deps/v8/src/objects/string-table.h
+++ b/deps/v8/src/objects/string-table.h
@@ -61,12 +61,17 @@ class StringTable : public HashTable<StringTable, StringTableShape> {
V8_EXPORT_PRIVATE static Handle<String> LookupString(Isolate* isolate,
Handle<String> key);
static Handle<String> LookupKey(Isolate* isolate, StringTableKey* key);
+ static Handle<String> AddKeyNoResize(Isolate* isolate, StringTableKey* key);
static String* ForwardStringIfExists(Isolate* isolate, StringTableKey* key,
String* string);
+ // Shrink the StringTable if it's very empty (kMaxEmptyFactor) to avoid the
+ // performance overhead of re-allocating the StringTable over and over again.
+ static Handle<StringTable> CautiousShrink(Handle<StringTable> table);
+
// Looks up a string that is equal to the given string and returns
// string handle if it is found, or an empty handle otherwise.
- MUST_USE_RESULT static MaybeHandle<String> LookupTwoCharsStringIfExists(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<String> LookupTwoCharsStringIfExists(
Isolate* isolate, uint16_t c1, uint16_t c2);
static Object* LookupStringIfExists_NoAllocate(String* string);
@@ -74,6 +79,10 @@ class StringTable : public HashTable<StringTable, StringTableShape> {
DECL_CAST(StringTable)
+ static const int kMaxEmptyFactor = 8;
+ static const int kMinCapacity = 2048;
+ static const int kMinShrinkCapacity = kMinCapacity;
+
private:
template <bool seq_one_byte>
friend class JsonParser;
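
The intent behind the new CautiousShrink declaration and its constants above is to shrink the string table only when it is very empty, i.e. when capacity exceeds kMaxEmptyFactor times the number of live entries and is still above kMinShrinkCapacity, so a table hovering around one size is not repeatedly re-allocated. The helper below is only a sketch of that heuristic under those assumptions (halving the capacity is an illustrative choice, not necessarily what the real implementation does), not the HashTable code itself.

// Illustrative only: the "shrink only when very empty" heuristic implied by
// kMaxEmptyFactor / kMinShrinkCapacity, to avoid reallocation churn.
#include <cassert>

constexpr int kMaxEmptyFactor = 8;
constexpr int kMinCapacity = 2048;
constexpr int kMinShrinkCapacity = kMinCapacity;

// Returns the capacity the table should have after a cautious shrink.
int CautiousShrinkCapacity(int capacity, int number_of_elements) {
  if (capacity <= kMinShrinkCapacity) return capacity;  // already at the floor
  if (number_of_elements * kMaxEmptyFactor >= capacity) return capacity;  // not empty enough
  int new_capacity = capacity / 2;  // halve, but never drop below the floor
  return new_capacity < kMinShrinkCapacity ? kMinShrinkCapacity : new_capacity;
}

int main() {
  assert(CautiousShrinkCapacity(2048, 10) == 2048);      // at the floor: keep
  assert(CautiousShrinkCapacity(16384, 4000) == 16384);  // well used: keep
  assert(CautiousShrinkCapacity(16384, 100) == 8192);    // very empty: halve
  return 0;
}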
diff --git a/deps/v8/src/objects/string.h b/deps/v8/src/objects/string.h
index dee56fb7f7..07fe164957 100644
--- a/deps/v8/src/objects/string.h
+++ b/deps/v8/src/objects/string.h
@@ -229,8 +229,8 @@ class String : public Name {
// for strings containing supplementary characters, lexicographic ordering on
// sequences of UTF-16 code unit values differs from that on sequences of code
// point values.
- MUST_USE_RESULT static ComparisonResult Compare(Handle<String> x,
- Handle<String> y);
+ V8_WARN_UNUSED_RESULT static ComparisonResult Compare(Handle<String> x,
+ Handle<String> y);
// Perform ES6 21.1.3.8, including checking arguments.
static Object* IndexOf(Isolate* isolate, Handle<Object> receiver,
@@ -271,7 +271,7 @@ class String : public Name {
// the result.
// A {start_index} can be passed to specify where to start scanning the
// replacement string.
- MUST_USE_RESULT static MaybeHandle<String> GetSubstitution(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<String> GetSubstitution(
Isolate* isolate, Match* match, Handle<String> replacement,
int start_index = 0);
@@ -471,8 +471,8 @@ class SeqString : public String {
// Truncate the string in-place if possible and return the result.
// In case of new_length == 0, the empty string is returned without
// truncating the original string.
- MUST_USE_RESULT static Handle<String> Truncate(Handle<SeqString> string,
- int new_length);
+ V8_WARN_UNUSED_RESULT static Handle<String> Truncate(Handle<SeqString> string,
+ int new_length);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(SeqString);
diff --git a/deps/v8/src/objects/template-objects.cc b/deps/v8/src/objects/template-objects.cc
index e35f3f137b..24389c96a2 100644
--- a/deps/v8/src/objects/template-objects.cc
+++ b/deps/v8/src/objects/template-objects.cc
@@ -4,7 +4,7 @@
#include "src/objects/template-objects.h"
-#include "src/factory.h"
+#include "src/heap/factory.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/property-descriptor.h"
diff --git a/deps/v8/src/compilation-info.cc b/deps/v8/src/optimized-compilation-info.cc
index 85d887ceb7..2b211c2fdf 100644
--- a/deps/v8/src/compilation-info.cc
+++ b/deps/v8/src/optimized-compilation-info.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compilation-info.h"
+#include "src/optimized-compilation-info.h"
#include "src/api.h"
#include "src/ast/ast.h"
@@ -16,37 +16,26 @@
namespace v8 {
namespace internal {
-CompilationInfo::CompilationInfo(Zone* zone, ParseInfo* parse_info,
- FunctionLiteral* literal)
- : CompilationInfo({}, AbstractCode::INTERPRETED_FUNCTION, zone) {
- // NOTE: The parse_info passed here represents the global information gathered
- // during parsing, but does not represent specific details of the actual
- // function literal being compiled for this CompilationInfo. As such,
- // parse_info->literal() might be different from literal, and only global
- // details of the script being parsed are relevant to this CompilationInfo.
- DCHECK_NOT_NULL(literal);
- literal_ = literal;
- source_range_map_ = parse_info->source_range_map();
-
- if (parse_info->is_eval()) MarkAsEval();
- if (parse_info->is_native()) MarkAsNative();
- if (parse_info->collect_type_profile()) MarkAsCollectTypeProfile();
-}
-
-CompilationInfo::CompilationInfo(Zone* zone, Isolate* isolate,
- Handle<SharedFunctionInfo> shared,
- Handle<JSFunction> closure)
- : CompilationInfo({}, AbstractCode::OPTIMIZED_FUNCTION, zone) {
+OptimizedCompilationInfo::OptimizedCompilationInfo(
+ Zone* zone, Isolate* isolate, Handle<SharedFunctionInfo> shared,
+ Handle<JSFunction> closure)
+ : OptimizedCompilationInfo({}, AbstractCode::OPTIMIZED_FUNCTION, zone) {
shared_info_ = shared;
closure_ = closure;
optimization_id_ = isolate->NextOptimizationId();
dependencies_.reset(new CompilationDependencies(isolate, zone));
+ SetFlag(kCalledWithCodeStartRegister);
if (FLAG_function_context_specialization) MarkAsFunctionContextSpecializing();
if (FLAG_turbo_splitting) MarkAsSplittingEnabled();
if (!FLAG_turbo_disable_switch_jump_table) SetFlag(kSwitchJumpTableEnabled);
if (FLAG_untrusted_code_mitigations) MarkAsPoisoningRegisterArguments();
+ // TODO(yangguo): Disable this in case of debugging for crbug.com/826613
+ if (FLAG_analyze_environment_liveness) {
+ MarkAsAnalyzeEnvironmentLiveness();
+ }
+
// Collect source positions for optimized code when profiling or if debugger
// is active, to be able to get more precise source positions at the price of
// more memory consumption.
@@ -55,25 +44,29 @@ CompilationInfo::CompilationInfo(Zone* zone, Isolate* isolate,
}
}
-CompilationInfo::CompilationInfo(Vector<const char> debug_name, Zone* zone,
- Code::Kind code_kind)
- : CompilationInfo(debug_name, static_cast<AbstractCode::Kind>(code_kind),
- zone) {
- if (code_kind == Code::BYTECODE_HANDLER && has_untrusted_code_mitigations()) {
- SetFlag(CompilationInfo::kGenerateSpeculationPoisonOnEntry);
+OptimizedCompilationInfo::OptimizedCompilationInfo(
+ Vector<const char> debug_name, Zone* zone, Code::Kind code_kind)
+ : OptimizedCompilationInfo(
+ debug_name, static_cast<AbstractCode::Kind>(code_kind), zone) {
+ if (code_kind == Code::BYTECODE_HANDLER) {
+ SetFlag(OptimizedCompilationInfo::kCalledWithCodeStartRegister);
+ }
+#if ENABLE_GDB_JIT_INTERFACE
+#if DEBUG
+ if (code_kind == Code::BUILTIN || code_kind == Code::STUB) {
+ MarkAsSourcePositionsEnabled();
}
+#endif
+#endif
}
-CompilationInfo::CompilationInfo(Vector<const char> debug_name,
- AbstractCode::Kind code_kind, Zone* zone)
- : literal_(nullptr),
- source_range_map_(nullptr),
- flags_(FLAG_untrusted_code_mitigations ? kUntrustedCodeMitigations : 0),
+OptimizedCompilationInfo::OptimizedCompilationInfo(
+ Vector<const char> debug_name, AbstractCode::Kind code_kind, Zone* zone)
+ : flags_(FLAG_untrusted_code_mitigations ? kUntrustedCodeMitigations : 0),
code_kind_(code_kind),
stub_key_(0),
builtin_index_(Builtins::kNoBuiltinId),
osr_offset_(BailoutId::None()),
- feedback_vector_spec_(zone),
zone_(zone),
deferred_handles_(nullptr),
dependencies_(nullptr),
@@ -81,7 +74,7 @@ CompilationInfo::CompilationInfo(Vector<const char> debug_name,
optimization_id_(-1),
debug_name_(debug_name) {}
-CompilationInfo::~CompilationInfo() {
+OptimizedCompilationInfo::~OptimizedCompilationInfo() {
if (GetFlag(kDisableFutureOptimization) && has_shared_info()) {
shared_info()->DisableOptimization(bailout_reason());
}
@@ -90,33 +83,19 @@ CompilationInfo::~CompilationInfo() {
}
}
-DeclarationScope* CompilationInfo::scope() const {
- DCHECK_NOT_NULL(literal_);
- return literal_->scope();
-}
-
-int CompilationInfo::num_parameters() const {
- DCHECK(!IsStub());
- return scope()->num_parameters();
-}
-
-int CompilationInfo::num_parameters_including_this() const {
- DCHECK(!IsStub());
- return scope()->num_parameters() + 1;
-}
-
-void CompilationInfo::set_deferred_handles(
+void OptimizedCompilationInfo::set_deferred_handles(
std::shared_ptr<DeferredHandles> deferred_handles) {
DCHECK_NULL(deferred_handles_);
deferred_handles_.swap(deferred_handles);
}
-void CompilationInfo::set_deferred_handles(DeferredHandles* deferred_handles) {
+void OptimizedCompilationInfo::set_deferred_handles(
+ DeferredHandles* deferred_handles) {
DCHECK_NULL(deferred_handles_);
deferred_handles_.reset(deferred_handles);
}
-void CompilationInfo::ReopenHandlesInNewHandleScope() {
+void OptimizedCompilationInfo::ReopenHandlesInNewHandleScope() {
if (!shared_info_.is_null()) {
shared_info_ = Handle<SharedFunctionInfo>(*shared_info_);
}
@@ -125,14 +104,7 @@ void CompilationInfo::ReopenHandlesInNewHandleScope() {
}
}
-bool CompilationInfo::has_simple_parameters() {
- return scope()->has_simple_parameters();
-}
-
-std::unique_ptr<char[]> CompilationInfo::GetDebugName() const {
- if (literal()) {
- return literal()->GetDebugName();
- }
+std::unique_ptr<char[]> OptimizedCompilationInfo::GetDebugName() const {
if (!shared_info().is_null()) {
return shared_info()->DebugName()->ToCString();
}
@@ -144,7 +116,7 @@ std::unique_ptr<char[]> CompilationInfo::GetDebugName() const {
return name;
}
-StackFrame::Type CompilationInfo::GetOutputStackFrameType() const {
+StackFrame::Type OptimizedCompilationInfo::GetOutputStackFrameType() const {
switch (code_kind()) {
case Code::STUB:
case Code::BYTECODE_HANDLER:
@@ -154,8 +126,6 @@ StackFrame::Type CompilationInfo::GetOutputStackFrameType() const {
return StackFrame::WASM_COMPILED;
case Code::JS_TO_WASM_FUNCTION:
return StackFrame::JS_TO_WASM;
- case Code::WASM_TO_WASM_FUNCTION:
- return StackFrame::WASM_TO_WASM;
case Code::WASM_TO_JS_FUNCTION:
return StackFrame::WASM_TO_JS;
case Code::WASM_INTERPRETER_ENTRY:
@@ -166,38 +136,31 @@ StackFrame::Type CompilationInfo::GetOutputStackFrameType() const {
}
}
-int CompilationInfo::GetDeclareGlobalsFlags() const {
- return DeclareGlobalsEvalFlag::encode(is_eval()) |
- DeclareGlobalsNativeFlag::encode(is_native());
+bool OptimizedCompilationInfo::has_context() const {
+ return !closure().is_null();
}
-SourcePositionTableBuilder::RecordingMode
-CompilationInfo::SourcePositionRecordingMode() const {
- return is_native() ? SourcePositionTableBuilder::OMIT_SOURCE_POSITIONS
- : SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS;
-}
-
-bool CompilationInfo::has_context() const { return !closure().is_null(); }
-
-Context* CompilationInfo::context() const {
+Context* OptimizedCompilationInfo::context() const {
return has_context() ? closure()->context() : nullptr;
}
-bool CompilationInfo::has_native_context() const {
+bool OptimizedCompilationInfo::has_native_context() const {
return !closure().is_null() && (closure()->native_context() != nullptr);
}
-Context* CompilationInfo::native_context() const {
+Context* OptimizedCompilationInfo::native_context() const {
return has_native_context() ? closure()->native_context() : nullptr;
}
-bool CompilationInfo::has_global_object() const { return has_native_context(); }
+bool OptimizedCompilationInfo::has_global_object() const {
+ return has_native_context();
+}
-JSGlobalObject* CompilationInfo::global_object() const {
+JSGlobalObject* OptimizedCompilationInfo::global_object() const {
return has_global_object() ? native_context()->global_object() : nullptr;
}
-int CompilationInfo::AddInlinedFunction(
+int OptimizedCompilationInfo::AddInlinedFunction(
Handle<SharedFunctionInfo> inlined_function, SourcePosition pos) {
int id = static_cast<int>(inlined_functions_.size());
inlined_functions_.push_back(InlinedFunctionHolder(inlined_function, pos));
diff --git a/deps/v8/src/compilation-info.h b/deps/v8/src/optimized-compilation-info.h
index e68b6d88b4..f168db373a 100644
--- a/deps/v8/src/compilation-info.h
+++ b/deps/v8/src/optimized-compilation-info.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILATION_INFO_H_
-#define V8_COMPILATION_INFO_H_
+#ifndef V8_OPTIMIZED_COMPILATION_INFO_H_
+#define V8_OPTIMIZED_COMPILATION_INFO_H_
#include <memory>
@@ -30,37 +30,35 @@ class ParseInfo;
class SourceRangeMap;
class Zone;
-// CompilationInfo encapsulates some information known at compile time. It
-// is constructed based on the resources available at compile-time.
-// TODO(rmcilroy): Split CompilationInfo into two classes, one for unoptimized
-// compilation and one for optimized compilation, since they don't share much.
-class V8_EXPORT_PRIVATE CompilationInfo final {
+// OptimizedCompilationInfo encapsulates the information needed to compile
+// optimized code for a given function, and the results of the optimized
+// compilation.
+class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
public:
// Various configuration flags for a compilation, as well as some properties
// of the compiled code produced by a compilation.
enum Flag {
- kIsEval = 1 << 0,
- kIsNative = 1 << 1,
- kCollectTypeProfile = 1 << 2,
- kAccessorInliningEnabled = 1 << 3,
- kFunctionContextSpecializing = 1 << 4,
- kInliningEnabled = 1 << 5,
- kPoisonLoads = 1 << 6,
- kDisableFutureOptimization = 1 << 7,
- kSplittingEnabled = 1 << 8,
- kSourcePositionsEnabled = 1 << 9,
- kBailoutOnUninitialized = 1 << 10,
- kLoopPeelingEnabled = 1 << 11,
- kUntrustedCodeMitigations = 1 << 12,
- kSwitchJumpTableEnabled = 1 << 13,
- kGenerateSpeculationPoisonOnEntry = 1 << 14,
- kPoisonRegisterArguments = 1 << 15,
+ kAccessorInliningEnabled = 1 << 0,
+ kFunctionContextSpecializing = 1 << 1,
+ kInliningEnabled = 1 << 2,
+ kPoisonLoads = 1 << 3,
+ kDisableFutureOptimization = 1 << 4,
+ kSplittingEnabled = 1 << 5,
+ kSourcePositionsEnabled = 1 << 6,
+ kBailoutOnUninitialized = 1 << 7,
+ kLoopPeelingEnabled = 1 << 8,
+ kUntrustedCodeMitigations = 1 << 9,
+ kSwitchJumpTableEnabled = 1 << 10,
+ kCalledWithCodeStartRegister = 1 << 11,
+ kPoisonRegisterArguments = 1 << 12,
+ kAllocationFoldingEnabled = 1 << 13,
+ kAnalyzeEnvironmentLiveness = 1 << 14,
};
// TODO(mtrofin): investigate if this might be generalized outside wasm, with
// the goal of better separating the compiler from where compilation lands. At
- // that point, the Handle<Code> member of CompilationInfo would also be
- // removed.
+ // that point, the Handle<Code> member of OptimizedCompilationInfo would also
+ // be removed.
struct WasmCodeDesc {
CodeDesc code_desc;
size_t safepoint_table_offset = 0;
@@ -69,37 +67,19 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
Handle<ByteArray> source_positions_table;
};
- // Construct a compilation info for unoptimized compilation.
- CompilationInfo(Zone* zone, ParseInfo* parse_info, FunctionLiteral* literal);
// Construct a compilation info for optimized compilation.
- CompilationInfo(Zone* zone, Isolate* isolate,
- Handle<SharedFunctionInfo> shared,
- Handle<JSFunction> closure);
+ OptimizedCompilationInfo(Zone* zone, Isolate* isolate,
+ Handle<SharedFunctionInfo> shared,
+ Handle<JSFunction> closure);
// Construct a compilation info for stub compilation (or testing).
- CompilationInfo(Vector<const char> debug_name, Zone* zone,
- Code::Kind code_kind);
- ~CompilationInfo();
-
- FunctionLiteral* literal() const { return literal_; }
- void set_literal(FunctionLiteral* literal) {
- DCHECK_NOT_NULL(literal);
- literal_ = literal;
- }
-
- bool has_source_range_map() const { return source_range_map_ != nullptr; }
- SourceRangeMap* source_range_map() const { return source_range_map_; }
- void set_source_range_map(SourceRangeMap* source_range_map) {
- source_range_map_ = source_range_map;
- }
+ OptimizedCompilationInfo(Vector<const char> debug_name, Zone* zone,
+ Code::Kind code_kind);
- DeclarationScope* scope() const;
+ ~OptimizedCompilationInfo();
Zone* zone() { return zone_; }
bool is_osr() const { return !osr_offset_.IsNone(); }
Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
- void set_shared_info(Handle<SharedFunctionInfo> shared_info) {
- shared_info_ = shared_info;
- }
bool has_shared_info() const { return !shared_info().is_null(); }
Handle<JSFunction> closure() const { return closure_; }
Handle<Code> code() const { return code_; }
@@ -114,25 +94,6 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
void set_builtin_index(int32_t index) { builtin_index_ = index; }
BailoutId osr_offset() const { return osr_offset_; }
JavaScriptFrame* osr_frame() const { return osr_frame_; }
- int num_parameters() const;
- int num_parameters_including_this() const;
-
- bool has_bytecode_array() const { return !bytecode_array_.is_null(); }
- Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
-
- bool has_asm_wasm_data() const { return !asm_wasm_data_.is_null(); }
- Handle<FixedArray> asm_wasm_data() const { return asm_wasm_data_; }
-
- // Flags used by unoptimized compilation.
-
- void MarkAsEval() { SetFlag(kIsEval); }
- bool is_eval() const { return GetFlag(kIsEval); }
-
- void MarkAsNative() { SetFlag(kIsNative); }
- bool is_native() const { return GetFlag(kIsNative); }
-
- void MarkAsCollectTypeProfile() { SetFlag(kCollectTypeProfile); }
- bool collect_type_profile() const { return GetFlag(kCollectTypeProfile); }
// Flags used by optimized compilation.
@@ -178,36 +139,37 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
return GetFlag(kSwitchJumpTableEnabled);
}
- bool is_generating_speculation_poison_on_entry() const {
- bool enabled = GetFlag(kGenerateSpeculationPoisonOnEntry);
- DCHECK_IMPLIES(enabled, has_untrusted_code_mitigations());
+ bool called_with_code_start_register() const {
+ bool enabled = GetFlag(kCalledWithCodeStartRegister);
return enabled;
}
void MarkAsPoisoningRegisterArguments() {
DCHECK(has_untrusted_code_mitigations());
- SetFlag(kGenerateSpeculationPoisonOnEntry);
SetFlag(kPoisonRegisterArguments);
}
bool is_poisoning_register_arguments() const {
bool enabled = GetFlag(kPoisonRegisterArguments);
DCHECK_IMPLIES(enabled, has_untrusted_code_mitigations());
+ DCHECK_IMPLIES(enabled, called_with_code_start_register());
return enabled;
}
- // Code getters and setters.
-
- void SetCode(Handle<Code> code) { code_ = code; }
-
- void SetBytecodeArray(Handle<BytecodeArray> bytecode_array) {
- bytecode_array_ = bytecode_array;
+ void MarkAsAllocationFoldingEnabled() { SetFlag(kAllocationFoldingEnabled); }
+ bool is_allocation_folding_enabled() const {
+ return GetFlag(kAllocationFoldingEnabled);
}
- void SetAsmWasmData(Handle<FixedArray> asm_wasm_data) {
- asm_wasm_data_ = asm_wasm_data;
+ void MarkAsAnalyzeEnvironmentLiveness() {
+ SetFlag(kAnalyzeEnvironmentLiveness);
+ }
+ bool is_analyze_environment_liveness() const {
+ return GetFlag(kAnalyzeEnvironmentLiveness);
}
- FeedbackVectorSpec* feedback_vector_spec() { return &feedback_vector_spec_; }
+ // Code getters and setters.
+
+ void SetCode(Handle<Code> code) { code_ = code; }
bool has_context() const;
Context* context() const;
@@ -227,8 +189,7 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
}
bool IsStub() const {
return abstract_code_kind() != AbstractCode::OPTIMIZED_FUNCTION &&
- abstract_code_kind() != AbstractCode::WASM_FUNCTION &&
- abstract_code_kind() != AbstractCode::INTERPRETED_FUNCTION;
+ abstract_code_kind() != AbstractCode::WASM_FUNCTION;
}
void SetOptimizingForOsr(BailoutId osr_offset, JavaScriptFrame* osr_frame) {
DCHECK(IsOptimizing());
@@ -265,8 +226,6 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
return optimization_id_;
}
- bool has_simple_parameters();
-
struct InlinedFunctionHolder {
Handle<SharedFunctionInfo> shared_info;
@@ -296,33 +255,16 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
StackFrame::Type GetOutputStackFrameType() const;
- int GetDeclareGlobalsFlags() const;
-
- SourcePositionTableBuilder::RecordingMode SourcePositionRecordingMode() const;
-
- bool has_coverage_info() const { return !coverage_info_.is_null(); }
- Handle<CoverageInfo> coverage_info() const { return coverage_info_; }
- void set_coverage_info(Handle<CoverageInfo> coverage_info) {
- coverage_info_ = coverage_info;
- }
-
WasmCodeDesc* wasm_code_desc() { return &wasm_code_desc_; }
private:
- CompilationInfo(Vector<const char> debug_name, AbstractCode::Kind code_kind,
- Zone* zone);
+ OptimizedCompilationInfo(Vector<const char> debug_name,
+ AbstractCode::Kind code_kind, Zone* zone);
void SetFlag(Flag flag) { flags_ |= flag; }
-
- void SetFlag(Flag flag, bool value) {
- flags_ = value ? flags_ | flag : flags_ & ~flag;
- }
-
bool GetFlag(Flag flag) const { return (flags_ & flag) != 0; }
- FunctionLiteral* literal_;
- SourceRangeMap* source_range_map_; // Used when block coverage is enabled.
-
+ // Compilation flags.
unsigned flags_;
AbstractCode::Kind code_kind_;
@@ -340,19 +282,8 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
// Entry point when compiling for OSR, {BailoutId::None} otherwise.
BailoutId osr_offset_;
- // Holds the bytecode array generated by the interpreter.
- // TODO(rmcilroy/mstarzinger): Temporary work-around until compiler.cc is
- // refactored to avoid us needing to carry the BytcodeArray around.
- Handle<BytecodeArray> bytecode_array_;
-
- // Holds the asm_wasm array generated by the asmjs compiler.
- Handle<FixedArray> asm_wasm_data_;
-
- // Holds the feedback vector spec generated during compilation
- FeedbackVectorSpec feedback_vector_spec_;
-
// The zone from which the compilation pipeline working on this
- // CompilationInfo allocates.
+ // OptimizedCompilationInfo allocates.
Zone* zone_;
std::shared_ptr<DeferredHandles> deferred_handles_;
@@ -371,14 +302,10 @@ class V8_EXPORT_PRIVATE CompilationInfo final {
Vector<const char> debug_name_;
- // Encapsulates coverage information gathered by the bytecode generator.
- // Needs to be stored on the shared function info once compilation completes.
- Handle<CoverageInfo> coverage_info_;
-
- DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
+ DISALLOW_COPY_AND_ASSIGN(OptimizedCompilationInfo);
};
} // namespace internal
} // namespace v8
-#endif // V8_COMPILATION_INFO_H_
+#endif // V8_OPTIMIZED_COMPILATION_INFO_H_
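The renamed OptimizedCompilationInfo keeps its compilation flags in a single unsigned word manipulated through SetFlag/GetFlag, as the hunk above shows. A minimal standalone C++ sketch of that pattern; the flag names below are illustrative, not the class's real flag set:

#include <cstdio>

class CompilationFlags {
 public:
  // Illustrative flag bits; the real enum lives in OptimizedCompilationInfo.
  enum Flag : unsigned {
    kCalledWithCodeStartRegister = 1u << 0,
    kPoisonRegisterArguments = 1u << 1,
    kAllocationFoldingEnabled = 1u << 2,
  };

  void MarkAsAllocationFoldingEnabled() { SetFlag(kAllocationFoldingEnabled); }
  bool is_allocation_folding_enabled() const {
    return GetFlag(kAllocationFoldingEnabled);
  }

 private:
  void SetFlag(Flag flag) { flags_ |= flag; }
  bool GetFlag(Flag flag) const { return (flags_ & flag) != 0; }

  unsigned flags_ = 0;  // all flags clear by default
};

int main() {
  CompilationFlags info;
  std::printf("%d\n", info.is_allocation_folding_enabled());  // 0
  info.MarkAsAllocationFoldingEnabled();
  std::printf("%d\n", info.is_allocation_folding_enabled());  // 1
  return 0;
}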
diff --git a/deps/v8/src/parsing/parse-info.cc b/deps/v8/src/parsing/parse-info.cc
index 8657dab7f2..451d2e8131 100644
--- a/deps/v8/src/parsing/parse-info.cc
+++ b/deps/v8/src/parsing/parse-info.cc
@@ -24,7 +24,7 @@ ParseInfo::ParseInfo(AccountingAllocator* zone_allocator)
unicode_cache_(nullptr),
stack_limit_(0),
hash_seed_(0),
- compiler_hints_(0),
+ function_flags_(0),
start_position_(0),
end_position_(0),
parameters_end_pos_(kNoSourcePosition),
@@ -52,9 +52,9 @@ ParseInfo::ParseInfo(Handle<SharedFunctionInfo> shared)
set_wrapped_as_function(shared->is_wrapped());
set_allow_lazy_parsing(FLAG_lazy_inner_functions);
set_is_named_expression(shared->is_named_expression());
- set_compiler_hints(shared->compiler_hints());
- set_start_position(shared->start_position());
- set_end_position(shared->end_position());
+ set_function_flags(shared->flags());
+ set_start_position(shared->StartPosition());
+ set_end_position(shared->EndPosition());
function_literal_id_ = shared->function_literal_id();
set_language_mode(shared->language_mode());
set_asm_wasm_broken(shared->is_asm_wasm_broken());
@@ -66,10 +66,8 @@ ParseInfo::ParseInfo(Handle<SharedFunctionInfo> shared)
set_module(script->origin_options().IsModule());
DCHECK(!(is_eval() && is_module()));
- Handle<HeapObject> scope_info(shared->outer_scope_info());
- if (!scope_info->IsTheHole(isolate) &&
- Handle<ScopeInfo>::cast(scope_info)->length() > 0) {
- set_outer_scope_info(Handle<ScopeInfo>::cast(scope_info));
+ if (shared->HasOuterScopeInfo()) {
+ set_outer_scope_info(handle(shared->GetOuterScopeInfo()));
}
// CollectTypeProfile uses its own feedback slots. If we have existing
@@ -77,9 +75,9 @@ ParseInfo::ParseInfo(Handle<SharedFunctionInfo> shared)
// has the appropriate slots.
set_collect_type_profile(
isolate->is_collecting_type_profile() &&
- (shared->feedback_metadata()->length() == 0
- ? script->IsUserJavaScript()
- : shared->feedback_metadata()->HasTypeProfileSlot()));
+ (shared->HasFeedbackMetadata()
+ ? shared->feedback_metadata()->HasTypeProfileSlot()
+ : script->IsUserJavaScript()));
if (block_coverage_enabled() && script->IsUserJavaScript()) {
AllocateSourceRangeMap();
}
@@ -117,9 +115,9 @@ ParseInfo* ParseInfo::AllocateWithoutScript(Handle<SharedFunctionInfo> shared) {
p->set_toplevel(shared->is_toplevel());
p->set_allow_lazy_parsing(FLAG_lazy_inner_functions);
p->set_is_named_expression(shared->is_named_expression());
- p->set_compiler_hints(shared->compiler_hints());
- p->set_start_position(shared->start_position());
- p->set_end_position(shared->end_position());
+ p->set_function_flags(shared->flags());
+ p->set_start_position(shared->StartPosition());
+ p->set_end_position(shared->EndPosition());
p->function_literal_id_ = shared->function_literal_id();
p->set_language_mode(shared->language_mode());
@@ -136,7 +134,7 @@ ParseInfo* ParseInfo::AllocateWithoutScript(Handle<SharedFunctionInfo> shared) {
p->set_module(false);
DCHECK_NE(shared->kind(), FunctionKind::kModule);
- Handle<HeapObject> scope_info(shared->outer_scope_info());
+ Handle<HeapObject> scope_info(shared->GetOuterScopeInfo());
if (!scope_info->IsTheHole(isolate) &&
Handle<ScopeInfo>::cast(scope_info)->length() > 0) {
p->set_outer_scope_info(Handle<ScopeInfo>::cast(scope_info));
@@ -147,16 +145,16 @@ ParseInfo* ParseInfo::AllocateWithoutScript(Handle<SharedFunctionInfo> shared) {
DeclarationScope* ParseInfo::scope() const { return literal()->scope(); }
bool ParseInfo::is_declaration() const {
- return SharedFunctionInfo::IsDeclarationBit::decode(compiler_hints_);
+ return SharedFunctionInfo::IsDeclarationBit::decode(function_flags_);
}
FunctionKind ParseInfo::function_kind() const {
- return SharedFunctionInfo::FunctionKindBits::decode(compiler_hints_);
+ return SharedFunctionInfo::FunctionKindBits::decode(function_flags_);
}
bool ParseInfo::requires_instance_fields_initializer() const {
return SharedFunctionInfo::RequiresInstanceFieldsInitializer::decode(
- compiler_hints_);
+ function_flags_);
}
void ParseInfo::InitFromIsolate(Isolate* isolate) {
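The accessors above (is_declaration, function_kind, requires_instance_fields_initializer) read individual fields out of the packed function_flags_ integer via BitField-style decode calls. A hedged sketch of that packing scheme, using a simplified helper and a made-up field layout rather than SharedFunctionInfo's real one:

#include <cstdint>
#include <cstdio>

// Simplified stand-in for the BitField helper; the layout below is illustrative.
template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr uint32_t kMask = ((1u << kSize) - 1u) << kShift;
  static uint32_t encode(T value) {
    return (static_cast<uint32_t>(value) << kShift) & kMask;
  }
  static T decode(uint32_t packed) {
    return static_cast<T>((packed & kMask) >> kShift);
  }
};

enum class FunctionKind : uint32_t { kNormal = 0, kArrow = 1, kModule = 2 };
using IsDeclarationBit = BitField<bool, 0, 1>;
using FunctionKindBits = BitField<FunctionKind, 1, 5>;

int main() {
  uint32_t function_flags = IsDeclarationBit::encode(true) |
                            FunctionKindBits::encode(FunctionKind::kArrow);
  std::printf("%d %d\n", IsDeclarationBit::decode(function_flags),
              static_cast<int>(FunctionKindBits::decode(function_flags)));
  // prints: 1 1
  return 0;
}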
diff --git a/deps/v8/src/parsing/parse-info.h b/deps/v8/src/parsing/parse-info.h
index 5a0cf138c1..08f15c865c 100644
--- a/deps/v8/src/parsing/parse-info.h
+++ b/deps/v8/src/parsing/parse-info.h
@@ -142,9 +142,9 @@ class V8_EXPORT_PRIVATE ParseInfo {
uint32_t hash_seed() const { return hash_seed_; }
void set_hash_seed(uint32_t hash_seed) { hash_seed_ = hash_seed; }
- int compiler_hints() const { return compiler_hints_; }
- void set_compiler_hints(int compiler_hints) {
- compiler_hints_ = compiler_hints;
+ int function_flags() const { return function_flags_; }
+ void set_function_flags(int function_flags) {
+ function_flags_ = function_flags;
}
int start_position() const { return start_position_; }
@@ -195,7 +195,7 @@ class V8_EXPORT_PRIVATE ParseInfo {
return &pending_error_handler_;
}
- // Getters for individual compiler hints.
+ // Getters for individual function flags.
bool is_declaration() const;
FunctionKind function_kind() const;
bool requires_instance_fields_initializer() const;
@@ -265,7 +265,9 @@ class V8_EXPORT_PRIVATE ParseInfo {
UnicodeCache* unicode_cache_;
uintptr_t stack_limit_;
uint32_t hash_seed_;
- int compiler_hints_;
+ // TODO(leszeks): Move any remaining flags used here either to the flags_
+ // field or to other fields.
+ int function_flags_;
int start_position_;
int end_position_;
int parameters_end_pos_;
diff --git a/deps/v8/src/parsing/parser-base.h b/deps/v8/src/parsing/parser-base.h
index 2d608d5f40..1a6df33083 100644
--- a/deps/v8/src/parsing/parser-base.h
+++ b/deps/v8/src/parsing/parser-base.h
@@ -307,6 +307,12 @@ class ParserBase {
void set_allow_harmony_bigint(bool allow) {
scanner()->set_allow_harmony_bigint(allow);
}
+ bool allow_harmony_numeric_separator() const {
+ return scanner()->allow_harmony_numeric_separator();
+ }
+ void set_allow_harmony_numeric_separator(bool allow) {
+ scanner()->set_allow_harmony_numeric_separator(allow);
+ }
bool allow_harmony_private_fields() const {
return scanner()->allow_harmony_private_fields();
diff --git a/deps/v8/src/parsing/parser.cc b/deps/v8/src/parsing/parser.cc
index 8dc16a8b35..8b4d346f5a 100644
--- a/deps/v8/src/parsing/parser.cc
+++ b/deps/v8/src/parsing/parser.cc
@@ -448,6 +448,7 @@ Parser::Parser(ParseInfo* info)
set_allow_harmony_dynamic_import(FLAG_harmony_dynamic_import);
set_allow_harmony_import_meta(FLAG_harmony_import_meta);
set_allow_harmony_bigint(FLAG_harmony_bigint);
+ set_allow_harmony_numeric_separator(FLAG_harmony_numeric_separator);
set_allow_harmony_optional_catch_binding(FLAG_harmony_optional_catch_binding);
set_allow_harmony_private_fields(FLAG_harmony_private_fields);
for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
@@ -693,7 +694,7 @@ FunctionLiteral* Parser::ParseFunction(Isolate* isolate, ParseInfo* info,
DCHECK_EQ(factory()->zone(), info->zone());
// Initialize parser state.
- Handle<String> name(shared_info->name());
+ Handle<String> name(shared_info->Name());
info->set_function_name(ast_value_factory()->GetString(name));
scanner_.Initialize(info->character_stream(), info->is_module());
@@ -3491,35 +3492,10 @@ Expression* Parser::CloseTemplateLiteral(TemplateLiteralState* state, int start,
DCHECK_EQ(cooked_strings->length(), expressions->length() + 1);
if (!tag) {
- Expression* first_string =
- factory()->NewStringLiteral(cooked_strings->at(0), kNoSourcePosition);
- if (expressions->length() == 0) return first_string;
-
- // Build N-ary addition op to simplify code-generation.
- // TODO(leszeks): Could we just store this expression in the
- // TemplateLiteralState and build it as we go?
- NaryOperation* expr = factory()->NewNaryOperation(
- Token::ADD, first_string, 2 * expressions->length());
-
- int i = 0;
- while (i < expressions->length()) {
- Expression* sub = expressions->at(i++);
- const AstRawString* cooked_str = cooked_strings->at(i);
- DCHECK_NOT_NULL(cooked_str);
-
- // Let middle be ToString(sub).
- ZoneList<Expression*>* args =
- new (zone()) ZoneList<Expression*>(1, zone());
- args->Add(sub, zone());
- Expression* sub_to_string = factory()->NewCallRuntime(
- Runtime::kInlineToString, args, sub->position());
-
- expr->AddSubsequent(sub_to_string, sub->position());
- expr->AddSubsequent(
- factory()->NewStringLiteral(cooked_str, kNoSourcePosition),
- sub->position());
+ if (cooked_strings->length() == 1) {
+ return factory()->NewStringLiteral(cooked_strings->first(), pos);
}
- return expr;
+ return factory()->NewTemplateLiteral(cooked_strings, expressions, pos);
} else {
// GetTemplateObject
Expression* template_object =
@@ -3547,96 +3523,52 @@ bool OnlyLastArgIsSpread(ZoneList<Expression*>* args) {
} // namespace
-ZoneList<Expression*>* Parser::PrepareSpreadArguments(
+ArrayLiteral* Parser::ArrayLiteralFromListWithSpread(
ZoneList<Expression*>* list) {
- ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(1, zone());
- if (list->length() == 1) {
- // Spread-call with single spread argument produces an InternalArray
- // containing the values from the array.
- //
- // Function is called or constructed with the produced array of arguments
- //
- // EG: Apply(Func, Spread(spread0))
- ZoneList<Expression*>* spread_list =
- new (zone()) ZoneList<Expression*>(0, zone());
- spread_list->Add(list->at(0)->AsSpread()->expression(), zone());
- args->Add(factory()->NewCallRuntime(Runtime::kSpreadIterablePrepare,
- spread_list, kNoSourcePosition),
- zone());
- return args;
- } else {
- // Spread-call with multiple arguments produces array literals for each
- // sequences of unspread arguments, and converts each spread iterable to
- // an Internal array. Finally, all of these produced arrays are flattened
- // into a single InternalArray, containing the arguments for the call.
- //
- // EG: Apply(Func, Flatten([unspread0, unspread1], Spread(spread0),
- // Spread(spread1), [unspread2, unspread3]))
- int i = 0;
- int n = list->length();
- while (i < n) {
- if (!list->at(i)->IsSpread()) {
- ZoneList<Expression*>* unspread =
- new (zone()) ZoneList<Expression*>(1, zone());
-
- // Push array of unspread parameters
- while (i < n && !list->at(i)->IsSpread()) {
- unspread->Add(list->at(i++), zone());
- }
- args->Add(factory()->NewArrayLiteral(unspread, kNoSourcePosition),
- zone());
-
- if (i == n) break;
- }
+ // If there's only a single spread argument, a fast path using CallWithSpread
+ // is taken.
+ DCHECK_LT(1, list->length());
- // Push eagerly spread argument
- ZoneList<Expression*>* spread_list =
- new (zone()) ZoneList<Expression*>(1, zone());
- spread_list->Add(list->at(i++)->AsSpread()->expression(), zone());
- args->Add(factory()->NewCallRuntime(Context::SPREAD_ITERABLE_INDEX,
- spread_list, kNoSourcePosition),
- zone());
- }
-
- list = new (zone()) ZoneList<Expression*>(1, zone());
- list->Add(factory()->NewCallRuntime(Context::SPREAD_ARGUMENTS_INDEX, args,
- kNoSourcePosition),
- zone());
- return list;
+ // The arguments of the spread call become a single ArrayLiteral.
+ int first_spread = 0;
+ for (; first_spread < list->length() && !list->at(first_spread)->IsSpread();
+ ++first_spread) {
}
- UNREACHABLE();
+
+ DCHECK_LT(first_spread, list->length());
+ return factory()->NewArrayLiteral(list, first_spread, kNoSourcePosition);
}
Expression* Parser::SpreadCall(Expression* function,
- ZoneList<Expression*>* args, int pos,
+ ZoneList<Expression*>* args_list, int pos,
Call::PossiblyEval is_possibly_eval) {
// Handle this case in BytecodeGenerator.
- if (OnlyLastArgIsSpread(args)) {
- return factory()->NewCall(function, args, pos);
+ if (OnlyLastArgIsSpread(args_list)) {
+ return factory()->NewCall(function, args_list, pos);
}
if (function->IsSuperCallReference()) {
// Super calls
// $super_constructor = %_GetSuperConstructor(<this-function>)
// %reflect_construct($super_constructor, args, new.target)
-
- args = PrepareSpreadArguments(args);
+ ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(3, zone());
ZoneList<Expression*>* tmp = new (zone()) ZoneList<Expression*>(1, zone());
tmp->Add(function->AsSuperCallReference()->this_function_var(), zone());
- Expression* super_constructor = factory()->NewCallRuntime(
- Runtime::kInlineGetSuperConstructor, tmp, pos);
- args->InsertAt(0, super_constructor, zone());
+ args->Add(factory()->NewCallRuntime(Runtime::kInlineGetSuperConstructor,
+ tmp, pos),
+ zone());
+ args->Add(ArrayLiteralFromListWithSpread(args_list), zone());
args->Add(function->AsSuperCallReference()->new_target_var(), zone());
return factory()->NewCallRuntime(Context::REFLECT_CONSTRUCT_INDEX, args,
pos);
} else {
- args = PrepareSpreadArguments(args);
+ ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(3, zone());
if (function->IsProperty()) {
// Method calls
if (function->AsProperty()->IsSuperAccess()) {
Expression* home = ThisExpression(kNoSourcePosition);
- args->InsertAt(0, function, zone());
- args->InsertAt(1, home, zone());
+ args->Add(function, zone());
+ args->Add(home, zone());
} else {
Variable* temp = NewTemporary(ast_value_factory()->empty_string());
VariableProxy* obj = factory()->NewVariableProxy(temp);
@@ -3645,28 +3577,29 @@ Expression* Parser::SpreadCall(Expression* function,
kNoSourcePosition);
function = factory()->NewProperty(
assign_obj, function->AsProperty()->key(), kNoSourcePosition);
- args->InsertAt(0, function, zone());
+ args->Add(function, zone());
obj = factory()->NewVariableProxy(temp);
- args->InsertAt(1, obj, zone());
+ args->Add(obj, zone());
}
} else {
// Non-method calls
- args->InsertAt(0, function, zone());
- args->InsertAt(1, factory()->NewUndefinedLiteral(kNoSourcePosition),
- zone());
+ args->Add(function, zone());
+ args->Add(factory()->NewUndefinedLiteral(kNoSourcePosition), zone());
}
+ args->Add(ArrayLiteralFromListWithSpread(args_list), zone());
return factory()->NewCallRuntime(Context::REFLECT_APPLY_INDEX, args, pos);
}
}
Expression* Parser::SpreadCallNew(Expression* function,
- ZoneList<Expression*>* args, int pos) {
- if (OnlyLastArgIsSpread(args)) {
+ ZoneList<Expression*>* args_list, int pos) {
+ if (OnlyLastArgIsSpread(args_list)) {
// Handle in BytecodeGenerator.
- return factory()->NewCallNew(function, args, pos);
+ return factory()->NewCallNew(function, args_list, pos);
}
- args = PrepareSpreadArguments(args);
- args->InsertAt(0, function, zone());
+ ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(2, zone());
+ args->Add(function, zone());
+ args->Add(ArrayLiteralFromListWithSpread(args_list), zone());
return factory()->NewCallRuntime(Context::REFLECT_CONSTRUCT_INDEX, args, pos);
}
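In the SpreadCall/SpreadCallNew rewrite above, the old PrepareSpreadArguments runtime-call chain is replaced by a single array literal whose only extra piece of information is the index of the first spread element. A small sketch of that index computation with simplified stand-in types:

#include <algorithm>
#include <cstdio>
#include <vector>

struct Expression {
  bool is_spread;  // stand-in for expr->IsSpread()
};

// Returns the index of the first spread element (args.size() if there is none).
int FirstSpreadIndex(const std::vector<Expression>& args) {
  auto it = std::find_if(args.begin(), args.end(),
                         [](const Expression& e) { return e.is_spread; });
  return static_cast<int>(it - args.begin());
}

int main() {
  // f(a, b, ...rest, c) -> one array literal [a, b, ...rest, c], spread at 2
  std::vector<Expression> args = {{false}, {false}, {true}, {false}};
  std::printf("%d\n", FirstSpreadIndex(args));  // 2
  return 0;
}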
diff --git a/deps/v8/src/parsing/parser.h b/deps/v8/src/parsing/parser.h
index dcc222da0f..2da4490906 100644
--- a/deps/v8/src/parsing/parser.h
+++ b/deps/v8/src/parsing/parser.h
@@ -502,7 +502,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
Expression* CloseTemplateLiteral(TemplateLiteralState* state, int start,
Expression* tag);
- ZoneList<Expression*>* PrepareSpreadArguments(ZoneList<Expression*>* list);
+ ArrayLiteral* ArrayLiteralFromListWithSpread(ZoneList<Expression*>* list);
Expression* SpreadCall(Expression* function, ZoneList<Expression*>* args,
int pos, Call::PossiblyEval is_possibly_eval);
Expression* SpreadCallNew(Expression* function, ZoneList<Expression*>* args,
@@ -706,6 +706,15 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
}
}
+ // A shortcut for performing a ToString operation
+ V8_INLINE Expression* ToString(Expression* expr) {
+ if (expr->IsStringLiteral()) return expr;
+ ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(1, zone());
+ args->Add(expr, zone());
+ return factory()->NewCallRuntime(Runtime::kInlineToString, args,
+ expr->position());
+ }
+
// Returns true if we have a binary expression between two numeric
// literals. In that case, *x will be changed to an expression which is the
// computed value.
diff --git a/deps/v8/src/parsing/parsing.cc b/deps/v8/src/parsing/parsing.cc
index d34f826a23..dc66876e7d 100644
--- a/deps/v8/src/parsing/parsing.cc
+++ b/deps/v8/src/parsing/parsing.cc
@@ -63,7 +63,7 @@ bool ParseFunction(ParseInfo* info, Handle<SharedFunctionInfo> shared_info,
source = String::Flatten(source);
isolate->counters()->total_parse_size()->Increment(source->length());
std::unique_ptr<Utf16CharacterStream> stream(ScannerStream::For(
- source, shared_info->start_position(), shared_info->end_position()));
+ source, shared_info->StartPosition(), shared_info->EndPosition()));
info->set_character_stream(std::move(stream));
VMState<PARSER> state(isolate);
diff --git a/deps/v8/src/parsing/pattern-rewriter.cc b/deps/v8/src/parsing/pattern-rewriter.cc
index daa126d443..b0dfe0a66f 100644
--- a/deps/v8/src/parsing/pattern-rewriter.cc
+++ b/deps/v8/src/parsing/pattern-rewriter.cc
@@ -578,13 +578,15 @@ void PatternRewriter::VisitArrayLiteral(ArrayLiteral* node,
// RecurseIntoSubpattern above.
// let array = [];
+ // let index = 0;
// while (!done) {
// done = true; // If .next, .done or .value throws, don't close.
// result = IteratorNext(iterator);
// if (!result.done) {
- // %AppendElement(array, result.value);
+ // StoreInArrayLiteral(array, index, result.value);
// done = false;
// }
+ // index++;
// }
// let array = [];
@@ -595,6 +597,10 @@ void PatternRewriter::VisitArrayLiteral(ArrayLiteral* node,
factory()->NewArrayLiteral(empty_exprs, kNoSourcePosition));
}
+ // let index = 0;
+ Variable* index =
+ CreateTempVar(factory()->NewSmiLiteral(0, kNoSourcePosition));
+
// done = true;
Statement* set_done = factory()->NewExpressionStatement(
factory()->NewAssignment(
@@ -609,19 +615,18 @@ void PatternRewriter::VisitArrayLiteral(ArrayLiteral* node,
result, IteratorType::kNormal, nopos),
nopos);
- // %AppendElement(array, result.value);
- Statement* append_element;
+ // StoreInArrayLiteral(array, index, result.value);
+ Statement* store;
{
- auto args = new (zone()) ZoneList<Expression*>(2, zone());
- args->Add(factory()->NewVariableProxy(array), zone());
- args->Add(factory()->NewProperty(
- factory()->NewVariableProxy(result),
- factory()->NewStringLiteral(
- ast_value_factory()->value_string(), nopos),
- nopos),
- zone());
- append_element = factory()->NewExpressionStatement(
- factory()->NewCallRuntime(Runtime::kAppendElement, args, nopos),
+ auto value = factory()->NewProperty(
+ factory()->NewVariableProxy(result),
+ factory()->NewStringLiteral(ast_value_factory()->value_string(),
+ nopos),
+ nopos);
+ store = factory()->NewExpressionStatement(
+ factory()->NewStoreInArrayLiteral(factory()->NewVariableProxy(array),
+ factory()->NewVariableProxy(index),
+ value, nopos),
nopos);
}
@@ -632,8 +637,8 @@ void PatternRewriter::VisitArrayLiteral(ArrayLiteral* node,
factory()->NewBooleanLiteral(false, nopos), nopos),
nopos);
- // if (!result.done) { #append_element; #unset_done }
- Statement* maybe_append_and_unset_done;
+ // if (!result.done) { #store; #unset_done }
+ Statement* maybe_store_and_unset_done;
{
Expression* result_done =
factory()->NewProperty(factory()->NewVariableProxy(result),
@@ -642,27 +647,38 @@ void PatternRewriter::VisitArrayLiteral(ArrayLiteral* node,
nopos);
Block* then = factory()->NewBlock(2, true);
- then->statements()->Add(append_element, zone());
+ then->statements()->Add(store, zone());
then->statements()->Add(unset_done, zone());
- maybe_append_and_unset_done = factory()->NewIfStatement(
+ maybe_store_and_unset_done = factory()->NewIfStatement(
factory()->NewUnaryOperation(Token::NOT, result_done, nopos), then,
factory()->NewEmptyStatement(nopos), nopos);
}
+ // index++;
+ Statement* increment_index;
+ {
+ increment_index = factory()->NewExpressionStatement(
+ factory()->NewCountOperation(
+ Token::INC, false, factory()->NewVariableProxy(index), nopos),
+ nopos);
+ }
+
// while (!done) {
// #set_done;
// #get_next;
- // #maybe_append_and_unset_done;
+ // #maybe_store_and_unset_done;
+ // #increment_index;
// }
WhileStatement* loop = factory()->NewWhileStatement(nullptr, nopos);
{
Expression* condition = factory()->NewUnaryOperation(
Token::NOT, factory()->NewVariableProxy(done), nopos);
- Block* body = factory()->NewBlock(3, true);
+ Block* body = factory()->NewBlock(4, true);
body->statements()->Add(set_done, zone());
body->statements()->Add(get_next, zone());
- body->statements()->Add(maybe_append_and_unset_done, zone());
+ body->statements()->Add(maybe_store_and_unset_done, zone());
+ body->statements()->Add(increment_index, zone());
loop->Initialize(condition, body);
}
@@ -767,9 +783,11 @@ NOT_A_PATTERN(ResolvedProperty)
NOT_A_PATTERN(ReturnStatement)
NOT_A_PATTERN(SloppyBlockFunctionStatement)
NOT_A_PATTERN(Spread)
+NOT_A_PATTERN(StoreInArrayLiteral)
NOT_A_PATTERN(SuperPropertyReference)
NOT_A_PATTERN(SuperCallReference)
NOT_A_PATTERN(SwitchStatement)
+NOT_A_PATTERN(TemplateLiteral)
NOT_A_PATTERN(ThisFunction)
NOT_A_PATTERN(Throw)
NOT_A_PATTERN(TryCatchStatement)
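The pseudo-code comment in the pattern-rewriter hunk above describes the new desugaring: instead of calling %AppendElement, each iterator result is stored at an explicit index that is incremented on every loop iteration, including the final one where the iterator reports done. A standalone C++ analogue of that loop shape; FakeIterator is a made-up stand-in for a JS iterator:

#include <cstdio>
#include <optional>
#include <vector>

struct FakeIterator {
  int next_ = 0;
  std::optional<int> Next() {              // empty optional <=> result.done
    if (next_ >= 3) return std::nullopt;
    return next_++ * 10;
  }
};

int main() {
  FakeIterator it;
  std::vector<int> array;                   // let array = [];
  int index = 0;                            // let index = 0;
  bool done = false;
  while (!done) {
    done = true;                            // stays true if Next() were to throw
    std::optional<int> result = it.Next();  // result = IteratorNext(iterator);
    if (result.has_value()) {               // if (!result.done)
      array.resize(index + 1);
      array[index] = *result;               // StoreInArrayLiteral(array, index, value)
      done = false;
    }
    ++index;                                // index++;
  }
  for (int v : array) std::printf("%d\n", v);  // 0 10 20
  return 0;
}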
diff --git a/deps/v8/src/parsing/scanner.cc b/deps/v8/src/parsing/scanner.cc
index a6dd075fec..942e5aecc5 100644
--- a/deps/v8/src/parsing/scanner.cc
+++ b/deps/v8/src/parsing/scanner.cc
@@ -182,7 +182,8 @@ Scanner::Scanner(UnicodeCache* unicode_cache)
octal_pos_(Location::invalid()),
octal_message_(MessageTemplate::kNone),
found_html_comment_(false),
- allow_harmony_bigint_(false) {}
+ allow_harmony_bigint_(false),
+ allow_harmony_numeric_separator_(false) {}
void Scanner::Initialize(Utf16CharacterStream* source, bool is_module) {
DCHECK_NOT_NULL(source);
@@ -1014,7 +1015,7 @@ bool Scanner::ScanEscape() {
case '5': // fall through
case '6': // fall through
case '7':
- c = ScanOctalEscape<capture_raw>(c, 2);
+ c = ScanOctalEscape<capture_raw>(c, 2, in_template_literal);
break;
}
@@ -1023,9 +1024,8 @@ bool Scanner::ScanEscape() {
return true;
}
-
template <bool capture_raw>
-uc32 Scanner::ScanOctalEscape(uc32 c, int length) {
+uc32 Scanner::ScanOctalEscape(uc32 c, int length, bool in_template_literal) {
uc32 x = c - '0';
int i = 0;
for (; i < length; i++) {
@@ -1043,7 +1043,9 @@ uc32 Scanner::ScanOctalEscape(uc32 c, int length) {
// occur before the "use strict" directive.
if (c != '0' || i > 0 || c0_ == '8' || c0_ == '9') {
octal_pos_ = Location(source_pos() - i - 1, source_pos() - 1);
- octal_message_ = MessageTemplate::kStrictOctalEscape;
+ octal_message_ = in_template_literal
+ ? MessageTemplate::kTemplateOctalLiteral
+ : MessageTemplate::kStrictOctalEscape;
}
return x;
}
@@ -1223,14 +1225,99 @@ Handle<String> Scanner::SourceMappingUrl(Isolate* isolate) const {
return tmp;
}
-void Scanner::ScanDecimalDigits() {
- while (IsDecimalDigit(c0_))
+bool Scanner::ScanDigitsWithNumericSeparators(bool (*predicate)(uc32 ch),
+ bool is_check_first_digit) {
+ // we must have at least one digit after 'x'/'b'/'o'
+ if (is_check_first_digit && !predicate(c0_)) return false;
+
+ bool separator_seen = false;
+ while (predicate(c0_) || c0_ == '_') {
+ if (c0_ == '_') {
+ Advance<false, false>();
+ if (c0_ == '_') {
+ ReportScannerError(Location(source_pos(), source_pos() + 1),
+ MessageTemplate::kContinuousNumericSeparator);
+ return false;
+ }
+ separator_seen = true;
+ continue;
+ }
+ separator_seen = false;
AddLiteralCharAdvance();
+ }
+
+ if (separator_seen) {
+ ReportScannerError(Location(source_pos(), source_pos() + 1),
+ MessageTemplate::kTrailingNumericSeparator);
+ return false;
+ }
+
+ return true;
+}
+
+bool Scanner::ScanDecimalDigits() {
+ if (allow_harmony_numeric_separator()) {
+ return ScanDigitsWithNumericSeparators(&IsDecimalDigit, false);
+ }
+ while (IsDecimalDigit(c0_)) {
+ AddLiteralCharAdvance();
+ }
+ return true;
+}
+
+bool Scanner::ScanDecimalAsSmiWithNumericSeparators(uint64_t* value) {
+ bool separator_seen = false;
+ while (IsDecimalDigit(c0_) || c0_ == '_') {
+ if (c0_ == '_') {
+ Advance<false, false>();
+ if (c0_ == '_') {
+ ReportScannerError(Location(source_pos(), source_pos() + 1),
+ MessageTemplate::kContinuousNumericSeparator);
+ return false;
+ }
+ separator_seen = true;
+ continue;
+ }
+ separator_seen = false;
+ *value = 10 * *value + (c0_ - '0');
+ uc32 first_char = c0_;
+ Advance<false, false>();
+ AddLiteralChar(first_char);
+ }
+
+ if (separator_seen) {
+ ReportScannerError(Location(source_pos(), source_pos() + 1),
+ MessageTemplate::kTrailingNumericSeparator);
+ return false;
+ }
+
+ return true;
+}
+
+bool Scanner::ScanDecimalAsSmi(uint64_t* value) {
+ if (allow_harmony_numeric_separator()) {
+ return ScanDecimalAsSmiWithNumericSeparators(value);
+ }
+
+ while (IsDecimalDigit(c0_)) {
+ *value = 10 * *value + (c0_ - '0');
+ uc32 first_char = c0_;
+ Advance<false, false>();
+ AddLiteralChar(first_char);
+ }
+ return true;
}
bool Scanner::ScanBinaryDigits() {
+ if (allow_harmony_numeric_separator()) {
+ return ScanDigitsWithNumericSeparators(&IsBinaryDigit, true);
+ }
+
// we must have at least one binary digit after 'b'/'B'
- if (!IsBinaryDigit(c0_)) return false;
+ if (!IsBinaryDigit(c0_)) {
+ return false;
+ }
+
while (IsBinaryDigit(c0_)) {
AddLiteralCharAdvance();
}
@@ -1238,33 +1325,51 @@ bool Scanner::ScanBinaryDigits() {
}
bool Scanner::ScanOctalDigits() {
+ if (allow_harmony_numeric_separator()) {
+ return ScanDigitsWithNumericSeparators(&IsOctalDigit, true);
+ }
+
// we must have at least one octal digit after 'o'/'O'
- if (!IsOctalDigit(c0_)) return false;
+ if (!IsOctalDigit(c0_)) {
+ return false;
+ }
+
while (IsOctalDigit(c0_)) {
AddLiteralCharAdvance();
}
-
return true;
}
-bool Scanner::ScanImplicitOctalDigits(int start_pos) {
- // (possible) octal number
+bool Scanner::ScanImplicitOctalDigits(int start_pos,
+ Scanner::NumberKind* kind) {
+ *kind = IMPLICIT_OCTAL;
+
while (true) {
- if (c0_ == '8' || c0_ == '9') return false;
+ // (possible) octal number
+ if (c0_ == '8' || c0_ == '9') {
+ *kind = DECIMAL_WITH_LEADING_ZERO;
+ return true;
+ }
if (c0_ < '0' || '7' < c0_) {
// Octal literal finished.
octal_pos_ = Location(start_pos, source_pos());
octal_message_ = MessageTemplate::kStrictOctalLiteral;
- break;
+ return true;
}
AddLiteralCharAdvance();
}
- return true;
}
bool Scanner::ScanHexDigits() {
+ if (allow_harmony_numeric_separator()) {
+ return ScanDigitsWithNumericSeparators(&IsHexDigit, true);
+ }
+
// we must have at least one hex digit after 'x'/'X'
- if (!IsHexDigit(c0_)) return false;
+ if (!IsHexDigit(c0_)) {
+ return false;
+ }
+
while (IsHexDigit(c0_)) {
AddLiteralCharAdvance();
}
@@ -1275,21 +1380,13 @@ bool Scanner::ScanSignedInteger() {
if (c0_ == '+' || c0_ == '-') AddLiteralCharAdvance();
// we must have at least one decimal digit after 'e'/'E'
if (!IsDecimalDigit(c0_)) return false;
- ScanDecimalDigits();
- return true;
+ return ScanDecimalDigits();
}
Token::Value Scanner::ScanNumber(bool seen_period) {
DCHECK(IsDecimalDigit(c0_)); // the first digit of the number or the fraction
- enum {
- DECIMAL,
- DECIMAL_WITH_LEADING_ZERO,
- HEX,
- OCTAL,
- IMPLICIT_OCTAL,
- BINARY
- } kind = DECIMAL;
+ NumberKind kind = DECIMAL;
LiteralScope literal(this);
bool at_start = !seen_period;
@@ -1297,8 +1394,11 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
if (seen_period) {
// we have already seen a decimal point of the float
AddLiteralChar('.');
- ScanDecimalDigits(); // we know we have at least one digit
-
+ if (allow_harmony_numeric_separator() && c0_ == '_') {
+ return Token::ILLEGAL;
+ }
+ // we know we have at least one digit
+ if (!ScanDecimalDigits()) return Token::ILLEGAL;
} else {
// if the first character is '0' we must check for octals and hex
if (c0_ == '0') {
@@ -1320,12 +1420,18 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
if (!ScanBinaryDigits()) return Token::ILLEGAL;
} else if ('0' <= c0_ && c0_ <= '7') {
kind = IMPLICIT_OCTAL;
- if (!ScanImplicitOctalDigits(start_pos)) {
- kind = DECIMAL_WITH_LEADING_ZERO;
+ if (!ScanImplicitOctalDigits(start_pos, &kind)) {
+ return Token::ILLEGAL;
+ }
+ if (kind == DECIMAL_WITH_LEADING_ZERO) {
at_start = false;
}
} else if (c0_ == '8' || c0_ == '9') {
kind = DECIMAL_WITH_LEADING_ZERO;
+ } else if (allow_harmony_numeric_separator() && c0_ == '_') {
+ ReportScannerError(Location(source_pos(), source_pos() + 1),
+ MessageTemplate::kZeroDigitNumericSeparator);
+ return Token::ILLEGAL;
}
}
@@ -1334,12 +1440,9 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
// This is an optimization for parsing Decimal numbers as Smi's.
if (at_start) {
uint64_t value = 0;
- while (IsDecimalDigit(c0_)) {
- value = 10 * value + (c0_ - '0');
-
- uc32 first_char = c0_;
- Advance<false, false>();
- AddLiteralChar(first_char);
+ // scan subsequent decimal digits
+ if (!ScanDecimalAsSmi(&value)) {
+ return Token::ILLEGAL;
}
if (next_.literal_chars->one_byte_literal().length() <= 10 &&
@@ -1358,11 +1461,14 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
HandleLeadSurrogate();
}
- ScanDecimalDigits(); // optional
+ if (!ScanDecimalDigits()) return Token::ILLEGAL;
if (c0_ == '.') {
seen_period = true;
AddLiteralCharAdvance();
- ScanDecimalDigits(); // optional
+ if (allow_harmony_numeric_separator() && c0_ == '_') {
+ return Token::ILLEGAL;
+ }
+ if (!ScanDecimalDigits()) return Token::ILLEGAL;
}
}
}
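The ScanDigitsWithNumericSeparators helper added above accepts '_' only between two digits and reports an error for doubled or trailing separators. A simplified standalone sketch of the same validation, operating on a whole string rather than the scanner's character stream:

#include <cctype>
#include <cstdio>
#include <string>

// Returns true and fills *out with the digits if the separators are legal:
// '_' may appear only between two digits ("1__0", "10_" and "_1" are rejected).
bool ScanDigitsWithSeparators(const std::string& s, std::string* out) {
  out->clear();
  if (s.empty() || !std::isdigit(static_cast<unsigned char>(s[0]))) return false;
  bool separator_seen = false;
  for (char c : s) {
    if (c == '_') {
      if (separator_seen) return false;  // kContinuousNumericSeparator
      separator_seen = true;
      continue;
    }
    if (!std::isdigit(static_cast<unsigned char>(c))) return false;
    separator_seen = false;
    out->push_back(c);                   // analogue of AddLiteralCharAdvance()
  }
  return !separator_seen;                // trailing '_' -> kTrailingNumericSeparator
}

int main() {
  std::string digits;
  std::printf("%d %s\n", ScanDigitsWithSeparators("1_000_000", &digits),
              digits.c_str());                                    // 1 1000000
  std::printf("%d\n", ScanDigitsWithSeparators("1__0", &digits)); // 0
  std::printf("%d\n", ScanDigitsWithSeparators("10_", &digits));  // 0
  return 0;
}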
diff --git a/deps/v8/src/parsing/scanner.h b/deps/v8/src/parsing/scanner.h
index fe7d754319..77ea4d3272 100644
--- a/deps/v8/src/parsing/scanner.h
+++ b/deps/v8/src/parsing/scanner.h
@@ -366,6 +366,12 @@ class Scanner {
void set_allow_harmony_private_fields(bool allow) {
allow_harmony_private_fields_ = allow;
}
+ bool allow_harmony_numeric_separator() const {
+ return allow_harmony_numeric_separator_;
+ }
+ void set_allow_harmony_numeric_separator(bool allow) {
+ allow_harmony_numeric_separator_ = allow;
+ }
private:
// Scoped helper for saving & restoring scanner error state.
@@ -490,12 +496,21 @@ class Scanner {
Token::Value contextual_token;
};
+ enum NumberKind {
+ BINARY,
+ OCTAL,
+ IMPLICIT_OCTAL,
+ HEX,
+ DECIMAL,
+ DECIMAL_WITH_LEADING_ZERO
+ };
+
static const int kCharacterLookaheadBufferSize = 1;
const int kMaxAscii = 127;
// Scans octal escape sequence. Also accepts "\0" decimal escape sequence.
template <bool capture_raw>
- uc32 ScanOctalEscape(uc32 c, int length);
+ uc32 ScanOctalEscape(uc32 c, int length, bool in_template_literal);
// Call this after setting source_ to the input.
void Init() {
@@ -720,12 +735,17 @@ class Scanner {
// Scans a possible HTML comment -- begins with '<!'.
Token::Value ScanHtmlComment();
- void ScanDecimalDigits();
+ bool ScanDigitsWithNumericSeparators(bool (*predicate)(uc32 ch),
+ bool is_check_first_digit);
+ bool ScanDecimalDigits();
+ // Optimized function to scan decimal number as Smi.
+ bool ScanDecimalAsSmi(uint64_t* value);
+ bool ScanDecimalAsSmiWithNumericSeparators(uint64_t* value);
bool ScanHexDigits();
bool ScanBinaryDigits();
bool ScanSignedInteger();
bool ScanOctalDigits();
- bool ScanImplicitOctalDigits(int start_pos);
+ bool ScanImplicitOctalDigits(int start_pos, NumberKind* kind);
Token::Value ScanNumber(bool seen_period);
Token::Value ScanIdentifierOrKeyword();
@@ -817,6 +837,7 @@ class Scanner {
// Harmony flags to allow ESNext features.
bool allow_harmony_bigint_;
bool allow_harmony_private_fields_;
+ bool allow_harmony_numeric_separator_;
MessageTemplate::Template scanner_error_;
Location scanner_error_location_;
diff --git a/deps/v8/src/perf-jit.cc b/deps/v8/src/perf-jit.cc
index ac1362c9a9..0e427df4ef 100644
--- a/deps/v8/src/perf-jit.cc
+++ b/deps/v8/src/perf-jit.cc
@@ -211,7 +211,7 @@ void PerfJitLogger::LogRecordedBuffer(AbstractCode* abstract_code,
// We only support non-interpreted functions.
if (!abstract_code->IsCode()) return;
Code* code = abstract_code->GetCode();
- DCHECK(code->instruction_start() == code->address() + Code::kHeaderSize);
+ DCHECK(code->raw_instruction_start() == code->address() + Code::kHeaderSize);
// Debug info has to be emitted first.
if (FLAG_perf_prof && shared != nullptr) {
@@ -223,11 +223,11 @@ void PerfJitLogger::LogRecordedBuffer(AbstractCode* abstract_code,
}
const char* code_name = name;
- uint8_t* code_pointer = reinterpret_cast<uint8_t*>(code->instruction_start());
+ uint8_t* code_pointer = reinterpret_cast<uint8_t*>(code->InstructionStart());
// Code generated by Turbofan will have the safepoint table directly after
// instructions. There is no need to record the safepoint table itself.
uint32_t code_size = code->is_turbofanned() ? code->safepoint_table_offset()
- : code->instruction_size();
+ : code->InstructionSize();
// Unwinding info comes right after debug info.
if (FLAG_perf_prof_unwinding_info) LogWriteUnwindingInfo(code);
@@ -235,8 +235,8 @@ void PerfJitLogger::LogRecordedBuffer(AbstractCode* abstract_code,
WriteJitCodeLoadEntry(code_pointer, code_size, code_name, length);
}
-void PerfJitLogger::LogRecordedBuffer(wasm::WasmCode* code, const char* name,
- int length) {
+void PerfJitLogger::LogRecordedBuffer(const wasm::WasmCode* code,
+ const char* name, int length) {
base::LockGuard<base::RecursiveMutex> guard_file(file_mutex_.Pointer());
if (perf_output_handle_ == nullptr) return;
@@ -270,42 +270,6 @@ void PerfJitLogger::WriteJitCodeLoadEntry(const uint8_t* code_pointer,
LogWriteBytes(reinterpret_cast<const char*>(code_pointer), code_size);
}
-void PerfJitLogger::LogRecordedBuffer(const InstructionStream* stream,
- const char* name, int length) {
- if (FLAG_perf_basic_prof_only_functions) return;
-
- base::LockGuard<base::RecursiveMutex> guard_file(file_mutex_.Pointer());
-
- if (perf_output_handle_ == nullptr) return;
-
- const char* code_name = name;
- uint8_t* code_pointer = stream->bytes();
- uint32_t code_size = static_cast<uint32_t>(stream->byte_length());
-
- // TODO(jgruber): Do we need unwinding info?
-
- static const char string_terminator[] = "\0";
-
- PerfJitCodeLoad code_load;
- code_load.event_ = PerfJitCodeLoad::kLoad;
- code_load.size_ = sizeof(code_load) + length + 1 + code_size;
- code_load.time_stamp_ = GetTimestamp();
- code_load.process_id_ =
- static_cast<uint32_t>(base::OS::GetCurrentProcessId());
- code_load.thread_id_ = static_cast<uint32_t>(base::OS::GetCurrentThreadId());
- code_load.vma_ = 0x0; // Our addresses are absolute.
- code_load.code_address_ = reinterpret_cast<uint64_t>(code_pointer);
- code_load.code_size_ = code_size;
- code_load.code_id_ = code_index_;
-
- code_index_++;
-
- LogWriteBytes(reinterpret_cast<const char*>(&code_load), sizeof(code_load));
- LogWriteBytes(code_name, length);
- LogWriteBytes(string_terminator, 1);
- LogWriteBytes(reinterpret_cast<const char*>(code_pointer), code_size);
-}
-
namespace {
std::unique_ptr<char[]> GetScriptName(Handle<Script> script) {
@@ -354,7 +318,7 @@ void PerfJitLogger::LogWriteDebugInfo(Code* code, SharedFunctionInfo* shared) {
debug_info.event_ = PerfJitCodeLoad::kDebugInfo;
debug_info.time_stamp_ = GetTimestamp();
- debug_info.address_ = reinterpret_cast<uint64_t>(code->instruction_start());
+ debug_info.address_ = reinterpret_cast<uint64_t>(code->InstructionStart());
debug_info.entry_count_ = entry_count;
uint32_t size = sizeof(debug_info);
@@ -377,7 +341,7 @@ void PerfJitLogger::LogWriteDebugInfo(Code* code, SharedFunctionInfo* shared) {
debug_info.size_ = size + padding;
LogWriteBytes(reinterpret_cast<const char*>(&debug_info), sizeof(debug_info));
- Address code_start = code->instruction_start();
+ Address code_start = code->InstructionStart();
for (SourcePositionTableIterator iterator(code->SourcePositionTable());
!iterator.done(); iterator.Advance()) {
diff --git a/deps/v8/src/perf-jit.h b/deps/v8/src/perf-jit.h
index 8e7c6b5939..ef83e9423d 100644
--- a/deps/v8/src/perf-jit.h
+++ b/deps/v8/src/perf-jit.h
@@ -54,9 +54,7 @@ class PerfJitLogger : public CodeEventLogger {
uint64_t GetTimestamp();
void LogRecordedBuffer(AbstractCode* code, SharedFunctionInfo* shared,
const char* name, int length) override;
- void LogRecordedBuffer(const InstructionStream* stream, const char* name,
- int length) override;
- void LogRecordedBuffer(wasm::WasmCode* code, const char* name,
+ void LogRecordedBuffer(const wasm::WasmCode* code, const char* name,
int length) override;
// Extension added to V8 log file name to get the low-level log name.
@@ -134,12 +132,7 @@ class PerfJitLogger : public CodeEventLogger {
UNIMPLEMENTED();
}
- void LogRecordedBuffer(const InstructionStream* stream, const char* name,
- int length) override {
- UNIMPLEMENTED();
- }
-
- void LogRecordedBuffer(wasm::WasmCode* code, const char* name,
+ void LogRecordedBuffer(const wasm::WasmCode* code, const char* name,
int length) override {
UNIMPLEMENTED();
}
diff --git a/deps/v8/src/ppc/assembler-ppc-inl.h b/deps/v8/src/ppc/assembler-ppc-inl.h
index 166a854638..ae37b02a24 100644
--- a/deps/v8/src/ppc/assembler-ppc-inl.h
+++ b/deps/v8/src/ppc/assembler-ppc-inl.h
@@ -91,7 +91,8 @@ Address RelocInfo::target_address() {
Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_) ||
- rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE);
+ IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
+ IsOffHeapTarget(rmode_));
if (FLAG_enable_embedded_constant_pool &&
Assembler::IsConstantPoolLoadStart(pc_)) {
@@ -192,6 +193,19 @@ Address RelocInfo::target_external_reference() {
return Assembler::target_address_at(pc_, constant_pool_);
}
+void RelocInfo::set_target_external_reference(
+ Address target, ICacheFlushMode icache_flush_mode) {
+ DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
+ Assembler::set_target_address_at(pc_, constant_pool_, target,
+ icache_flush_mode);
+}
+
+void RelocInfo::set_wasm_code_table_entry(Address target,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(rmode_ == RelocInfo::WASM_CODE_TABLE_ENTRY);
+ Assembler::set_target_address_at(pc_, constant_pool_, target,
+ icache_flush_mode);
+}
Address RelocInfo::target_runtime_entry(Assembler* origin) {
DCHECK(IsRuntimeEntry(rmode_));
@@ -206,6 +220,11 @@ void RelocInfo::set_target_runtime_entry(Address target,
set_target_address(target, write_barrier_mode, icache_flush_mode);
}
+Address RelocInfo::target_off_heap_target() {
+ DCHECK(IsOffHeapTarget(rmode_));
+ return Assembler::target_address_at(pc_, constant_pool_);
+}
+
void RelocInfo::WipeOut() {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
@@ -237,6 +256,8 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
visitor->VisitInternalReference(host(), this);
} else if (IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(host(), this);
+ } else if (RelocInfo::IsOffHeapTarget(mode)) {
+ visitor->VisitOffHeapTarget(host(), this);
}
}
diff --git a/deps/v8/src/ppc/assembler-ppc.cc b/deps/v8/src/ppc/assembler-ppc.cc
index eb16e46505..d0a6389393 100644
--- a/deps/v8/src/ppc/assembler-ppc.cc
+++ b/deps/v8/src/ppc/assembler-ppc.cc
@@ -427,11 +427,11 @@ const int kEndOfChain = -4;
enum {
kUnboundMovLabelOffsetOpcode = 0 << 26,
kUnboundAddLabelOffsetOpcode = 1 << 26,
- kUnboundMovLabelAddrOpcode = 2 << 26,
- kUnboundJumpTableEntryOpcode = 3 << 26
+ kUnboundAddLabelLongOffsetOpcode = 2 << 26,
+ kUnboundMovLabelAddrOpcode = 3 << 26,
+ kUnboundJumpTableEntryOpcode = 4 << 26
};
-
int Assembler::target_at(int pos) {
Instr instr = instr_at(pos);
// check which type of branch this is 16 or 26 bit offset
@@ -448,6 +448,7 @@ int Assembler::target_at(int pos) {
break;
case kUnboundMovLabelOffsetOpcode:
case kUnboundAddLabelOffsetOpcode:
+ case kUnboundAddLabelLongOffsetOpcode:
case kUnboundMovLabelAddrOpcode:
case kUnboundJumpTableEntryOpcode:
link = SIGN_EXT_IMM26(instr & kImm26Mask);
@@ -508,15 +509,21 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
patcher.bitwise_mov32(dst, offset);
break;
}
+ case kUnboundAddLabelLongOffsetOpcode:
case kUnboundAddLabelOffsetOpcode: {
// dst = base + position + immediate
Instr operands = instr_at(pos + kInstrSize);
- Register dst = Register::from_code((operands >> 21) & 0x1F);
- Register base = Register::from_code((operands >> 16) & 0x1F);
- int32_t offset = target_pos + SIGN_EXT_IMM16(operands & kImm16Mask);
- PatchingAssembler patcher(isolate_data(),
- reinterpret_cast<byte*>(buffer_ + pos), 2);
+ Register dst = Register::from_code((operands >> 27) & 0x1F);
+ Register base = Register::from_code((operands >> 22) & 0x1F);
+ int32_t delta = (opcode == kUnboundAddLabelLongOffsetOpcode)
+ ? static_cast<int32_t>(instr_at(pos + 2 * kInstrSize))
+ : (SIGN_EXT_IMM22(operands & kImm22Mask));
+ int32_t offset = target_pos + delta;
+ PatchingAssembler patcher(
+ isolate_data(), reinterpret_cast<byte*>(buffer_ + pos),
+ 2 + static_cast<int32_t>(opcode == kUnboundAddLabelLongOffsetOpcode));
patcher.bitwise_add32(dst, base, offset);
+ if (opcode == kUnboundAddLabelLongOffsetOpcode) patcher.nop();
break;
}
case kUnboundMovLabelAddrOpcode: {
@@ -1483,11 +1490,16 @@ void Assembler::add_label_offset(Register dst, Register base, Label* label,
DCHECK_EQ(0, link & 3);
link >>= 2;
DCHECK(is_int26(link));
- DCHECK(is_int16(delta));
-
BlockTrampolinePoolScope block_trampoline_pool(this);
- emit(kUnboundAddLabelOffsetOpcode | (link & kImm26Mask));
- emit(dst.code() * B21 | base.code() * B16 | (delta & kImm16Mask));
+
+ emit((is_int22(delta) ? kUnboundAddLabelOffsetOpcode
+ : kUnboundAddLabelLongOffsetOpcode) |
+ (link & kImm26Mask));
+ emit(dst.code() * B27 | base.code() * B22 | (delta & kImm22Mask));
+
+ if (!is_int22(delta)) {
+ emit(delta);
+ }
}
}
@@ -2002,7 +2014,7 @@ void Assembler::GrowBuffer(int needed) {
// Some internal data structures overflow for very large buffers,
// they must ensure that kMaximalBufferSize is not too large.
if (desc.buffer_size > kMaximalBufferSize) {
- V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
+ V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
}
// Set up new buffer.
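add_label_offset above now chooses between a short form, with the delta packed into 22 bits of the operand word, and a long form that emits the full delta as an extra word. A hedged sketch of that variable-length choice; the word layout here is purely illustrative and not the real PPC encoding:

#include <cstdint>
#include <cstdio>
#include <vector>

constexpr uint32_t kImm22Mask = (1u << 22) - 1;

bool IsInt22(int32_t v) { return v >= -(1 << 21) && v < (1 << 21); }

// Short form: delta fits in 22 bits of the operand word.
// Long form: an illustrative marker word followed by the full 32-bit delta.
void EmitAddLabelOffset(std::vector<uint32_t>* buffer, int32_t delta) {
  if (IsInt22(delta)) {
    buffer->push_back(static_cast<uint32_t>(delta) & kImm22Mask);
  } else {
    buffer->push_back(0);                             // marker (illustrative)
    buffer->push_back(static_cast<uint32_t>(delta));  // full delta word
  }
}

int main() {
  std::vector<uint32_t> buffer;
  EmitAddLabelOffset(&buffer, 100);      // short form: 1 word
  EmitAddLabelOffset(&buffer, 5 << 22);  // long form: 2 words
  std::printf("%zu\n", buffer.size());   // 3
  return 0;
}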
diff --git a/deps/v8/src/ppc/assembler-ppc.h b/deps/v8/src/ppc/assembler-ppc.h
index 271c6e69db..2ba8e38795 100644
--- a/deps/v8/src/ppc/assembler-ppc.h
+++ b/deps/v8/src/ppc/assembler-ppc.h
@@ -287,8 +287,8 @@ class Register : public RegisterBase<Register, kRegAfterLast> {
explicit constexpr Register(int code) : RegisterBase(code) {}
};
-static_assert(IS_TRIVIALLY_COPYABLE(Register) &&
- sizeof(Register) == sizeof(int),
+ASSERT_TRIVIALLY_COPYABLE(Register);
+static_assert(sizeof(Register) == sizeof(int),
"Register can efficiently be passed by value");
#define DEFINE_REGISTER(R) \
@@ -329,8 +329,8 @@ class DoubleRegister : public RegisterBase<DoubleRegister, kDoubleAfterLast> {
explicit constexpr DoubleRegister(int code) : RegisterBase(code) {}
};
-static_assert(IS_TRIVIALLY_COPYABLE(DoubleRegister) &&
- sizeof(DoubleRegister) == sizeof(int),
+ASSERT_TRIVIALLY_COPYABLE(DoubleRegister);
+static_assert(sizeof(DoubleRegister) == sizeof(int),
"DoubleRegister can efficiently be passed by value");
typedef DoubleRegister FloatRegister;
@@ -450,7 +450,6 @@ class MemOperand BASE_EMBEDDED {
explicit MemOperand(Register ra, Register rb);
int32_t offset() const {
- DCHECK(rb_ == no_reg);
return offset_;
}
diff --git a/deps/v8/src/ppc/code-stubs-ppc.cc b/deps/v8/src/ppc/code-stubs-ppc.cc
index 742d89a590..9fd9d4d87e 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.cc
+++ b/deps/v8/src/ppc/code-stubs-ppc.cc
@@ -159,45 +159,32 @@ void MathPowStub::Generate(MacroAssembler* masm) {
const Register scratch2 = r10;
Label call_runtime, done, int_exponent;
- if (exponent_type() == TAGGED) {
- // Base is already in double_base.
- __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
- __ lfd(double_exponent,
- FieldMemOperand(exponent, HeapNumber::kValueOffset));
- }
-
- if (exponent_type() != INTEGER) {
- // Detect integer exponents stored as double.
- __ TryDoubleToInt32Exact(scratch, double_exponent, scratch2,
- double_scratch);
- __ beq(&int_exponent);
+ // Detect integer exponents stored as double.
+ __ TryDoubleToInt32Exact(scratch, double_exponent, scratch2, double_scratch);
+ __ beq(&int_exponent);
- __ mflr(r0);
- __ push(r0);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(0, 2, scratch);
- __ MovToFloatParameters(double_base, double_exponent);
- __ CallCFunction(
- ExternalReference::power_double_double_function(isolate()), 0, 2);
- }
- __ pop(r0);
- __ mtlr(r0);
- __ MovFromFloatResult(double_result);
- __ b(&done);
+ __ mflr(r0);
+ __ push(r0);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(0, 2, scratch);
+ __ MovToFloatParameters(double_base, double_exponent);
+ __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
+ 0, 2);
}
+ __ pop(r0);
+ __ mtlr(r0);
+ __ MovFromFloatResult(double_result);
+ __ b(&done);
// Calculate power with integer exponent.
__ bind(&int_exponent);
// Get two copies of exponent in the registers scratch and exponent.
- if (exponent_type() == INTEGER) {
- __ mr(scratch, exponent);
- } else {
- // Exponent has previously been stored into scratch as untagged integer.
- __ mr(exponent, scratch);
- }
+ // Exponent has previously been stored into scratch as untagged integer.
+ __ mr(exponent, scratch);
+
__ fmr(double_scratch, double_base); // Back up base.
__ li(scratch2, Operand(1));
__ ConvertIntToDouble(scratch2, double_result);
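With the TAGGED/INTEGER exponent variants dropped, MathPowStub above always detects an exponent that is exactly an int32 and takes the integer path, otherwise calling out to the power_double_double function. A hedged C++ sketch of that split; the integer loop below uses square-and-multiply and std::pow stands in for the runtime call, assuming finite exponents:

#include <cmath>
#include <cstdint>
#include <cstdio>

double Power(double base, double exponent) {
  int32_t int_exponent = static_cast<int32_t>(exponent);
  if (static_cast<double>(int_exponent) != exponent) {
    return std::pow(base, exponent);  // stand-in for power_double_double
  }
  // Integer fast path: square-and-multiply on |exponent|.
  uint64_t n = static_cast<uint64_t>(
      int_exponent < 0 ? -static_cast<int64_t>(int_exponent) : int_exponent);
  double result = 1.0;
  double scratch = base;
  while (n != 0) {
    if (n & 1) result *= scratch;
    scratch *= scratch;
    n >>= 1;
  }
  return int_exponent < 0 ? 1.0 / result : result;
}

int main() {
  std::printf("%g %g %g\n", Power(2.0, 10.0), Power(3.0, -2.0), Power(2.0, 0.5));
  // prints: 1024 0.111111 1.41421
  return 0;
}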
diff --git a/deps/v8/src/ppc/codegen-ppc.cc b/deps/v8/src/ppc/codegen-ppc.cc
index b54a44c6ed..a2a9013b1c 100644
--- a/deps/v8/src/ppc/codegen-ppc.cc
+++ b/deps/v8/src/ppc/codegen-ppc.cc
@@ -7,6 +7,7 @@
#include <memory>
#include "src/codegen.h"
+#include "src/isolate.h"
#include "src/macro-assembler.h"
#include "src/ppc/simulator-ppc.h"
diff --git a/deps/v8/src/ppc/constants-ppc.h b/deps/v8/src/ppc/constants-ppc.h
index bab4efe0ac..44749d4eb6 100644
--- a/deps/v8/src/ppc/constants-ppc.h
+++ b/deps/v8/src/ppc/constants-ppc.h
@@ -39,6 +39,9 @@ const int kLoadDoubleMaxReachBits = 15;
// sign-extend the least significant 16-bits of value <imm>
#define SIGN_EXT_IMM16(imm) ((static_cast<int>(imm) << 16) >> 16)
+// sign-extend the least significant 22-bits of value <imm>
+#define SIGN_EXT_IMM22(imm) ((static_cast<int>(imm) << 10) >> 10)
+
// sign-extend the least significant 26-bits of value <imm>
#define SIGN_EXT_IMM26(imm) ((static_cast<int>(imm) << 6) >> 6)
@@ -2596,6 +2599,7 @@ enum {
kImm24Mask = (1 << 24) - 1,
kOff16Mask = (1 << 16) - 1,
kImm16Mask = (1 << 16) - 1,
+ kImm22Mask = (1 << 22) - 1,
kImm26Mask = (1 << 26) - 1,
kBOfieldMask = 0x1f << 21,
kOpcodeMask = 0x3f << 26,
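SIGN_EXT_IMM22, added above next to the existing 16- and 26-bit variants, sign-extends the low 22 bits of a word by shifting left and arithmetically back right. A quick standalone check of that idiom:

#include <cstdint>
#include <cstdio>

// Same idea as SIGN_EXT_IMM22: bit 21 becomes the sign bit of the result.
int32_t SignExtendImm22(uint32_t imm) {
  return static_cast<int32_t>(imm << 10) >> 10;
}

int main() {
  std::printf("%d\n", SignExtendImm22(0x1FFFFF));  // 2097151, largest positive
  std::printf("%d\n", SignExtendImm22(0x200000));  // -2097152, bit 21 set
  std::printf("%d\n", SignExtendImm22(0x3FFFFF));  // -1
  return 0;
}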
diff --git a/deps/v8/src/ppc/frame-constants-ppc.h b/deps/v8/src/ppc/frame-constants-ppc.h
index c822de877b..f91601c046 100644
--- a/deps/v8/src/ppc/frame-constants-ppc.h
+++ b/deps/v8/src/ppc/frame-constants-ppc.h
@@ -12,36 +12,38 @@ namespace internal {
class EntryFrameConstants : public AllStatic {
public:
- static const int kCallerFPOffset =
+ static constexpr int kCallerFPOffset =
-(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
};
class ExitFrameConstants : public TypedFrameConstants {
public:
- static const int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
- static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+ static constexpr int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+ static constexpr int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
DEFINE_TYPED_FRAME_SIZES(2);
// The caller fields are below the frame pointer on the stack.
- static const int kCallerFPOffset = 0 * kPointerSize;
+ static constexpr int kCallerFPOffset = 0 * kPointerSize;
// The calling JS function is below FP.
- static const int kCallerPCOffset = 1 * kPointerSize;
+ static constexpr int kCallerPCOffset = 1 * kPointerSize;
// FP-relative displacement of the caller's SP. It points just
// below the saved PC.
- static const int kCallerSPDisplacement = 2 * kPointerSize;
+ static constexpr int kCallerSPDisplacement = 2 * kPointerSize;
};
class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
- static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
- static const int kLastParameterOffset = +2 * kPointerSize;
- static const int kFunctionOffset = StandardFrameConstants::kFunctionOffset;
+ static constexpr int kLocal0Offset =
+ StandardFrameConstants::kExpressionsOffset;
+ static constexpr int kLastParameterOffset = +2 * kPointerSize;
+ static constexpr int kFunctionOffset =
+ StandardFrameConstants::kFunctionOffset;
// Caller SP-relative.
- static const int kParam0Offset = -2 * kPointerSize;
- static const int kReceiverOffset = -1 * kPointerSize;
+ static constexpr int kParam0Offset = -2 * kPointerSize;
+ static constexpr int kReceiverOffset = -1 * kPointerSize;
};
} // namespace internal
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.cc b/deps/v8/src/ppc/macro-assembler-ppc.cc
index 68efa84c72..7747c9a4a2 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/ppc/macro-assembler-ppc.cc
@@ -168,7 +168,7 @@ void TurboAssembler::Call(Register target) {
}
void MacroAssembler::CallJSEntry(Register target) {
- DCHECK(target == ip);
+ CHECK(target == r5);
Call(target);
}
@@ -825,7 +825,7 @@ void TurboAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
}
#endif
-void MacroAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
+void TurboAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
Register code_target_address) {
lwz(kConstantPoolRegister,
MemOperand(code_target_address,
@@ -833,8 +833,24 @@ void MacroAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
add(kConstantPoolRegister, kConstantPoolRegister, code_target_address);
}
+void TurboAssembler::LoadPC(Register dst) {
+ b(4, SetLK);
+ mflr(dst);
+}
+
+void TurboAssembler::ComputeCodeStartAddress(Register dst) {
+ Label current_pc;
+ mov_label_addr(dst, &current_pc);
+
+ bind(&current_pc);
+ subi(dst, dst, Operand(pc_offset()));
+}
+
void TurboAssembler::LoadConstantPoolPointerRegister() {
- mov_label_addr(kConstantPoolRegister, ConstantPoolPosition());
+ LoadPC(kConstantPoolRegister);
+ int32_t delta = -pc_offset() + 4;
+ add_label_offset(kConstantPoolRegister, kConstantPoolRegister,
+ ConstantPoolPosition(), delta);
}
void TurboAssembler::StubPrologue(StackFrame::Type type) {
@@ -850,7 +866,6 @@ void TurboAssembler::StubPrologue(StackFrame::Type type) {
}
void TurboAssembler::Prologue() {
- DCHECK(base != no_reg);
PushStandardFrame(r4);
if (FLAG_enable_embedded_constant_pool) {
// base contains prologue address
@@ -1178,15 +1193,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
- Label skip_hook, call_hook;
-
- ExternalReference debug_is_active =
- ExternalReference::debug_is_active_address(isolate());
- mov(r7, Operand(debug_is_active));
- LoadByte(r7, MemOperand(r7), r0);
- extsb(r7, r7);
- CmpSmiLiteral(r7, Smi::kZero, r0);
- beq(&skip_hook);
+ Label skip_hook;
ExternalReference debug_hook_avtive =
ExternalReference::debug_hook_on_function_call_address(isolate());
@@ -1194,17 +1201,8 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
LoadByte(r7, MemOperand(r7), r0);
extsb(r7, r7);
CmpSmiLiteral(r7, Smi::kZero, r0);
- bne(&call_hook);
-
- LoadP(r7, FieldMemOperand(fun, JSFunction::kSharedFunctionInfoOffset));
- LoadP(r7, FieldMemOperand(r7, SharedFunctionInfo::kDebugInfoOffset));
- JumpIfSmi(r7, &skip_hook);
- LoadP(r7, FieldMemOperand(r7, DebugInfo::kFlagsOffset));
- SmiUntag(r0, r7);
- andi(r0, r0, Operand(DebugInfo::kBreakAtEntry));
- beq(&skip_hook, cr0);
+ beq(&skip_hook);
- bind(&call_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -1626,12 +1624,21 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
-void MacroAssembler::JumpToInstructionStream(const InstructionStream* stream) {
- intptr_t bytes_address = reinterpret_cast<intptr_t>(stream->bytes());
- mov(kOffHeapTrampolineRegister, Operand(bytes_address, RelocInfo::NONE));
+void MacroAssembler::JumpToInstructionStream(Address entry) {
+ mov(kOffHeapTrampolineRegister,
+ Operand(reinterpret_cast<intptr_t>(entry), RelocInfo::OFF_HEAP_TARGET));
Jump(kOffHeapTrampolineRegister);
}
+void MacroAssembler::LoadWeakValue(Register out, Register in,
+ Label* target_if_cleared) {
+ cmpi(in, Operand(kClearedWeakHeapObject));
+ beq(target_if_cleared);
+
+ mov(r0, Operand(~kWeakHeapObjectMask));
+ and_(out, in, r0);
+}
+
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
DCHECK_GT(value, 0);
@@ -1748,6 +1755,20 @@ void MacroAssembler::AssertFixedArray(Register object) {
}
}
+void MacroAssembler::AssertConstructor(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ TestIfSmi(object, r0);
+ Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor, cr0);
+ push(object);
+ LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset));
+ lbz(object, FieldMemOperand(object, Map::kBitFieldOffset));
+ andi(object, object, Operand(Map::IsConstructorBit::kMask));
+ pop(object);
+ Check(ne, AbortReason::kOperandIsNotAConstructor, cr0);
+ }
+}
+
void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@@ -2393,11 +2414,12 @@ void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi* smi,
// Load a "pointer" sized value from the memory location
void TurboAssembler::LoadP(Register dst, const MemOperand& mem,
Register scratch) {
+ DCHECK_EQ(mem.rb(), no_reg);
int offset = mem.offset();
if (!is_int16(offset)) {
/* cannot use d-form */
- DCHECK(scratch != no_reg);
+ DCHECK_NE(scratch, no_reg);
mov(scratch, Operand(offset));
LoadPX(dst, MemOperand(mem.ra(), scratch));
} else {
@@ -2805,8 +2827,8 @@ void TurboAssembler::SwapP(Register src, MemOperand dst, Register scratch) {
if (dst.rb() != r0) DCHECK(!AreAliased(src, dst.rb(), scratch));
DCHECK(!AreAliased(src, scratch));
mr(scratch, src);
- LoadP(src, dst);
- StoreP(scratch, dst);
+ LoadP(src, dst, r0);
+ StoreP(scratch, dst, r0);
}
void TurboAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0,
@@ -2816,10 +2838,25 @@ void TurboAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0,
if (dst.ra() != r0) DCHECK(!AreAliased(dst.ra(), scratch_0, scratch_1));
if (dst.rb() != r0) DCHECK(!AreAliased(dst.rb(), scratch_0, scratch_1));
DCHECK(!AreAliased(scratch_0, scratch_1));
- LoadP(scratch_0, src);
- LoadP(scratch_1, dst);
- StoreP(scratch_0, dst);
- StoreP(scratch_1, src);
+ if (is_int16(src.offset()) || is_int16(dst.offset())) {
+ if (!is_int16(src.offset())) {
+ // swap operand
+ MemOperand temp = src;
+ src = dst;
+ dst = temp;
+ }
+ LoadP(scratch_1, dst, scratch_0);
+ LoadP(scratch_0, src);
+ StoreP(scratch_1, src);
+ StoreP(scratch_0, dst, scratch_1);
+ } else {
+ LoadP(scratch_1, dst, scratch_0);
+ push(scratch_1);
+ LoadP(scratch_0, src, scratch_1);
+ StoreP(scratch_0, dst, scratch_1);
+ pop(scratch_1);
+ StoreP(scratch_1, src, scratch_0);
+ }
}
void TurboAssembler::SwapFloat32(DoubleRegister src, DoubleRegister dst,
@@ -2835,18 +2872,18 @@ void TurboAssembler::SwapFloat32(DoubleRegister src, MemOperand dst,
DoubleRegister scratch) {
DCHECK(!AreAliased(src, scratch));
fmr(scratch, src);
- LoadSingle(src, dst);
- StoreSingle(scratch, dst);
+ LoadSingle(src, dst, r0);
+ StoreSingle(scratch, dst, r0);
}
void TurboAssembler::SwapFloat32(MemOperand src, MemOperand dst,
DoubleRegister scratch_0,
DoubleRegister scratch_1) {
DCHECK(!AreAliased(scratch_0, scratch_1));
- LoadSingle(scratch_0, src);
- LoadSingle(scratch_1, dst);
- StoreSingle(scratch_0, dst);
- StoreSingle(scratch_1, src);
+ LoadSingle(scratch_0, src, r0);
+ LoadSingle(scratch_1, dst, r0);
+ StoreSingle(scratch_0, dst, r0);
+ StoreSingle(scratch_1, src, r0);
}
void TurboAssembler::SwapDouble(DoubleRegister src, DoubleRegister dst,
@@ -2862,18 +2899,18 @@ void TurboAssembler::SwapDouble(DoubleRegister src, MemOperand dst,
DoubleRegister scratch) {
DCHECK(!AreAliased(src, scratch));
fmr(scratch, src);
- LoadDouble(src, dst);
- StoreDouble(scratch, dst);
+ LoadDouble(src, dst, r0);
+ StoreDouble(scratch, dst, r0);
}
void TurboAssembler::SwapDouble(MemOperand src, MemOperand dst,
DoubleRegister scratch_0,
DoubleRegister scratch_1) {
DCHECK(!AreAliased(scratch_0, scratch_1));
- LoadDouble(scratch_0, src);
- LoadDouble(scratch_1, dst);
- StoreDouble(scratch_0, dst);
- StoreDouble(scratch_1, src);
+ LoadDouble(scratch_0, src, r0);
+ LoadDouble(scratch_1, dst, r0);
+ StoreDouble(scratch_0, dst, r0);
+ StoreDouble(scratch_1, src, r0);
}
#ifdef DEBUG
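
The new MacroAssembler::LoadWeakValue above branches to target_if_cleared when the register holds the cleared-weak-reference sentinel and otherwise strips the weak bit by AND-ing with ~kWeakHeapObjectMask. A minimal stand-alone sketch of the same decode in plain C++; the tag values below are illustrative assumptions, not V8's actual constants:

    #include <cstdint>
    #include <cstdio>

    // Illustrative tag values (assumptions); V8 defines its own scheme.
    constexpr uintptr_t kHeapObjectTag = 1;          // low bit set on a strong pointer
    constexpr uintptr_t kWeakHeapObjectMask = 2;     // extra bit marking a weak pointer
    constexpr uintptr_t kClearedWeakHeapObject = 3;  // sentinel stored once the target died

    // Mirrors the intent of MacroAssembler::LoadWeakValue: branch out if the cell
    // was cleared, otherwise strip the weak bit to recover a strong pointer.
    bool LoadWeakValue(uintptr_t in, uintptr_t* out) {
      if (in == kClearedWeakHeapObject) return false;  // "beq(target_if_cleared)"
      *out = in & ~kWeakHeapObjectMask;                // "and_(out, in, ~kWeakHeapObjectMask)"
      return true;
    }

    int main() {
      uintptr_t strong = 0x1000 | kHeapObjectTag;     // pretend heap pointer
      uintptr_t weak = strong | kWeakHeapObjectMask;  // its weak encoding
      uintptr_t decoded;
      if (LoadWeakValue(weak, &decoded))
        std::printf("live target: %#zx\n", static_cast<size_t>(decoded));
      if (!LoadWeakValue(kClearedWeakHeapObject, &decoded))
        std::printf("weak reference was cleared\n");
    }
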
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.h b/deps/v8/src/ppc/macro-assembler-ppc.h
index f4d9afd47f..db2a4039bd 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/ppc/macro-assembler-ppc.h
@@ -216,6 +216,8 @@ class TurboAssembler : public Assembler {
Register scratch = no_reg);
void LoadSingleU(DoubleRegister dst, const MemOperand& mem,
Register scratch = no_reg);
+ void LoadPC(Register dst);
+ void ComputeCodeStartAddress(Register dst);
void StoreDouble(DoubleRegister src, const MemOperand& mem,
Register scratch = no_reg);
@@ -504,10 +506,18 @@ class TurboAssembler : public Assembler {
void Move(Register dst, Register src, Condition cond = al);
void Move(DoubleRegister dst, DoubleRegister src);
- void SmiUntag(Register reg, RCBit rc = LeaveRC) { SmiUntag(reg, reg, rc); }
+ void SmiUntag(Register reg, RCBit rc = LeaveRC, int scale = 0) {
+ SmiUntag(reg, reg, rc, scale);
+ }
- void SmiUntag(Register dst, Register src, RCBit rc = LeaveRC) {
- ShiftRightArithImm(dst, src, kSmiShift, rc);
+ void SmiUntag(Register dst, Register src, RCBit rc = LeaveRC, int scale = 0) {
+ if (scale > kSmiShift) {
+ ShiftLeftImm(dst, src, Operand(scale - kSmiShift), rc);
+ } else if (scale < kSmiShift) {
+ ShiftRightArithImm(dst, src, kSmiShift - scale, rc);
+ } else {
+ // do nothing
+ }
}
// ---------------------------------------------------------------------------
// Bit testing/extraction
@@ -641,6 +651,10 @@ class TurboAssembler : public Assembler {
void CallStubDelayed(CodeStub* stub);
void LoadConstantPoolPointerRegister();
+
+ // Loads the constant pool pointer (kConstantPoolRegister).
+ void LoadConstantPoolPointerRegisterFromCodeTargetAddress(
+ Register code_target_address);
void AbortConstantPoolBuilding() {
#ifdef DEBUG
// Avoid DCHECK(!is_linked()) failure in ~Label()
@@ -735,10 +749,6 @@ class MacroAssembler : public TurboAssembler {
void PushSafepointRegisters();
void PopSafepointRegisters();
- // Loads the constant pool pointer (kConstantPoolRegister).
- void LoadConstantPoolPointerRegisterFromCodeTargetAddress(
- Register code_target_address);
-
// Flush the I-cache from asm code. You should use CpuFeatures::FlushICache
// from C.
// Does not handle errors.
@@ -933,7 +943,11 @@ class MacroAssembler : public TurboAssembler {
bool builtin_exit_frame = false);
// Generates a trampoline to jump to the off-heap instruction stream.
- void JumpToInstructionStream(const InstructionStream* stream);
+ void JumpToInstructionStream(Address entry);
+
+ // ---------------------------------------------------------------------------
+ // In-place weak references.
+ void LoadWeakValue(Register out, Register in, Label* target_if_cleared);
// ---------------------------------------------------------------------------
// StatsCounter support
@@ -995,6 +1009,10 @@ class MacroAssembler : public TurboAssembler {
// Abort execution if argument is not a FixedArray, enabled via --debug-code.
void AssertFixedArray(Register object);
+ // Abort execution if argument is not a Constructor, enabled via --debug-code.
+ void AssertConstructor(Register object);
+
+ // Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
// Abort execution if argument is not a JSBoundFunction,
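
The new SmiUntag(dst, src, rc, scale) overload above folds Smi untagging and a scale factor into a single shift: shift left by (scale - kSmiShift) when the scale exceeds the Smi shift, arithmetic-shift right by the difference when it is smaller, and do nothing when they match. A small self-contained sketch of the same arithmetic; kSmiShift = 32 is only an assumption matching the usual 64-bit layout:

    #include <cstdint>
    #include <cstdio>

    constexpr int kSmiShift = 32;  // assumption: 64-bit Smi layout

    // Tag an integer as a Smi (value stored in the upper bits).
    int64_t SmiTag(int64_t value) { return value << kSmiShift; }

    // Untag a Smi and multiply by 2^scale in one shift, mirroring the new
    // TurboAssembler::SmiUntag(dst, src, rc, scale) overload.
    int64_t SmiUntagScaled(int64_t smi, int scale) {
      if (scale > kSmiShift) return smi << (scale - kSmiShift);
      if (scale < kSmiShift) return smi >> (kSmiShift - scale);  // arithmetic shift
      return smi;  // scale == kSmiShift: the tagged value already equals value * 2^scale
    }

    int main() {
      int64_t smi = SmiTag(5);
      std::printf("%lld\n", (long long)SmiUntagScaled(smi, 0));  // 5
      std::printf("%lld\n", (long long)SmiUntagScaled(smi, 3));  // 40 == 5 * 8, i.e. scaled by pointer size
    }
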
diff --git a/deps/v8/src/profiler/allocation-tracker.cc b/deps/v8/src/profiler/allocation-tracker.cc
index b2b9392319..3dad4ae208 100644
--- a/deps/v8/src/profiler/allocation-tracker.cc
+++ b/deps/v8/src/profiler/allocation-tracker.cc
@@ -261,7 +261,7 @@ unsigned AllocationTracker::AddFunctionInfo(SharedFunctionInfo* shared,
// Converting start offset into line and column may cause heap
// allocations so we postpone them until snapshot serialization.
unresolved_locations_.push_back(
- new UnresolvedLocation(script, shared->start_position(), info));
+ new UnresolvedLocation(script, shared->StartPosition(), info));
}
entry->value = reinterpret_cast<void*>(function_info_list_.size());
function_info_list_.push_back(info);
diff --git a/deps/v8/src/profiler/heap-profiler.cc b/deps/v8/src/profiler/heap-profiler.cc
index 9dbe3ff5bd..7e0bcec97a 100644
--- a/deps/v8/src/profiler/heap-profiler.cc
+++ b/deps/v8/src/profiler/heap-profiler.cc
@@ -132,10 +132,10 @@ v8::AllocationProfile* HeapProfiler::GetAllocationProfile() {
void HeapProfiler::StartHeapObjectsTracking(bool track_allocations) {
ids_->UpdateHeapObjectsMap();
is_tracking_object_moves_ = true;
- DCHECK(!is_tracking_allocations());
+ DCHECK(!allocation_tracker_);
if (track_allocations) {
allocation_tracker_.reset(new AllocationTracker(ids_.get(), names_.get()));
- heap()->DisableInlineAllocation();
+ heap()->AddHeapObjectAllocationTracker(this);
heap()->isolate()->debug()->feature_tracker()->Track(
DebugFeatureTracker::kAllocationTracking);
}
@@ -148,9 +148,9 @@ SnapshotObjectId HeapProfiler::PushHeapObjectsStats(OutputStream* stream,
void HeapProfiler::StopHeapObjectsTracking() {
ids_->StopHeapObjectsTracking();
- if (is_tracking_allocations()) {
+ if (allocation_tracker_) {
allocation_tracker_.reset();
- heap()->EnableInlineAllocation();
+ heap()->RemoveHeapObjectAllocationTracker(this);
}
}
@@ -206,7 +206,7 @@ Handle<HeapObject> HeapProfiler::FindHeapObjectById(SnapshotObjectId id) {
void HeapProfiler::ClearHeapObjectMap() {
ids_.reset(new HeapObjectsMap(heap()));
- if (!is_tracking_allocations()) is_tracking_object_moves_ = false;
+ if (!allocation_tracker_) is_tracking_object_moves_ = false;
}
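
With this change HeapProfiler no longer toggles inline allocation on the heap; it registers itself as a HeapObjectAllocationTracker while allocation tracking is active and unregisters when tracking stops. A rough observer-pattern sketch of that lifecycle, using simplified stand-in types rather than V8's real Heap and HeapProfiler classes:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    using Address = uintptr_t;

    // Stand-in for v8::internal::HeapObjectAllocationTracker.
    struct HeapObjectAllocationTracker {
      virtual ~HeapObjectAllocationTracker() = default;
      virtual void AllocationEvent(Address addr, int size) = 0;
    };

    // Stand-in heap that notifies registered trackers on every allocation.
    class Heap {
     public:
      void AddHeapObjectAllocationTracker(HeapObjectAllocationTracker* t) {
        trackers_.push_back(t);
      }
      void RemoveHeapObjectAllocationTracker(HeapObjectAllocationTracker* t) {
        trackers_.erase(std::remove(trackers_.begin(), trackers_.end(), t),
                        trackers_.end());
      }
      void Allocate(Address addr, int size) {
        for (auto* t : trackers_) t->AllocationEvent(addr, size);
      }
     private:
      std::vector<HeapObjectAllocationTracker*> trackers_;
    };

    // Simplified profiler: registered only while tracking is on.
    class Profiler : public HeapObjectAllocationTracker {
     public:
      explicit Profiler(Heap* heap) : heap_(heap) {}
      void StartTracking() { heap_->AddHeapObjectAllocationTracker(this); }
      void StopTracking() { heap_->RemoveHeapObjectAllocationTracker(this); }
      void AllocationEvent(Address addr, int size) override {
        std::printf("allocated %d bytes at %#zx\n", size, (size_t)addr);
      }
     private:
      Heap* heap_;
    };

    int main() {
      Heap heap;
      Profiler profiler(&heap);
      profiler.StartTracking();
      heap.Allocate(0x1000, 64);   // observed
      profiler.StopTracking();
      heap.Allocate(0x2000, 128);  // not observed
    }
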
diff --git a/deps/v8/src/profiler/heap-profiler.h b/deps/v8/src/profiler/heap-profiler.h
index d37a882805..507dd579bf 100644
--- a/deps/v8/src/profiler/heap-profiler.h
+++ b/deps/v8/src/profiler/heap-profiler.h
@@ -24,7 +24,7 @@ class HeapSnapshot;
class SamplingHeapProfiler;
class StringsStorage;
-class HeapProfiler {
+class HeapProfiler : public HeapObjectAllocationTracker {
public:
explicit HeapProfiler(Heap* heap);
~HeapProfiler();
@@ -57,9 +57,9 @@ class HeapProfiler {
void ObjectMoveEvent(Address from, Address to, int size);
- void AllocationEvent(Address addr, int size);
+ void AllocationEvent(Address addr, int size) override;
- void UpdateObjectSizeEvent(Address addr, int size);
+ void UpdateObjectSizeEvent(Address addr, int size) override;
void DefineWrapperClass(
uint16_t class_id, v8::HeapProfiler::WrapperInfoCallback callback);
@@ -79,7 +79,6 @@ class HeapProfiler {
}
bool is_tracking_object_moves() const { return is_tracking_object_moves_; }
- bool is_tracking_allocations() const { return !!allocation_tracker_; }
Handle<HeapObject> FindHeapObjectById(SnapshotObjectId id);
void ClearHeapObjectMap();
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index b1e033c5f5..fb61246867 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -161,6 +161,8 @@ const char* HeapEntry::TypeAsString() {
case kConsString: return "/concatenated string/";
case kSlicedString: return "/sliced string/";
case kSymbol: return "/symbol/";
+ case kBigInt:
+ return "/bigint/";
default: return "???";
}
}
@@ -602,7 +604,7 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
if (object->IsJSFunction()) {
JSFunction* func = JSFunction::cast(object);
SharedFunctionInfo* shared = func->shared();
- const char* name = names_->GetName(shared->name());
+ const char* name = names_->GetName(shared->Name());
return AddEntry(object, HeapEntry::kClosure, name);
} else if (object->IsJSBoundFunction()) {
return AddEntry(object, HeapEntry::kClosure, "native_bind");
@@ -639,10 +641,12 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
return AddEntry(object, HeapEntry::kHidden, "private symbol");
else
return AddEntry(object, HeapEntry::kSymbol, "symbol");
+ } else if (object->IsBigInt()) {
+ return AddEntry(object, HeapEntry::kBigInt, "bigint");
} else if (object->IsCode()) {
return AddEntry(object, HeapEntry::kCode, "");
} else if (object->IsSharedFunctionInfo()) {
- String* name = SharedFunctionInfo::cast(object)->name();
+ String* name = SharedFunctionInfo::cast(object)->Name();
return AddEntry(object,
HeapEntry::kCode,
names_->GetName(name));
@@ -790,20 +794,33 @@ class IndexedReferencesExtractor : public ObjectVisitor {
parent_obj_(parent_obj),
parent_start_(HeapObject::RawField(parent_obj_, 0)),
parent_end_(HeapObject::RawField(parent_obj_, parent_obj_->Size())),
- parent_(parent),
- next_index_(0) {}
+ parent_(parent) {}
void VisitPointers(HeapObject* host, Object** start, Object** end) override {
- for (Object** p = start; p < end; p++) {
- int index = static_cast<int>(p - HeapObject::RawField(parent_obj_, 0));
- ++next_index_;
+ VisitPointers(host, reinterpret_cast<MaybeObject**>(start),
+ reinterpret_cast<MaybeObject**>(end));
+ }
+ void VisitPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) override {
+ int next_index = 0;
+ for (MaybeObject** p = start; p < end; p++) {
+ int index = static_cast<int>(reinterpret_cast<Object**>(p) -
+ HeapObject::RawField(parent_obj_, 0));
+ ++next_index;
// |p| could be outside of the object, e.g., while visiting RelocInfo of
// code objects.
- if (p >= parent_start_ && p < parent_end_ && generator_->marks_[index]) {
- generator_->marks_[index] = false;
+ if (reinterpret_cast<Object**>(p) >= parent_start_ &&
+ reinterpret_cast<Object**>(p) < parent_end_ &&
+ generator_->visited_fields_[index]) {
+ generator_->visited_fields_[index] = false;
continue;
}
- generator_->SetHiddenReference(parent_obj_, parent_, next_index_, *p,
- index * kPointerSize);
+ HeapObject* heap_object;
+ // Weak references have been handled explicitly.
+ DCHECK(!(*p)->ToWeakHeapObject(&heap_object));
+ if ((*p)->ToStrongHeapObject(&heap_object)) {
+ generator_->SetHiddenReference(parent_obj_, parent_, next_index,
+ heap_object, index * kPointerSize);
+ }
}
}
@@ -813,7 +830,6 @@ class IndexedReferencesExtractor : public ObjectVisitor {
Object** parent_start_;
Object** parent_end_;
int parent_;
- int next_index_;
};
@@ -863,6 +879,10 @@ bool V8HeapExplorer::ExtractReferencesPass1(int entry, HeapObject* obj) {
ExtractPropertyCellReferences(entry, PropertyCell::cast(obj));
} else if (obj->IsAllocationSite()) {
ExtractAllocationSiteReferences(entry, AllocationSite::cast(obj));
+ } else if (obj->IsFeedbackVector()) {
+ ExtractFeedbackVectorReferences(entry, FeedbackVector::cast(obj));
+ } else if (obj->IsWeakFixedArray()) {
+ ExtractWeakFixedArrayReferences(entry, WeakFixedArray::cast(obj));
}
return true;
}
@@ -1015,8 +1035,8 @@ void V8HeapExplorer::ExtractJSWeakCollectionReferences(int entry,
}
void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) {
- if (context == context->declaration_context()) {
- ScopeInfo* scope_info = context->closure()->shared()->scope_info();
+ if (!context->IsNativeContext() && context->is_declaration_context()) {
+ ScopeInfo* scope_info = context->scope_info();
// Add context allocated locals.
int context_locals = scope_info->ContextLocalCount();
for (int i = 0; i < context_locals; ++i) {
@@ -1026,7 +1046,7 @@ void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) {
Context::OffsetOfElementAt(idx));
}
if (scope_info->HasFunctionName()) {
- String* name = scope_info->FunctionName();
+ String* name = String::cast(scope_info->FunctionName());
int idx = scope_info->FunctionContextSlotIndex(name);
if (idx >= 0) {
SetContextReference(context, entry, name, context->get(idx),
@@ -1065,30 +1085,40 @@ void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) {
void V8HeapExplorer::ExtractMapReferences(int entry, Map* map) {
- Object* raw_transitions_or_prototype_info = map->raw_transitions();
- if (raw_transitions_or_prototype_info->IsTransitionArray()) {
- TransitionArray* transitions =
- TransitionArray::cast(raw_transitions_or_prototype_info);
- if (map->CanTransition() && transitions->HasPrototypeTransitions()) {
- TagObject(transitions->GetPrototypeTransitions(),
- "(prototype transitions)");
+ MaybeObject* maybe_raw_transitions_or_prototype_info = map->raw_transitions();
+ HeapObject* raw_transitions_or_prototype_info;
+ if (maybe_raw_transitions_or_prototype_info->ToWeakHeapObject(
+ &raw_transitions_or_prototype_info)) {
+ DCHECK(raw_transitions_or_prototype_info->IsMap());
+ SetWeakReference(map, entry, "transition",
+ raw_transitions_or_prototype_info,
+ Map::kTransitionsOrPrototypeInfoOffset);
+ } else if (maybe_raw_transitions_or_prototype_info->ToStrongHeapObject(
+ &raw_transitions_or_prototype_info)) {
+ DCHECK(!raw_transitions_or_prototype_info->IsWeakCell());
+
+ if (raw_transitions_or_prototype_info->IsTransitionArray()) {
+ TransitionArray* transitions =
+ TransitionArray::cast(raw_transitions_or_prototype_info);
+ if (map->CanTransition() && transitions->HasPrototypeTransitions()) {
+ TagObject(transitions->GetPrototypeTransitions(),
+ "(prototype transitions)");
+ }
+ TagObject(transitions, "(transition array)");
+ SetInternalReference(map, entry, "transitions", transitions,
+ Map::kTransitionsOrPrototypeInfoOffset);
+ } else if (raw_transitions_or_prototype_info->IsTuple3() ||
+ raw_transitions_or_prototype_info->IsFixedArray()) {
+ TagObject(raw_transitions_or_prototype_info, "(transition)");
+ SetInternalReference(map, entry, "transition",
+ raw_transitions_or_prototype_info,
+ Map::kTransitionsOrPrototypeInfoOffset);
+ } else if (map->is_prototype_map()) {
+ TagObject(raw_transitions_or_prototype_info, "prototype_info");
+ SetInternalReference(map, entry, "prototype_info",
+ raw_transitions_or_prototype_info,
+ Map::kTransitionsOrPrototypeInfoOffset);
}
-
- TagObject(transitions, "(transition array)");
- SetInternalReference(map, entry, "transitions", transitions,
- Map::kTransitionsOrPrototypeInfoOffset);
- } else if (raw_transitions_or_prototype_info->IsWeakCell() ||
- raw_transitions_or_prototype_info->IsTuple3() ||
- raw_transitions_or_prototype_info->IsFixedArray()) {
- TagObject(raw_transitions_or_prototype_info, "(transition)");
- SetInternalReference(map, entry, "transition",
- raw_transitions_or_prototype_info,
- Map::kTransitionsOrPrototypeInfoOffset);
- } else if (map->is_prototype_map()) {
- TagObject(raw_transitions_or_prototype_info, "prototype_info");
- SetInternalReference(map, entry, "prototype_info",
- raw_transitions_or_prototype_info,
- Map::kTransitionsOrPrototypeInfoOffset);
}
DescriptorArray* descriptors = map->instance_descriptors();
TagObject(descriptors, "(map descriptors)");
@@ -1131,31 +1161,22 @@ void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
const char* name = nullptr;
if (shared_name != heap_->empty_string()) {
name = names_->GetName(shared_name);
- TagObject(shared->code(), names_->GetFormatted("(code for %s)", name));
+ TagObject(shared->GetCode(), names_->GetFormatted("(code for %s)", name));
} else {
- TagObject(shared->code(), names_->GetFormatted("(%s code)",
- Code::Kind2String(shared->code()->kind())));
+ TagObject(shared->GetCode(),
+ names_->GetFormatted(
+ "(%s code)", Code::Kind2String(shared->GetCode()->kind())));
}
- SetInternalReference(obj, entry, "raw_name", shared->raw_name(),
- SharedFunctionInfo::kNameOffset);
- SetInternalReference(obj, entry,
- "code", shared->code(),
- SharedFunctionInfo::kCodeOffset);
- TagObject(shared->scope_info(), "(function scope info)");
- SetInternalReference(obj, entry,
- "scope_info", shared->scope_info(),
- SharedFunctionInfo::kScopeInfoOffset);
+ if (shared->name_or_scope_info()->IsScopeInfo()) {
+ TagObject(shared->name_or_scope_info(), "(function scope info)");
+ }
+ SetInternalReference(obj, entry, "name_or_scope_info",
+ shared->name_or_scope_info(),
+ SharedFunctionInfo::kNameOrScopeInfoOffset);
SetInternalReference(obj, entry,
"script", shared->script(),
SharedFunctionInfo::kScriptOffset);
- const char* construct_stub_name = name ?
- names_->GetFormatted("(construct stub code for %s)", name) :
- "(construct stub code)";
- TagObject(shared->construct_stub(), construct_stub_name);
- SetInternalReference(obj, entry,
- "construct_stub", shared->construct_stub(),
- SharedFunctionInfo::kConstructStubOffset);
SetInternalReference(obj, entry,
"function_data", shared->function_data(),
SharedFunctionInfo::kFunctionDataOffset);
@@ -1165,12 +1186,12 @@ void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
SetInternalReference(obj, entry, "function_identifier",
shared->function_identifier(),
SharedFunctionInfo::kFunctionIdentifierOffset);
- SetInternalReference(obj, entry, "feedback_metadata",
- shared->feedback_metadata(),
- SharedFunctionInfo::kFeedbackMetadataOffset);
+ SetInternalReference(
+ obj, entry, "raw_outer_scope_info_or_feedback_metadata",
+ shared->raw_outer_scope_info_or_feedback_metadata(),
+ SharedFunctionInfo::kOuterScopeInfoOrFeedbackMetadataOffset);
}
-
void V8HeapExplorer::ExtractScriptReferences(int entry, Script* script) {
HeapObject* obj = script;
SetInternalReference(obj, entry,
@@ -1343,8 +1364,10 @@ void V8HeapExplorer::ExtractFixedArrayReferences(int entry, FixedArray* array) {
int key_entry_index = key_entry->index();
HeapEntry* value_entry = GetEntry(value);
if (key_entry && value_entry) {
+ const char* edge_name =
+ names_->GetFormatted("key %s in WeakMap", key_entry->name());
filler_->SetNamedAutoIndexReference(HeapGraphEdge::kInternal,
- key_entry_index, "WeakMap",
+ key_entry_index, edge_name,
value_entry);
}
}
@@ -1362,6 +1385,28 @@ void V8HeapExplorer::ExtractFixedArrayReferences(int entry, FixedArray* array) {
}
}
+void V8HeapExplorer::ExtractFeedbackVectorReferences(
+ int entry, FeedbackVector* feedback_vector) {
+ MaybeObject* code = feedback_vector->optimized_code_weak_or_smi();
+ HeapObject* code_heap_object;
+ if (code->ToWeakHeapObject(&code_heap_object)) {
+ SetWeakReference(feedback_vector, entry, "optimized code", code_heap_object,
+ FeedbackVector::kOptimizedCodeOffset);
+ }
+}
+
+void V8HeapExplorer::ExtractWeakFixedArrayReferences(int entry,
+ WeakFixedArray* array) {
+ for (int i = 0; i < array->length(); ++i) {
+ MaybeObject* object = array->Get(i);
+ HeapObject* heap_object;
+ if (object->ToWeakHeapObject(&heap_object)) {
+ SetWeakReference(array, entry, i, heap_object,
+ WeakFixedArray::kHeaderSize + i * kPointerSize);
+ }
+ }
+}
+
void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
Isolate* isolate = js_obj->GetIsolate();
if (js_obj->HasFastProperties()) {
@@ -1565,11 +1610,11 @@ bool V8HeapExplorer::IterateAndExtractSinglePass() {
if (interrupted) continue;
size_t max_pointer = obj->Size() / kPointerSize;
- if (max_pointer > marks_.size()) {
+ if (max_pointer > visited_fields_.size()) {
// Clear the current bits.
- std::vector<bool>().swap(marks_);
+ std::vector<bool>().swap(visited_fields_);
// Reallocate to right size.
- marks_.resize(max_pointer, false);
+ visited_fields_.resize(max_pointer, false);
}
HeapEntry* heap_entry = GetEntry(obj);
@@ -1582,6 +1627,10 @@ bool V8HeapExplorer::IterateAndExtractSinglePass() {
IndexedReferencesExtractor refs_extractor(this, obj, entry);
obj->Iterate(&refs_extractor);
}
+ // Ensure visited_fields_ doesn't leak to the next object.
+ for (size_t i = 0; i < max_pointer; ++i) {
+ DCHECK(!visited_fields_[i]);
+ }
if (!progress_->ProgressReport(false)) interrupted = true;
}
@@ -1593,6 +1642,7 @@ bool V8HeapExplorer::IsEssentialObject(Object* object) {
return object->IsHeapObject() && !object->IsOddball() &&
object != heap_->empty_byte_array() &&
object != heap_->empty_fixed_array() &&
+ object != heap_->empty_weak_fixed_array() &&
object != heap_->empty_descriptor_array() &&
object != heap_->fixed_array_map() && object != heap_->cell_map() &&
object != heap_->global_property_cell_map() &&
@@ -1626,15 +1676,14 @@ void V8HeapExplorer::SetContextReference(HeapObject* parent_obj,
if (child_entry == nullptr) return;
filler_->SetNamedReference(HeapGraphEdge::kContextVariable, parent_entry,
names_->GetName(reference_name), child_entry);
- MarkVisitedField(parent_obj, field_offset);
+ MarkVisitedField(field_offset);
}
-
-void V8HeapExplorer::MarkVisitedField(HeapObject* obj, int offset) {
+void V8HeapExplorer::MarkVisitedField(int offset) {
if (offset < 0) return;
int index = offset / kPointerSize;
- DCHECK(!marks_[index]);
- marks_[index] = true;
+ DCHECK(!visited_fields_[index]);
+ visited_fields_[index] = true;
}
@@ -1676,7 +1725,7 @@ void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
reference_name,
child_entry);
}
- MarkVisitedField(parent_obj, field_offset);
+ MarkVisitedField(field_offset);
}
@@ -1694,7 +1743,7 @@ void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
names_->GetName(index),
child_entry);
}
- MarkVisitedField(parent_obj, field_offset);
+ MarkVisitedField(field_offset);
}
void V8HeapExplorer::SetHiddenReference(HeapObject* parent_obj,
@@ -1724,7 +1773,7 @@ void V8HeapExplorer::SetWeakReference(HeapObject* parent_obj,
reference_name,
child_entry);
}
- MarkVisitedField(parent_obj, field_offset);
+ MarkVisitedField(field_offset);
}
@@ -1742,7 +1791,7 @@ void V8HeapExplorer::SetWeakReference(HeapObject* parent_obj,
names_->GetFormatted("%d", index),
child_entry);
}
- MarkVisitedField(parent_obj, field_offset);
+ MarkVisitedField(field_offset);
}
@@ -1783,7 +1832,7 @@ void V8HeapExplorer::SetPropertyReference(HeapObject* parent_obj,
: names_->GetName(reference_name);
filler_->SetNamedReference(type, parent_entry, name, child_entry);
- MarkVisitedField(parent_obj, field_offset);
+ MarkVisitedField(field_offset);
}
void V8HeapExplorer::SetRootGcRootsReference() {
@@ -2460,9 +2509,15 @@ bool HeapSnapshotGenerator::ProgressReport(bool force) {
void HeapSnapshotGenerator::SetProgressTotal(int iterations_count) {
if (control_ == nullptr) return;
HeapIterator iterator(heap_, HeapIterator::kFilterUnreachable);
- progress_total_ = iterations_count * (
- v8_heap_explorer_.EstimateObjectsCount(&iterator) +
- dom_explorer_.EstimateObjectsCount());
+ // The +1 ensures that intermediate ProgressReport calls will never signal
+ // that the work is finished (i.e. progress_counter_ == progress_total_).
+ // Only the forced ProgressReport() at the end of GenerateSnapshot()
+ // should signal that the work is finished because signalling finished twice
+ // breaks the DevTools frontend.
+ progress_total_ =
+ iterations_count * (v8_heap_explorer_.EstimateObjectsCount(&iterator) +
+ dom_explorer_.EstimateObjectsCount()) +
+ 1;
progress_counter_ = 0;
}
@@ -2780,7 +2835,8 @@ void HeapSnapshotJSONSerializer::SerializeSnapshot() {
JSON_S("synthetic") ","
JSON_S("concatenated string") ","
JSON_S("sliced string") ","
- JSON_S("symbol")) ","
+ JSON_S("symbol") ","
+ JSON_S("bigint")) ","
JSON_S("string") ","
JSON_S("number") ","
JSON_S("number") ","
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.h b/deps/v8/src/profiler/heap-snapshot-generator.h
index 5c7d88e0ca..8e492807e9 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator.h
@@ -99,7 +99,8 @@ class HeapEntry BASE_EMBEDDED {
kSynthetic = v8::HeapGraphNode::kSynthetic,
kConsString = v8::HeapGraphNode::kConsString,
kSlicedString = v8::HeapGraphNode::kSlicedString,
- kSymbol = v8::HeapGraphNode::kSymbol
+ kSymbol = v8::HeapGraphNode::kSymbol,
+ kBigInt = v8::HeapGraphNode::kBigInt
};
static const int kNoEntry;
@@ -355,7 +356,7 @@ class V8HeapExplorer : public HeapEntriesAllocator {
typedef bool (V8HeapExplorer::*ExtractReferencesMethod)(int entry,
HeapObject* object);
- void MarkVisitedField(HeapObject* obj, int offset);
+ void MarkVisitedField(int offset);
HeapEntry* AddEntry(HeapObject* object);
HeapEntry* AddEntry(HeapObject* object,
@@ -392,6 +393,9 @@ class V8HeapExplorer : public HeapEntriesAllocator {
void ExtractJSArrayBufferReferences(int entry, JSArrayBuffer* buffer);
void ExtractJSPromiseReferences(int entry, JSPromise* promise);
void ExtractFixedArrayReferences(int entry, FixedArray* array);
+ void ExtractFeedbackVectorReferences(int entry,
+ FeedbackVector* feedback_vector);
+ void ExtractWeakFixedArrayReferences(int entry, WeakFixedArray* array);
void ExtractPropertyReferences(JSObject* js_obj, int entry);
void ExtractAccessorPairProperty(JSObject* js_obj, int entry, Name* key,
Object* callback_obj, int field_offset = -1);
@@ -469,7 +473,7 @@ class V8HeapExplorer : public HeapEntriesAllocator {
std::unordered_map<const FixedArray*, FixedArraySubInstanceType> array_types_;
v8::HeapProfiler::ObjectNameResolver* global_object_name_resolver_;
- std::vector<bool> marks_;
+ std::vector<bool> visited_fields_;
friend class IndexedReferencesExtractor;
friend class RootsReferencesExtractor;
diff --git a/deps/v8/src/profiler/profile-generator.cc b/deps/v8/src/profiler/profile-generator.cc
index 9786741b94..db95718936 100644
--- a/deps/v8/src/profiler/profile-generator.cc
+++ b/deps/v8/src/profiler/profile-generator.cc
@@ -152,7 +152,7 @@ void CodeEntry::FillFunctionInfo(SharedFunctionInfo* shared) {
if (!shared->script()->IsScript()) return;
Script* script = Script::cast(shared->script());
set_script_id(script->id());
- set_position(shared->start_position());
+ set_position(shared->StartPosition());
set_bailout_reason(GetBailoutReason(shared->disable_optimization_reason()));
}
diff --git a/deps/v8/src/profiler/profiler-listener.cc b/deps/v8/src/profiler/profiler-listener.cc
index cec71d70e0..16ec9d883a 100644
--- a/deps/v8/src/profiler/profiler-listener.cc
+++ b/deps/v8/src/profiler/profiler-listener.cc
@@ -37,7 +37,7 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
rec->entry = NewCodeEntry(
tag, GetFunctionName(name), CodeEntry::kEmptyNamePrefix,
CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
- CpuProfileNode::kNoColumnNumberInfo, nullptr, code->instruction_start());
+ CpuProfileNode::kNoColumnNumberInfo, nullptr, code->InstructionStart());
RecordInliningInfo(rec->entry, code);
rec->size = code->ExecutableSize();
DispatchCodeEvent(evt_rec);
@@ -51,7 +51,7 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
rec->entry = NewCodeEntry(
tag, GetFunctionName(name), CodeEntry::kEmptyNamePrefix,
CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
- CpuProfileNode::kNoColumnNumberInfo, nullptr, code->instruction_start());
+ CpuProfileNode::kNoColumnNumberInfo, nullptr, code->InstructionStart());
RecordInliningInfo(rec->entry, code);
rec->size = code->ExecutableSize();
DispatchCodeEvent(evt_rec);
@@ -68,7 +68,7 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
tag, GetFunctionName(shared->DebugName()), CodeEntry::kEmptyNamePrefix,
GetName(InferScriptName(script_name, shared)),
CpuProfileNode::kNoLineNumberInfo, CpuProfileNode::kNoColumnNumberInfo,
- nullptr, code->instruction_start());
+ nullptr, code->InstructionStart());
RecordInliningInfo(rec->entry, code);
rec->entry->FillFunctionInfo(shared);
rec->size = code->ExecutableSize();
@@ -104,7 +104,7 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
rec->entry = NewCodeEntry(
tag, GetFunctionName(shared->DebugName()), CodeEntry::kEmptyNamePrefix,
GetName(InferScriptName(script_name, shared)), line, column,
- std::move(line_table), abstract_code->instruction_start());
+ std::move(line_table), abstract_code->InstructionStart());
RecordInliningInfo(rec->entry, abstract_code);
RecordDeoptInlinedFrames(rec->entry, abstract_code);
rec->entry->FillFunctionInfo(shared);
@@ -113,7 +113,7 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
}
void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
- wasm::WasmCode* code,
+ const wasm::WasmCode* code,
wasm::WasmName name) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
@@ -179,25 +179,11 @@ void ProfilerListener::RegExpCodeCreateEvent(AbstractCode* code,
"RegExp: ", CodeEntry::kEmptyResourceName,
CpuProfileNode::kNoLineNumberInfo,
CpuProfileNode::kNoColumnNumberInfo, nullptr,
- code->instruction_start());
+ code->raw_instruction_start());
rec->size = code->ExecutableSize();
DispatchCodeEvent(evt_rec);
}
-void ProfilerListener::InstructionStreamCreateEvent(
- CodeEventListener::LogEventsAndTags tag, const InstructionStream* stream,
- const char* description) {
- CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
- CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->start = stream->bytes();
- rec->entry = NewCodeEntry(
- tag, description, CodeEntry::kEmptyNamePrefix,
- CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
- CpuProfileNode::kNoColumnNumberInfo, nullptr, stream->bytes());
- rec->size = static_cast<unsigned>(stream->byte_length());
- DispatchCodeEvent(evt_rec);
-}
-
void ProfilerListener::SetterCallbackEvent(Name* name, Address entry_point) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
@@ -259,7 +245,7 @@ void ProfilerListener::RecordInliningInfo(CodeEntry* entry,
CodeEntry::kEmptyNamePrefix, resource_name,
CpuProfileNode::kNoLineNumberInfo,
CpuProfileNode::kNoColumnNumberInfo, nullptr,
- code->instruction_start());
+ code->InstructionStart());
inline_entry->FillFunctionInfo(shared_info);
inline_stack.emplace_back(inline_entry);
}
diff --git a/deps/v8/src/profiler/profiler-listener.h b/deps/v8/src/profiler/profiler-listener.h
index ca2c213a93..791b66f3ed 100644
--- a/deps/v8/src/profiler/profiler-listener.h
+++ b/deps/v8/src/profiler/profiler-listener.h
@@ -38,7 +38,8 @@ class ProfilerListener : public CodeEventListener {
AbstractCode* code, SharedFunctionInfo* shared,
Name* script_name, int line, int column) override;
void CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
- wasm::WasmCode* code, wasm::WasmName name) override;
+ const wasm::WasmCode* code,
+ wasm::WasmName name) override;
void CodeMovingGCEvent() override {}
void CodeMoveEvent(AbstractCode* from, Address to) override;
@@ -48,9 +49,6 @@ class ProfilerListener : public CodeEventListener {
int fp_to_sp_delta) override;
void GetterCallbackEvent(Name* name, Address entry_point) override;
void RegExpCodeCreateEvent(AbstractCode* code, String* source) override;
- void InstructionStreamCreateEvent(CodeEventListener::LogEventsAndTags tag,
- const InstructionStream* stream,
- const char* description) override;
void SetterCallbackEvent(Name* name, Address entry_point) override;
void SharedFunctionInfoMoveEvent(Address from, Address to) override {}
diff --git a/deps/v8/src/profiler/sampling-heap-profiler.cc b/deps/v8/src/profiler/sampling-heap-profiler.cc
index 31c885fef0..6912f3eba1 100644
--- a/deps/v8/src/profiler/sampling-heap-profiler.cc
+++ b/deps/v8/src/profiler/sampling-heap-profiler.cc
@@ -209,7 +209,7 @@ SamplingHeapProfiler::AllocationNode* SamplingHeapProfiler::AddStack() {
Script* script = Script::cast(shared->script());
script_id = script->id();
}
- node = node->FindOrAddChildNode(name, script_id, shared->start_position());
+ node = node->FindOrAddChildNode(name, script_id, shared->StartPosition());
}
if (found_arguments_marker_frames) {
diff --git a/deps/v8/src/property-descriptor.cc b/deps/v8/src/property-descriptor.cc
index 1593ce5ed1..d2a7b6695e 100644
--- a/deps/v8/src/property-descriptor.cc
+++ b/deps/v8/src/property-descriptor.cc
@@ -5,7 +5,7 @@
#include "src/property-descriptor.h"
#include "src/bootstrapper.h"
-#include "src/factory.h"
+#include "src/heap/factory.h"
#include "src/isolate-inl.h"
#include "src/lookup.h"
#include "src/objects-inl.h"
diff --git a/deps/v8/src/prototype.h b/deps/v8/src/prototype.h
index ac3597321a..3d8a62d7b9 100644
--- a/deps/v8/src/prototype.h
+++ b/deps/v8/src/prototype.h
@@ -138,7 +138,7 @@ class PrototypeIterator {
// Returns false iff a call to JSProxy::GetPrototype throws.
// TODO(neis): This should probably replace Advance().
- MUST_USE_RESULT bool AdvanceFollowingProxies() {
+ V8_WARN_UNUSED_RESULT bool AdvanceFollowingProxies() {
DCHECK(!(handle_.is_null() && object_->IsJSProxy()));
if (!HasAccess()) {
// Abort the lookup if we do not have access to the current object.
@@ -149,7 +149,7 @@ class PrototypeIterator {
return AdvanceFollowingProxiesIgnoringAccessChecks();
}
- MUST_USE_RESULT bool AdvanceFollowingProxiesIgnoringAccessChecks() {
+ V8_WARN_UNUSED_RESULT bool AdvanceFollowingProxiesIgnoringAccessChecks() {
if (handle_.is_null() || !handle_->IsJSProxy()) {
AdvanceIgnoringProxies();
return true;
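
The MUST_USE_RESULT → V8_WARN_UNUSED_RESULT renames here and in the regexp headers below keep the same behaviour: the macro expands to a warn-unused-result attribute so callers cannot silently drop a result that may signal an exception. A minimal sketch of how such a macro is commonly defined and what it catches; this is a generic definition, not necessarily V8's exact one, and the function name merely echoes the iterator method above:

    #include <cstdio>

    // Generic fallback definition (assumption).
    #if defined(__GNUC__) || defined(__clang__)
    #define V8_WARN_UNUSED_RESULT __attribute__((warn_unused_result))
    #else
    #define V8_WARN_UNUSED_RESULT
    #endif

    // A fallible operation whose result must be checked, like a MaybeHandle.
    V8_WARN_UNUSED_RESULT bool AdvanceFollowingProxies(int* step) {
      if (*step > 3) return false;  // pretend the prototype walk failed
      ++*step;
      return true;
    }

    int main() {
      int step = 0;
      // AdvanceFollowingProxies(&step);      // would trigger -Wunused-result
      if (!AdvanceFollowingProxies(&step)) {  // checked use compiles cleanly
        std::puts("lookup aborted");
      }
      return 0;
    }
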
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
index 5f9d3905a3..d2a20f3af7 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
@@ -8,7 +8,7 @@
#include "src/assembler-inl.h"
#include "src/code-stubs.h"
-#include "src/factory.h"
+#include "src/heap/factory.h"
#include "src/log.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
@@ -575,7 +575,7 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
__ cmp(current_character(), Operand('z'));
BranchOrBacktrack(hi, on_no_match);
}
- ExternalReference map = ExternalReference::re_word_character_map();
+ ExternalReference map = ExternalReference::re_word_character_map(isolate());
__ mov(r0, Operand(map));
__ ldrb(r0, MemOperand(r0, current_character()));
__ cmp(r0, Operand::Zero());
@@ -589,7 +589,7 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
__ cmp(current_character(), Operand('z'));
__ b(hi, &done);
}
- ExternalReference map = ExternalReference::re_word_character_map();
+ ExternalReference map = ExternalReference::re_word_character_map(isolate());
__ mov(r0, Operand(map));
__ ldrb(r0, MemOperand(r0, current_character()));
__ cmp(r0, Operand::Zero());
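
The regexp hunks in this and the following backend files all make the same change: ExternalReference::re_word_character_map now takes the Isolate, while the generated code still tests \w by indexing a 256-entry byte table with the current character. A small sketch of that table-lookup classification in plain C++; the table contents are rebuilt from the usual [0-9A-Za-z_] definition rather than copied from V8:

    #include <array>
    #include <cstdint>
    #include <cstdio>

    // Build a 256-entry table: non-zero means "word character" ([0-9A-Za-z_]).
    std::array<uint8_t, 256> BuildWordCharacterMap() {
      std::array<uint8_t, 256> map{};
      for (int c = 0; c < 256; ++c) {
        bool word = (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z') ||
                    (c >= 'a' && c <= 'z') || c == '_';
        map[c] = word ? 1 : 0;
      }
      return map;
    }

    const std::array<uint8_t, 256> kWordCharacterMap = BuildWordCharacterMap();

    // Mirrors the generated sequence: reject characters above 'z' up front,
    // then load a byte from the table and branch on zero/non-zero.
    bool IsWordCharacter(uint16_t current_character) {
      if (current_character > 'z') return false;
      return kWordCharacterMap[current_character] != 0;
    }

    int main() {
      std::printf("%d %d %d\n", IsWordCharacter('a'), IsWordCharacter('_'),
                  IsWordCharacter('-'));  // 1 1 0
    }
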
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
index d366349640..bf68d8061e 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
@@ -650,7 +650,7 @@ bool RegExpMacroAssemblerARM64::CheckSpecialCharacterClass(uc16 type,
// Table is 256 entries, so all Latin1 characters can be tested.
CompareAndBranchOrBacktrack(current_character(), 'z', hi, on_no_match);
}
- ExternalReference map = ExternalReference::re_word_character_map();
+ ExternalReference map = ExternalReference::re_word_character_map(isolate());
__ Mov(x10, map);
__ Ldrb(w10, MemOperand(x10, current_character(), UXTW));
CompareAndBranchOrBacktrack(w10, 0, eq, on_no_match);
@@ -663,7 +663,7 @@ bool RegExpMacroAssemblerARM64::CheckSpecialCharacterClass(uc16 type,
__ Cmp(current_character(), 'z');
__ B(hi, &done);
}
- ExternalReference map = ExternalReference::re_word_character_map();
+ ExternalReference map = ExternalReference::re_word_character_map(isolate());
__ Mov(x10, map);
__ Ldrb(w10, MemOperand(x10, current_character(), UXTW));
CompareAndBranchOrBacktrack(w10, 0, ne, on_no_match);
diff --git a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
index cb240d6c67..cd84329a78 100644
--- a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
@@ -582,7 +582,8 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
BranchOrBacktrack(above, on_no_match);
}
DCHECK_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
- ExternalReference word_map = ExternalReference::re_word_character_map();
+ ExternalReference word_map =
+ ExternalReference::re_word_character_map(isolate());
__ test_b(current_character(),
Operand::StaticArray(current_character(), times_1, word_map));
BranchOrBacktrack(zero, on_no_match);
@@ -596,7 +597,8 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
__ j(above, &done);
}
DCHECK_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
- ExternalReference word_map = ExternalReference::re_word_character_map();
+ ExternalReference word_map =
+ ExternalReference::re_word_character_map(isolate());
__ test_b(current_character(),
Operand::StaticArray(current_character(), times_1, word_map));
BranchOrBacktrack(not_zero, on_no_match);
diff --git a/deps/v8/src/regexp/jsregexp.cc b/deps/v8/src/regexp/jsregexp.cc
index b90b0a51a6..e26ebaa740 100644
--- a/deps/v8/src/regexp/jsregexp.cc
+++ b/deps/v8/src/regexp/jsregexp.cc
@@ -11,7 +11,7 @@
#include "src/compilation-cache.h"
#include "src/elements.h"
#include "src/execution.h"
-#include "src/factory.h"
+#include "src/heap/factory.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
#include "src/ostreams.h"
@@ -59,7 +59,7 @@
namespace v8 {
namespace internal {
-MUST_USE_RESULT
+V8_WARN_UNUSED_RESULT
static inline MaybeHandle<Object> ThrowRegExpException(
Handle<JSRegExp> re, Handle<String> pattern, Handle<String> error_text) {
Isolate* isolate = re->GetIsolate();
diff --git a/deps/v8/src/regexp/jsregexp.h b/deps/v8/src/regexp/jsregexp.h
index 021c59d3e4..f8d21617a1 100644
--- a/deps/v8/src/regexp/jsregexp.h
+++ b/deps/v8/src/regexp/jsregexp.h
@@ -7,6 +7,7 @@
#include "src/allocation.h"
#include "src/assembler.h"
+#include "src/isolate.h"
#include "src/objects/js-regexp.h"
#include "src/regexp/regexp-ast.h"
#include "src/regexp/regexp-macro-assembler.h"
@@ -71,13 +72,12 @@ class RegExpImpl {
// generic data and choice of implementation - as well as what
// the implementation wants to store in the data field.
// Returns false if compilation fails.
- MUST_USE_RESULT static MaybeHandle<Object> Compile(Handle<JSRegExp> re,
- Handle<String> pattern,
- JSRegExp::Flags flags);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> Compile(
+ Handle<JSRegExp> re, Handle<String> pattern, JSRegExp::Flags flags);
// See ECMA-262 section 15.10.6.2.
// This function calls the garbage collector if necessary.
- V8_EXPORT_PRIVATE MUST_USE_RESULT static MaybeHandle<Object> Exec(
+ V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static MaybeHandle<Object> Exec(
Handle<JSRegExp> regexp, Handle<String> subject, int index,
Handle<RegExpMatchInfo> last_match_info);
@@ -132,7 +132,7 @@ class RegExpImpl {
// On a successful match, the result is a JSArray containing
// captured positions. On a failure, the result is the null value.
// Returns an empty handle in case of an exception.
- MUST_USE_RESULT static MaybeHandle<Object> IrregexpExec(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> IrregexpExec(
Handle<JSRegExp> regexp, Handle<String> subject, int index,
Handle<RegExpMatchInfo> last_match_info);
diff --git a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
index 89046a56f3..e16fbd6568 100644
--- a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
+++ b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
@@ -569,7 +569,7 @@ bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
// Table is 256 entries, so all Latin1 characters can be tested.
BranchOrBacktrack(on_no_match, hi, current_character(), Operand('z'));
}
- ExternalReference map = ExternalReference::re_word_character_map();
+ ExternalReference map = ExternalReference::re_word_character_map(isolate());
__ li(a0, Operand(map));
__ Addu(a0, a0, current_character());
__ lbu(a0, MemOperand(a0, 0));
@@ -582,7 +582,7 @@ bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
// Table is 256 entries, so all Latin1 characters can be tested.
__ Branch(&done, hi, current_character(), Operand('z'));
}
- ExternalReference map = ExternalReference::re_word_character_map();
+ ExternalReference map = ExternalReference::re_word_character_map(isolate());
__ li(a0, Operand(map));
__ Addu(a0, a0, current_character());
__ lbu(a0, MemOperand(a0, 0));
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
index 841b2931fe..40ac387c4e 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
@@ -600,7 +600,7 @@ bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
// Table is 256 entries, so all Latin1 characters can be tested.
BranchOrBacktrack(on_no_match, hi, current_character(), Operand('z'));
}
- ExternalReference map = ExternalReference::re_word_character_map();
+ ExternalReference map = ExternalReference::re_word_character_map(isolate());
__ li(a0, Operand(map));
__ Daddu(a0, a0, current_character());
__ Lbu(a0, MemOperand(a0, 0));
@@ -613,7 +613,7 @@ bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
// Table is 256 entries, so all Latin1 characters can be tested.
__ Branch(&done, hi, current_character(), Operand('z'));
}
- ExternalReference map = ExternalReference::re_word_character_map();
+ ExternalReference map = ExternalReference::re_word_character_map(isolate());
__ li(a0, Operand(map));
__ Daddu(a0, a0, current_character());
__ Lbu(a0, MemOperand(a0, 0));
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
index 1187fc04b8..fdda46424e 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
@@ -591,7 +591,8 @@ bool RegExpMacroAssemblerPPC::CheckSpecialCharacterClass(uc16 type,
__ cmpi(current_character(), Operand('z'));
BranchOrBacktrack(gt, on_no_match);
}
- ExternalReference map = ExternalReference::re_word_character_map();
+ ExternalReference map =
+ ExternalReference::re_word_character_map(isolate());
__ mov(r3, Operand(map));
__ lbzx(r3, MemOperand(r3, current_character()));
__ cmpli(r3, Operand::Zero());
@@ -605,7 +606,8 @@ bool RegExpMacroAssemblerPPC::CheckSpecialCharacterClass(uc16 type,
__ cmpli(current_character(), Operand('z'));
__ bgt(&done);
}
- ExternalReference map = ExternalReference::re_word_character_map();
+ ExternalReference map =
+ ExternalReference::re_word_character_map(isolate());
__ mov(r3, Operand(map));
__ lbzx(r3, MemOperand(r3, current_character()));
__ cmpli(r3, Operand::Zero());
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.cc b/deps/v8/src/regexp/regexp-macro-assembler.cc
index af285abcb0..7798fcecc1 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler.cc
@@ -161,8 +161,8 @@ int NativeRegExpMacroAssembler::CheckStackGuardState(
Isolate* isolate, int start_index, bool is_direct_call,
Address* return_address, Code* re_code, String** subject,
const byte** input_start, const byte** input_end) {
- DCHECK(re_code->instruction_start() <= *return_address);
- DCHECK(*return_address <= re_code->instruction_end());
+ DCHECK(re_code->raw_instruction_start() <= *return_address);
+ DCHECK(*return_address <= re_code->raw_instruction_end());
int return_value = 0;
// Prepare for possible GC.
HandleScope handles(isolate);
diff --git a/deps/v8/src/regexp/regexp-parser.cc b/deps/v8/src/regexp/regexp-parser.cc
index 72ed5b8d69..ebc56650b1 100644
--- a/deps/v8/src/regexp/regexp-parser.cc
+++ b/deps/v8/src/regexp/regexp-parser.cc
@@ -7,7 +7,7 @@
#include <vector>
#include "src/char-predicates-inl.h"
-#include "src/factory.h"
+#include "src/heap/factory.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/ostreams.h"
diff --git a/deps/v8/src/regexp/regexp-utils.cc b/deps/v8/src/regexp/regexp-utils.cc
index d483125dd6..0857342c0c 100644
--- a/deps/v8/src/regexp/regexp-utils.cc
+++ b/deps/v8/src/regexp/regexp-utils.cc
@@ -4,7 +4,7 @@
#include "src/regexp/regexp-utils.h"
-#include "src/factory.h"
+#include "src/heap/factory.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/regexp/jsregexp.h"
diff --git a/deps/v8/src/regexp/regexp-utils.h b/deps/v8/src/regexp/regexp-utils.h
index 4161337ad4..7508403e01 100644
--- a/deps/v8/src/regexp/regexp-utils.h
+++ b/deps/v8/src/regexp/regexp-utils.h
@@ -21,13 +21,13 @@ class RegExpUtils : public AllStatic {
int capture, bool* ok = nullptr);
// Last index (RegExp.lastIndex) accessors.
- static MUST_USE_RESULT MaybeHandle<Object> SetLastIndex(
+ static V8_WARN_UNUSED_RESULT MaybeHandle<Object> SetLastIndex(
Isolate* isolate, Handle<JSReceiver> regexp, uint64_t value);
- static MUST_USE_RESULT MaybeHandle<Object> GetLastIndex(
+ static V8_WARN_UNUSED_RESULT MaybeHandle<Object> GetLastIndex(
Isolate* isolate, Handle<JSReceiver> recv);
// ES#sec-regexpexec Runtime Semantics: RegExpExec ( R, S )
- static MUST_USE_RESULT MaybeHandle<Object> RegExpExec(
+ static V8_WARN_UNUSED_RESULT MaybeHandle<Object> RegExpExec(
Isolate* isolate, Handle<JSReceiver> regexp, Handle<String> string,
Handle<Object> exec);
@@ -43,7 +43,7 @@ class RegExpUtils : public AllStatic {
// AdvanceStringIndex ( S, index, unicode )
static uint64_t AdvanceStringIndex(Isolate* isolate, Handle<String> string,
uint64_t index, bool unicode);
- static MUST_USE_RESULT MaybeHandle<Object> SetAdvancedStringIndex(
+ static V8_WARN_UNUSED_RESULT MaybeHandle<Object> SetAdvancedStringIndex(
Isolate* isolate, Handle<JSReceiver> regexp, Handle<String> string,
bool unicode);
};
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
index 4f8f234171..837d5639cc 100644
--- a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
@@ -562,7 +562,8 @@ bool RegExpMacroAssemblerS390::CheckSpecialCharacterClass(uc16 type,
__ CmpP(current_character(), Operand('z'));
BranchOrBacktrack(gt, on_no_match);
}
- ExternalReference map = ExternalReference::re_word_character_map();
+ ExternalReference map =
+ ExternalReference::re_word_character_map(isolate());
__ mov(r2, Operand(map));
__ LoadlB(r2, MemOperand(r2, current_character()));
__ CmpLogicalP(r2, Operand::Zero());
@@ -576,7 +577,8 @@ bool RegExpMacroAssemblerS390::CheckSpecialCharacterClass(uc16 type,
__ CmpLogicalP(current_character(), Operand('z'));
__ bgt(&done);
}
- ExternalReference map = ExternalReference::re_word_character_map();
+ ExternalReference map =
+ ExternalReference::re_word_character_map(isolate());
__ mov(r2, Operand(map));
__ LoadlB(r2, MemOperand(r2, current_character()));
__ CmpLogicalP(r2, Operand::Zero());
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
index eb57b29602..c032ca1ff8 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
@@ -6,7 +6,7 @@
#include "src/regexp/x64/regexp-macro-assembler-x64.h"
-#include "src/factory.h"
+#include "src/heap/factory.h"
#include "src/log.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
@@ -623,7 +623,7 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
__ cmpl(current_character(), Immediate('z'));
BranchOrBacktrack(above, on_no_match);
}
- __ Move(rbx, ExternalReference::re_word_character_map());
+ __ Move(rbx, ExternalReference::re_word_character_map(isolate()));
DCHECK_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
__ testb(Operand(rbx, current_character(), times_1, 0),
current_character());
@@ -637,7 +637,7 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
__ cmpl(current_character(), Immediate('z'));
__ j(above, &done);
}
- __ Move(rbx, ExternalReference::re_word_character_map());
+ __ Move(rbx, ExternalReference::re_word_character_map(isolate()));
DCHECK_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
__ testb(Operand(rbx, current_character(), times_1, 0),
current_character());
diff --git a/deps/v8/src/register-configuration.cc b/deps/v8/src/register-configuration.cc
index 6b472a0b4e..f71423d26e 100644
--- a/deps/v8/src/register-configuration.cc
+++ b/deps/v8/src/register-configuration.cc
@@ -132,22 +132,10 @@ class ArchDefaultPoisoningRegisterConfiguration : public RegisterConfiguration {
get_num_allocatable_double_registers(),
InitializeGeneralRegisterCodes(), get_allocatable_double_codes(),
kSimpleFPAliasing ? AliasingKind::OVERLAP : AliasingKind::COMBINE,
- InitializeGeneralRegisterNames(), kFloatRegisterNames,
- kDoubleRegisterNames, kSimd128RegisterNames) {}
+ kGeneralRegisterNames, kFloatRegisterNames, kDoubleRegisterNames,
+ kSimd128RegisterNames) {}
private:
- static char const* const* InitializeGeneralRegisterNames() {
- int filtered_index = 0;
- for (int i = 0; i < kMaxAllocatableGeneralRegisterCount; ++i) {
- if (kAllocatableGeneralCodes[i] != kSpeculationPoisonRegister.code()) {
- general_register_names_[filtered_index] = kGeneralRegisterNames[i];
- filtered_index++;
- }
- }
- DCHECK_EQ(filtered_index, kMaxAllocatableGeneralRegisterCount - 1);
- return general_register_names_;
- }
-
static const int* InitializeGeneralRegisterCodes() {
int filtered_index = 0;
for (int i = 0; i < kMaxAllocatableGeneralRegisterCount; ++i) {
@@ -161,14 +149,10 @@ class ArchDefaultPoisoningRegisterConfiguration : public RegisterConfiguration {
return allocatable_general_codes_;
}
- static const char*
- general_register_names_[kMaxAllocatableGeneralRegisterCount - 1];
static int
allocatable_general_codes_[kMaxAllocatableGeneralRegisterCount - 1];
};
-const char* ArchDefaultPoisoningRegisterConfiguration::general_register_names_
- [kMaxAllocatableGeneralRegisterCount - 1];
int ArchDefaultPoisoningRegisterConfiguration::allocatable_general_codes_
[kMaxAllocatableGeneralRegisterCount - 1];
diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc
index 58cf706d0c..35456713a9 100644
--- a/deps/v8/src/runtime-profiler.cc
+++ b/deps/v8/src/runtime-profiler.cc
@@ -134,8 +134,8 @@ void RuntimeProfiler::AttemptOnStackReplacement(JavaScriptFrame* frame,
DCHECK_EQ(StackFrame::INTERPRETED, frame->type());
DCHECK(shared->HasBytecodeArray());
- int level = shared->bytecode_array()->osr_loop_nesting_level();
- shared->bytecode_array()->set_osr_loop_nesting_level(
+ int level = shared->GetBytecodeArray()->osr_loop_nesting_level();
+ shared->GetBytecodeArray()->set_osr_loop_nesting_level(
Min(level + loop_nesting_levels, AbstractCode::kMaxLoopNestingMarker));
}
@@ -184,7 +184,7 @@ bool RuntimeProfiler::MaybeOSR(JSFunction* function, JavaScriptFrame* frame) {
int64_t allowance =
kOSRBytecodeSizeAllowanceBase +
static_cast<int64_t>(ticks) * kOSRBytecodeSizeAllowancePerTick;
- if (shared->bytecode_array()->length() <= allowance) {
+ if (shared->GetBytecodeArray()->length() <= allowance) {
AttemptOnStackReplacement(frame);
}
return true;
@@ -197,17 +197,17 @@ OptimizationReason RuntimeProfiler::ShouldOptimize(JSFunction* function,
SharedFunctionInfo* shared = function->shared();
int ticks = function->feedback_vector()->profiler_ticks();
- if (shared->bytecode_array()->length() > kMaxBytecodeSizeForOpt) {
+ if (shared->GetBytecodeArray()->length() > kMaxBytecodeSizeForOpt) {
return OptimizationReason::kDoNotOptimize;
}
int ticks_for_optimization =
kProfilerTicksBeforeOptimization +
- (shared->bytecode_array()->length() / kBytecodeSizeAllowancePerTick);
+ (shared->GetBytecodeArray()->length() / kBytecodeSizeAllowancePerTick);
if (ticks >= ticks_for_optimization) {
return OptimizationReason::kHotAndStable;
- } else if (!any_ic_changed_ &&
- shared->bytecode_array()->length() < kMaxBytecodeSizeForEarlyOpt) {
+ } else if (!any_ic_changed_ && shared->GetBytecodeArray()->length() <
+ kMaxBytecodeSizeForEarlyOpt) {
// If no IC was patched since the last tick and this function is very
// small, optimistically optimize it now.
return OptimizationReason::kSmallFunction;
@@ -220,7 +220,7 @@ OptimizationReason RuntimeProfiler::ShouldOptimize(JSFunction* function,
PrintF("ICs changed]\n");
} else {
PrintF(" too large for small function optimization: %d/%d]\n",
- shared->bytecode_array()->length(), kMaxBytecodeSizeForEarlyOpt);
+ shared->GetBytecodeArray()->length(), kMaxBytecodeSizeForEarlyOpt);
}
}
return OptimizationReason::kDoNotOptimize;
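
The runtime-profiler hunks above switch bytecode_array() to GetBytecodeArray() but keep the tiering heuristic intact: a function becomes eligible for optimization once its profiler tick count reaches a base threshold plus an allowance proportional to bytecode length, with an early path for very small functions whose ICs have been stable. A sketch of that decision with made-up threshold values; the real kProfilerTicksBeforeOptimization and related constants are not quoted in this diff:

    #include <cstdio>

    // Illustrative thresholds only (assumptions); V8 defines its own values.
    constexpr int kProfilerTicksBeforeOptimization = 2;
    constexpr int kBytecodeSizeAllowancePerTick = 1200;
    constexpr int kMaxBytecodeSizeForOpt = 60 * 1024;
    constexpr int kMaxBytecodeSizeForEarlyOpt = 90;

    enum class OptimizationReason { kDoNotOptimize, kHotAndStable, kSmallFunction };

    // Mirrors the structure of RuntimeProfiler::ShouldOptimize as changed above.
    OptimizationReason ShouldOptimize(int ticks, int bytecode_length,
                                      bool any_ic_changed) {
      if (bytecode_length > kMaxBytecodeSizeForOpt)
        return OptimizationReason::kDoNotOptimize;
      int ticks_for_optimization =
          kProfilerTicksBeforeOptimization +
          bytecode_length / kBytecodeSizeAllowancePerTick;
      if (ticks >= ticks_for_optimization)
        return OptimizationReason::kHotAndStable;
      if (!any_ic_changed && bytecode_length < kMaxBytecodeSizeForEarlyOpt)
        return OptimizationReason::kSmallFunction;  // small and IC-stable: optimize early
      return OptimizationReason::kDoNotOptimize;
    }

    int main() {
      std::printf("%d\n", (int)ShouldOptimize(5, 4000, true));   // hot and stable
      std::printf("%d\n", (int)ShouldOptimize(1, 50, false));    // small function
      std::printf("%d\n", (int)ShouldOptimize(1, 4000, true));   // do not optimize
    }
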
diff --git a/deps/v8/src/runtime/runtime-array.cc b/deps/v8/src/runtime/runtime-array.cc
index 648606a274..f85eea8aee 100644
--- a/deps/v8/src/runtime/runtime-array.cc
+++ b/deps/v8/src/runtime/runtime-array.cc
@@ -8,7 +8,7 @@
#include "src/code-stubs.h"
#include "src/conversions-inl.h"
#include "src/elements.h"
-#include "src/factory.h"
+#include "src/heap/factory.h"
#include "src/isolate-inl.h"
#include "src/keys.h"
#include "src/messages.h"
@@ -391,7 +391,7 @@ RUNTIME_FUNCTION(Runtime_TrySliceSimpleNonFastElements) {
// implementation.
if (receiver->IsJSArray()) {
// This "fastish" path must make sure the destination array is a JSArray.
- if (!isolate->IsSpeciesLookupChainIntact() ||
+ if (!isolate->IsArraySpeciesLookupChainIntact() ||
!JSArray::cast(*receiver)->HasArrayPrototype(isolate)) {
return Smi::FromInt(0);
}
@@ -794,23 +794,5 @@ RUNTIME_FUNCTION(Runtime_ArrayIndexOf) {
return Smi::FromInt(-1);
}
-
-RUNTIME_FUNCTION(Runtime_SpreadIterablePrepare) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, spread, 0);
-
- // Iterate over the spread if we need to.
- if (spread->IterationHasObservableEffects()) {
- Handle<JSFunction> spread_iterable_function = isolate->spread_iterable();
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, spread,
- Execution::Call(isolate, spread_iterable_function,
- isolate->factory()->undefined_value(), 1, &spread));
- }
-
- return *spread;
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-atomics.cc b/deps/v8/src/runtime/runtime-atomics.cc
index 9849c694dc..aa2e260a09 100644
--- a/deps/v8/src/runtime/runtime-atomics.cc
+++ b/deps/v8/src/runtime/runtime-atomics.cc
@@ -8,7 +8,7 @@
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/conversions-inl.h"
-#include "src/factory.h"
+#include "src/heap/factory.h"
// Implement Atomic accesses to SharedArrayBuffers as defined in the
// SharedArrayBuffer draft spec, found here
diff --git a/deps/v8/src/runtime/runtime-classes.cc b/deps/v8/src/runtime/runtime-classes.cc
index 7869e32dd1..7b1379cf32 100644
--- a/deps/v8/src/runtime/runtime-classes.cc
+++ b/deps/v8/src/runtime/runtime-classes.cc
@@ -32,7 +32,7 @@ RUNTIME_FUNCTION(Runtime_ThrowConstructorNonCallableError) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 0);
- Handle<String> name(constructor->shared()->name(), isolate);
+ Handle<String> name(constructor->shared()->Name(), isolate);
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kConstructorNonCallable, name));
}
@@ -65,7 +65,7 @@ Object* ThrowNotSuperConstructor(Isolate* isolate, Handle<Object> constructor,
Handle<JSFunction> function) {
Handle<String> super_name;
if (constructor->IsJSFunction()) {
- super_name = handle(Handle<JSFunction>::cast(constructor)->shared()->name(),
+ super_name = handle(Handle<JSFunction>::cast(constructor)->shared()->Name(),
isolate);
} else if (constructor->IsOddball()) {
DCHECK(constructor->IsNull(isolate));
@@ -77,7 +77,7 @@ Object* ThrowNotSuperConstructor(Isolate* isolate, Handle<Object> constructor,
if (super_name->length() == 0) {
super_name = isolate->factory()->null_string();
}
- Handle<String> function_name(function->shared()->name(), isolate);
+ Handle<String> function_name(function->shared()->Name(), isolate);
// anonymous class
if (function_name->length() == 0) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -158,7 +158,7 @@ MaybeHandle<Object> GetMethodAndSetHomeObjectAndName(
SetHomeObject(isolate, *method, *home_object);
- if (!method->shared()->has_shared_name()) {
+ if (!method->shared()->HasSharedName()) {
// TODO(ishell): method does not have a shared name at this point only if
// the key is a computed property name. However, the bytecode generator
// explicitly generates ToName bytecodes to ensure that the computed
@@ -195,7 +195,7 @@ Object* GetMethodWithSharedNameAndSetHomeObject(Isolate* isolate,
SetHomeObject(isolate, *method, home_object);
- DCHECK(method->shared()->has_shared_name());
+ DCHECK(method->shared()->HasSharedName());
return *method;
}
diff --git a/deps/v8/src/runtime/runtime-collections.cc b/deps/v8/src/runtime/runtime-collections.cc
index efe4f455b1..7b2cae3337 100644
--- a/deps/v8/src/runtime/runtime-collections.cc
+++ b/deps/v8/src/runtime/runtime-collections.cc
@@ -6,7 +6,7 @@
#include "src/arguments.h"
#include "src/conversions-inl.h"
-#include "src/factory.h"
+#include "src/heap/factory.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
index 14b61b0ac6..3f894fd929 100644
--- a/deps/v8/src/runtime/runtime-compiler.cc
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -68,7 +68,7 @@ RUNTIME_FUNCTION(Runtime_FunctionFirstExecution) {
DCHECK(FLAG_log_function_events);
Handle<SharedFunctionInfo> sfi(function->shared());
LOG(isolate, FunctionEvent("first-execution", Script::cast(sfi->script()), -1,
- 0, sfi->start_position(), sfi->end_position(),
+ 0, sfi->StartPosition(), sfi->EndPosition(),
sfi->DebugName()));
function->feedback_vector()->ClearOptimizationMarker();
// Return the code to continue execution, we don't care at this point whether
@@ -129,20 +129,15 @@ RUNTIME_FUNCTION(Runtime_InstantiateAsmJs) {
return *result.ToHandleChecked();
}
}
- // Remove wasm data, mark as broken for asm->wasm,
- // replace code with CompileLazy, and return a smi 0 to indicate failure.
+ // Remove wasm data, mark as broken for asm->wasm, replace function code with
+ // CompileLazy, and return a smi 0 to indicate failure.
if (function->shared()->HasAsmWasmData()) {
- function->shared()->ClearAsmWasmData();
+ function->shared()->FlushCompiled();
}
function->shared()->set_is_asm_wasm_broken(true);
DCHECK(function->code() ==
isolate->builtins()->builtin(Builtins::kInstantiateAsmJs));
function->set_code(isolate->builtins()->builtin(Builtins::kCompileLazy));
- if (function->shared()->code() ==
- isolate->builtins()->builtin(Builtins::kInstantiateAsmJs)) {
- function->shared()->set_code(
- isolate->builtins()->builtin(Builtins::kCompileLazy));
- }
return Smi::kZero;
}
@@ -290,7 +285,7 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
}
if (!function->IsOptimized()) {
- function->set_code(function->shared()->code());
+ function->set_code(function->shared()->GetCode());
}
return nullptr;
}
diff --git a/deps/v8/src/runtime/runtime-date.cc b/deps/v8/src/runtime/runtime-date.cc
index d149af652b..e459da4da3 100644
--- a/deps/v8/src/runtime/runtime-date.cc
+++ b/deps/v8/src/runtime/runtime-date.cc
@@ -7,7 +7,7 @@
#include "src/arguments.h"
#include "src/conversions-inl.h"
#include "src/date.h"
-#include "src/factory.h"
+#include "src/heap/factory.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index daef53280e..a47ea2caaf 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -16,6 +16,7 @@
#include "src/debug/liveedit.h"
#include "src/frames-inl.h"
#include "src/globals.h"
+#include "src/interpreter/bytecode-array-accessor.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
@@ -43,16 +44,25 @@ RUNTIME_FUNCTION_RETURN_PAIR(Runtime_DebugBreakOnBytecode) {
// Get the top-most JavaScript frame.
JavaScriptFrameIterator it(isolate);
- isolate->debug()->Break(it.frame(), handle(it.frame()->function()));
+ if (isolate->debug_execution_mode() == DebugInfo::kBreakpoints) {
+ isolate->debug()->Break(it.frame(), handle(it.frame()->function()));
+ }
// Return the handler from the original bytecode array.
DCHECK(it.frame()->is_interpreted());
InterpretedFrame* interpreted_frame =
reinterpret_cast<InterpretedFrame*>(it.frame());
SharedFunctionInfo* shared = interpreted_frame->function()->shared();
- BytecodeArray* bytecode_array = shared->bytecode_array();
+ BytecodeArray* bytecode_array = shared->GetBytecodeArray();
int bytecode_offset = interpreted_frame->GetBytecodeOffset();
Bytecode bytecode = Bytecodes::FromByte(bytecode_array->get(bytecode_offset));
+
+ bool side_effect_check_failed = false;
+ if (isolate->debug_execution_mode() == DebugInfo::kSideEffects) {
+ side_effect_check_failed =
+ !isolate->debug()->PerformSideEffectCheckAtBytecode(interpreted_frame);
+ }
+
if (Bytecodes::Returns(bytecode)) {
// If we are returning (or suspending), reset the bytecode array on the
// interpreted stack frame to the non-debug variant so that the interpreter
@@ -70,10 +80,35 @@ RUNTIME_FUNCTION_RETURN_PAIR(Runtime_DebugBreakOnBytecode) {
isolate->interpreter()->GetAndMaybeDeserializeBytecodeHandler(bytecode,
operand_scale);
- return MakePair(isolate->debug()->return_value(),
+ return MakePair(side_effect_check_failed ? isolate->heap()->exception()
+ : isolate->debug()->return_value(),
Smi::FromInt(static_cast<uint8_t>(bytecode)));
}
+RUNTIME_FUNCTION(Runtime_DebugBreakAtEntry) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ USE(function);
+
+ DCHECK(function->shared()->HasDebugInfo());
+ DCHECK(function->shared()->GetDebugInfo()->BreakAtEntry());
+
+ // Get the top-most JavaScript frame.
+ JavaScriptFrameIterator it(isolate);
+ DCHECK_EQ(*function, it.frame()->function());
+ isolate->debug()->Break(it.frame(), function);
+
+ return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_DebugApplyInstrumentation) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ isolate->debug()->ApplyInstrumentation(handle(function->shared()));
+ return isolate->heap()->undefined_value();
+}
RUNTIME_FUNCTION(Runtime_HandleDebuggerStatement) {
SealHandleScope shs(isolate);
@@ -1143,7 +1178,8 @@ RUNTIME_FUNCTION(Runtime_DebugEvaluateGlobal) {
CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
- RETURN_RESULT_OR_FAILURE(isolate, DebugEvaluate::Global(isolate, source));
+ RETURN_RESULT_OR_FAILURE(isolate,
+ DebugEvaluate::Global(isolate, source, false));
}
@@ -1636,15 +1672,11 @@ RUNTIME_FUNCTION(Runtime_DebugOnFunctionCall) {
if (isolate->debug()->last_step_action() >= StepIn) {
isolate->debug()->PrepareStepIn(fun);
}
- if (isolate->needs_side_effect_check() &&
+ if (isolate->debug_execution_mode() == DebugInfo::kSideEffects &&
!isolate->debug()->PerformSideEffectCheck(fun)) {
return isolate->heap()->exception();
}
}
- if (fun->shared()->HasDebugInfo() &&
- fun->shared()->GetDebugInfo()->BreakAtEntry()) {
- isolate->debug()->Break(nullptr, fun);
- }
return isolate->heap()->undefined_value();
}
diff --git a/deps/v8/src/runtime/runtime-forin.cc b/deps/v8/src/runtime/runtime-forin.cc
index 5c67372801..9193daf9db 100644
--- a/deps/v8/src/runtime/runtime-forin.cc
+++ b/deps/v8/src/runtime/runtime-forin.cc
@@ -6,7 +6,7 @@
#include "src/arguments.h"
#include "src/elements.h"
-#include "src/factory.h"
+#include "src/heap/factory.h"
#include "src/isolate-inl.h"
#include "src/keys.h"
#include "src/objects-inl.h"
@@ -36,11 +36,12 @@ MaybeHandle<HeapObject> Enumerate(Handle<JSReceiver> receiver) {
// Test again, since cache may have been built by GetKeys() calls above.
if (!accumulator.is_receiver_simple_enum()) return keys;
}
+ DCHECK(!receiver->IsJSModuleNamespace());
return handle(receiver->map(), isolate);
}
// This is a slight modification of JSReceiver::HasProperty, dealing with
-// the oddities of JSProxy in for-in filter.
+// the oddities of JSProxy and JSModuleNamespace in for-in filter.
MaybeHandle<Object> HasEnumerableProperty(Isolate* isolate,
Handle<JSReceiver> receiver,
Handle<Object> key) {
@@ -92,7 +93,14 @@ MaybeHandle<Object> HasEnumerableProperty(Isolate* isolate,
case LookupIterator::INTEGER_INDEXED_EXOTIC:
// TypedArray out-of-bounds access.
return isolate->factory()->undefined_value();
- case LookupIterator::ACCESSOR:
+ case LookupIterator::ACCESSOR: {
+ if (it.GetHolder<Object>()->IsJSModuleNamespace()) {
+ result = JSModuleNamespace::GetPropertyAttributes(&it);
+ if (result.IsNothing()) return MaybeHandle<Object>();
+ DCHECK_EQ(0, result.FromJust() & DONT_ENUM);
+ }
+ return it.GetName();
+ }
case LookupIterator::DATA:
return it.GetName();
}
diff --git a/deps/v8/src/runtime/runtime-function.cc b/deps/v8/src/runtime/runtime-function.cc
index a9eddef644..5dcb4115e5 100644
--- a/deps/v8/src/runtime/runtime-function.cc
+++ b/deps/v8/src/runtime/runtime-function.cc
@@ -76,7 +76,7 @@ RUNTIME_FUNCTION(Runtime_FunctionGetScriptSourcePosition) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(JSFunction, fun, 0);
- int pos = fun->shared()->start_position();
+ int pos = fun->shared()->StartPosition();
return Smi::FromInt(pos);
}
@@ -112,26 +112,24 @@ RUNTIME_FUNCTION(Runtime_SetCode) {
return isolate->heap()->exception();
}
- // Set the code, scope info, formal parameter count, and the length
+ // Set the function data, scope info, formal parameter count, and the length
// of the target shared function info.
- target_shared->set_code(source_shared->code());
- if (source_shared->HasBytecodeArray()) {
- target_shared->set_bytecode_array(source_shared->bytecode_array());
- }
- target_shared->set_scope_info(source_shared->scope_info());
- target_shared->set_outer_scope_info(source_shared->outer_scope_info());
+ target_shared->set_function_data(source_shared->function_data());
target_shared->set_length(source_shared->GetLength());
- target_shared->set_feedback_metadata(source_shared->feedback_metadata());
+ target_shared->set_raw_outer_scope_info_or_feedback_metadata(
+ source_shared->raw_outer_scope_info_or_feedback_metadata());
target_shared->set_internal_formal_parameter_count(
source_shared->internal_formal_parameter_count());
- target_shared->set_start_position_and_type(
- source_shared->start_position_and_type());
- target_shared->set_end_position(source_shared->end_position());
+ target_shared->set_raw_start_position_and_type(
+ source_shared->raw_start_position_and_type());
+ target_shared->set_raw_end_position(source_shared->raw_end_position());
bool was_native = target_shared->native();
- target_shared->set_compiler_hints(source_shared->compiler_hints());
+ target_shared->set_flags(source_shared->flags());
target_shared->set_native(was_native);
target_shared->set_function_literal_id(source_shared->function_literal_id());
+ target_shared->set_scope_info(source_shared->scope_info());
+
Handle<Object> source_script(source_shared->script(), isolate);
if (source_script->IsScript()) {
SharedFunctionInfo::SetScript(source_shared,
@@ -140,8 +138,7 @@ RUNTIME_FUNCTION(Runtime_SetCode) {
SharedFunctionInfo::SetScript(target_shared, source_script);
// Set the code of the target function.
- target->set_code(source_shared->code());
-
+ target->set_code(source_shared->GetCode());
Handle<Context> context(source->context());
target->set_context(*context);
diff --git a/deps/v8/src/runtime/runtime-generator.cc b/deps/v8/src/runtime/runtime-generator.cc
index a7d14b839e..e69d334042 100644
--- a/deps/v8/src/runtime/runtime-generator.cc
+++ b/deps/v8/src/runtime/runtime-generator.cc
@@ -5,7 +5,7 @@
#include "src/runtime/runtime-utils.h"
#include "src/arguments.h"
-#include "src/factory.h"
+#include "src/heap/factory.h"
#include "src/objects-inl.h"
namespace v8 {
@@ -26,7 +26,7 @@ RUNTIME_FUNCTION(Runtime_CreateJSGeneratorObject) {
// Underlying function needs to have bytecode available.
DCHECK(function->shared()->HasBytecodeArray());
- int size = function->shared()->bytecode_array()->register_count();
+ int size = function->shared()->GetBytecodeArray()->register_count();
Handle<FixedArray> register_file = isolate->factory()->NewFixedArray(size);
Handle<JSGeneratorObject> generator =
@@ -153,7 +153,7 @@ RUNTIME_FUNCTION(Runtime_AsyncGeneratorHasCatchHandlerForPC) {
SharedFunctionInfo* shared = generator->function()->shared();
DCHECK(shared->HasBytecodeArray());
- HandlerTable handler_table(shared->bytecode_array());
+ HandlerTable handler_table(shared->GetBytecodeArray());
int pc = Smi::cast(generator->input_or_debug_pos())->value();
HandlerTable::CatchPrediction catch_prediction = HandlerTable::ASYNC_AWAIT;
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index a24ded7e21..c5a693448b 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -151,9 +151,9 @@ RUNTIME_FUNCTION(Runtime_ThrowInvalidTypedArrayAlignment) {
Handle<String> type =
isolate->factory()->NewStringFromAsciiChecked(ElementsKindToType(kind));
- ExternalArrayType external_type =
- isolate->factory()->GetArrayTypeFromElementsKind(kind);
- size_t size = isolate->factory()->GetExternalArrayElementSize(external_type);
+ ExternalArrayType external_type;
+ size_t size;
+ Factory::TypeAndSizeForElementsKind(kind, &external_type, &size);
Handle<Object> element_size =
handle(Smi::FromInt(static_cast<int>(size)), isolate);
@@ -458,27 +458,22 @@ RUNTIME_FUNCTION(Runtime_DeserializeLazy) {
DCHECK(FLAG_lazy_deserialization);
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
- int builtin_id = shared->lazy_deserialization_builtin_id();
+#ifdef DEBUG
+ int builtin_id = shared->builtin_id();
// At this point, the builtins table should definitely have DeserializeLazy
- // set at the position of the target builtin. Also, we should never lazily
- // deserialize DeserializeLazy.
-
- DCHECK_NE(Builtins::kDeserializeLazy, builtin_id);
- DCHECK_EQ(Builtins::kDeserializeLazy,
- isolate->builtins()->builtin(builtin_id)->builtin_index());
-
+ // set at the position of the target builtin.
+ CHECK_EQ(Builtins::kDeserializeLazy,
+ isolate->builtins()->builtin(builtin_id)->builtin_index());
// The DeserializeLazy builtin tail-calls the deserialized builtin. This only
// works with JS-linkage.
- DCHECK(Builtins::IsLazy(builtin_id));
- DCHECK_EQ(Builtins::TFJ, Builtins::KindOf(builtin_id));
+ CHECK(Builtins::IsLazy(builtin_id));
+ CHECK_EQ(Builtins::TFJ, Builtins::KindOf(builtin_id));
+#endif // DEBUG
- Code* code = Snapshot::DeserializeBuiltin(isolate, builtin_id);
- DCHECK_EQ(builtin_id, code->builtin_index());
- DCHECK_EQ(code, isolate->builtins()->builtin(builtin_id));
- shared->set_code(code);
- function->set_code(code);
+ Code* code = Snapshot::EnsureBuiltinIsDeserialized(isolate, shared);
+ function->set_code(code);
return code;
}
diff --git a/deps/v8/src/runtime/runtime-intl.cc b/deps/v8/src/runtime/runtime-intl.cc
index 385f386b6b..a49a4423cf 100644
--- a/deps/v8/src/runtime/runtime-intl.cc
+++ b/deps/v8/src/runtime/runtime-intl.cc
@@ -14,8 +14,8 @@
#include "src/api-natives.h"
#include "src/api.h"
#include "src/arguments.h"
-#include "src/factory.h"
#include "src/global-handles.h"
+#include "src/heap/factory.h"
#include "src/intl.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
@@ -501,23 +501,16 @@ RUNTIME_FUNCTION(Runtime_CreateCollator) {
Handle<JSFunction> constructor(
isolate->native_context()->intl_collator_function());
- Handle<JSObject> local_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, local_object,
+ Handle<JSObject> collator_holder;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, collator_holder,
JSObject::New(constructor, constructor));
- // Set collator as embedder field of the resulting JS object.
- icu::Collator* collator =
- Collator::InitializeCollator(isolate, locale, options, resolved);
-
- if (!collator) return isolate->ThrowIllegalOperation();
-
- local_object->SetEmbedderField(0, reinterpret_cast<Smi*>(collator));
+ if (!Collator::InitializeCollator(isolate, collator_holder, locale, options,
+ resolved)) {
+ return isolate->ThrowIllegalOperation();
+ }
- Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
- GlobalHandles::MakeWeak(wrapper.location(), wrapper.location(),
- Collator::DeleteCollator,
- WeakCallbackType::kInternalFields);
- return *local_object;
+ return *collator_holder;
}
RUNTIME_FUNCTION(Runtime_InternalCompare) {
diff --git a/deps/v8/src/runtime/runtime-literals.cc b/deps/v8/src/runtime/runtime-literals.cc
index c82a449bda..568849b3d9 100644
--- a/deps/v8/src/runtime/runtime-literals.cc
+++ b/deps/v8/src/runtime/runtime-literals.cc
@@ -41,10 +41,11 @@ class JSObjectWalkVisitor {
JSObjectWalkVisitor(ContextObject* site_context, DeepCopyHints hints)
: site_context_(site_context), hints_(hints) {}
- MUST_USE_RESULT MaybeHandle<JSObject> StructureWalk(Handle<JSObject> object);
+ V8_WARN_UNUSED_RESULT MaybeHandle<JSObject> StructureWalk(
+ Handle<JSObject> object);
protected:
- MUST_USE_RESULT inline MaybeHandle<JSObject> VisitElementOrProperty(
+ V8_WARN_UNUSED_RESULT inline MaybeHandle<JSObject> VisitElementOrProperty(
Handle<JSObject> object, Handle<JSObject> value) {
Handle<AllocationSite> current_site = site_context()->EnterNewScope();
MaybeHandle<JSObject> copy_of_value = StructureWalk(value);
diff --git a/deps/v8/src/runtime/runtime-liveedit.cc b/deps/v8/src/runtime/runtime-liveedit.cc
index 4ed2071c7b..ee1602b712 100644
--- a/deps/v8/src/runtime/runtime-liveedit.cc
+++ b/deps/v8/src/runtime/runtime-liveedit.cc
@@ -45,9 +45,9 @@ RUNTIME_FUNCTION(Runtime_LiveEditFindSharedFunctionInfosForScript) {
for (int i = 0; i < found_size; ++i) {
Handle<SharedFunctionInfo> shared = found[i];
SharedInfoWrapper info_wrapper = SharedInfoWrapper::Create(isolate);
- Handle<String> name(shared->name(), isolate);
- info_wrapper.SetProperties(name, shared->start_position(),
- shared->end_position(), shared);
+ Handle<String> name(shared->Name(), isolate);
+ info_wrapper.SetProperties(name, shared->StartPosition(),
+ shared->EndPosition(), shared);
result->set(i, *info_wrapper.GetJSArray());
}
return *isolate->factory()->NewJSArrayWithElements(result);
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index 90dddab211..b68315f286 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -427,7 +427,7 @@ RUNTIME_FUNCTION(Runtime_InternalSetPrototype) {
CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 1);
if (prototype->IsJSFunction()) {
Handle<JSFunction> function = Handle<JSFunction>::cast(prototype);
- if (!function->shared()->has_shared_name()) {
+ if (!function->shared()->HasSharedName()) {
Handle<Map> function_map(function->map(), isolate);
if (!JSFunction::SetName(function, isolate->factory()->proto_string(),
isolate->factory()->empty_string())) {
@@ -588,24 +588,6 @@ RUNTIME_FUNCTION(Runtime_AddElement) {
}
-RUNTIME_FUNCTION(Runtime_AppendElement) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
-
- CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
- CHECK(!value->IsTheHole(isolate));
-
- uint32_t index;
- CHECK(array->length()->ToArrayIndex(&index));
-
- RETURN_FAILURE_ON_EXCEPTION(
- isolate, JSObject::AddDataElement(array, index, value, NONE));
- JSObject::ValidateElements(*array);
- return *array;
-}
-
-
RUNTIME_FUNCTION(Runtime_SetProperty) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
@@ -839,7 +821,7 @@ RUNTIME_FUNCTION(Runtime_DefineDataPropertyInLiteral) {
if (flags & DataPropertyInLiteralFlag::kSetFunctionName) {
DCHECK(value->IsJSFunction());
Handle<JSFunction> function = Handle<JSFunction>::cast(value);
- DCHECK(!function->shared()->has_shared_name());
+ DCHECK(!function->shared()->HasSharedName());
Handle<Map> function_map(function->map(), isolate);
if (!JSFunction::SetName(function, name,
isolate->factory()->empty_string())) {
@@ -918,6 +900,12 @@ RUNTIME_FUNCTION(Runtime_ClassOf) {
return JSReceiver::cast(obj)->class_name();
}
+RUNTIME_FUNCTION(Runtime_GetFunctionName) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ return *JSFunction::GetName(isolate, function);
+}
RUNTIME_FUNCTION(Runtime_DefineGetterPropertyUnchecked) {
HandleScope scope(isolate);
@@ -927,7 +915,7 @@ RUNTIME_FUNCTION(Runtime_DefineGetterPropertyUnchecked) {
CONVERT_ARG_HANDLE_CHECKED(JSFunction, getter, 2);
CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 3);
- if (String::cast(getter->shared()->name())->length() == 0) {
+ if (String::cast(getter->shared()->Name())->length() == 0) {
Handle<Map> getter_map(getter->map(), isolate);
if (!JSFunction::SetName(getter, name, isolate->factory()->get_string())) {
return isolate->heap()->exception();
@@ -1059,7 +1047,7 @@ RUNTIME_FUNCTION(Runtime_DefineSetterPropertyUnchecked) {
CONVERT_ARG_HANDLE_CHECKED(JSFunction, setter, 2);
CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 3);
- if (String::cast(setter->shared()->name())->length() == 0) {
+ if (String::cast(setter->shared()->Name())->length() == 0) {
Handle<Map> setter_map(setter->map(), isolate);
if (!JSFunction::SetName(setter, name, isolate->factory()->set_string())) {
return isolate->heap()->exception();
diff --git a/deps/v8/src/runtime/runtime-promise.cc b/deps/v8/src/runtime/runtime-promise.cc
index 2d3a4fda50..b2a7e8bae1 100644
--- a/deps/v8/src/runtime/runtime-promise.cc
+++ b/deps/v8/src/runtime/runtime-promise.cc
@@ -74,6 +74,7 @@ RUNTIME_FUNCTION(Runtime_RunMicrotaskCallback) {
MicrotaskCallback callback = ToCData<MicrotaskCallback>(microtask_callback);
void* data = ToCData<void*>(microtask_data);
callback(data);
+ RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
return isolate->heap()->undefined_value();
}
diff --git a/deps/v8/src/runtime/runtime-proxy.cc b/deps/v8/src/runtime/runtime-proxy.cc
index a10260c1e2..5340d31843 100644
--- a/deps/v8/src/runtime/runtime-proxy.cc
+++ b/deps/v8/src/runtime/runtime-proxy.cc
@@ -6,7 +6,7 @@
#include "src/arguments.h"
#include "src/elements.h"
-#include "src/factory.h"
+#include "src/heap/factory.h"
#include "src/isolate-inl.h"
#include "src/objects-inl.h"
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index 920f37cf98..dd65c1dee6 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -529,7 +529,7 @@ void TruncateRegexpIndicesList(Isolate* isolate) {
} // namespace
template <typename ResultSeqString>
-MUST_USE_RESULT static Object* StringReplaceGlobalAtomRegExpWithString(
+V8_WARN_UNUSED_RESULT static Object* StringReplaceGlobalAtomRegExpWithString(
Isolate* isolate, Handle<String> subject, Handle<JSRegExp> pattern_regexp,
Handle<String> replacement, Handle<RegExpMatchInfo> last_match_info) {
DCHECK(subject->IsFlat());
@@ -608,7 +608,7 @@ MUST_USE_RESULT static Object* StringReplaceGlobalAtomRegExpWithString(
return *result;
}
-MUST_USE_RESULT static Object* StringReplaceGlobalRegExpWithString(
+V8_WARN_UNUSED_RESULT static Object* StringReplaceGlobalRegExpWithString(
Isolate* isolate, Handle<String> subject, Handle<JSRegExp> regexp,
Handle<String> replacement, Handle<RegExpMatchInfo> last_match_info) {
DCHECK(subject->IsFlat());
@@ -699,7 +699,7 @@ MUST_USE_RESULT static Object* StringReplaceGlobalRegExpWithString(
}
template <typename ResultSeqString>
-MUST_USE_RESULT static Object* StringReplaceGlobalRegExpWithEmptyString(
+V8_WARN_UNUSED_RESULT static Object* StringReplaceGlobalRegExpWithEmptyString(
Isolate* isolate, Handle<String> subject, Handle<JSRegExp> regexp,
Handle<RegExpMatchInfo> last_match_info) {
DCHECK(subject->IsFlat());
@@ -1300,10 +1300,9 @@ static Object* SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
// Legacy implementation of RegExp.prototype[Symbol.replace] which
// doesn't properly call the underlying exec method.
-MUST_USE_RESULT MaybeHandle<String> RegExpReplace(Isolate* isolate,
- Handle<JSRegExp> regexp,
- Handle<String> string,
- Handle<Object> replace_obj) {
+V8_WARN_UNUSED_RESULT MaybeHandle<String> RegExpReplace(
+ Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> string,
+ Handle<Object> replace_obj) {
// Functional fast-paths are dispatched directly by replace builtin.
DCHECK(RegExpUtils::IsUnmodifiedRegExp(isolate, regexp));
DCHECK(!replace_obj->IsCallable());
@@ -1545,9 +1544,9 @@ RUNTIME_FUNCTION(Runtime_StringReplaceNonGlobalRegExpWithFunction) {
namespace {
-MUST_USE_RESULT MaybeHandle<Object> ToUint32(Isolate* isolate,
- Handle<Object> object,
- uint32_t* out) {
+V8_WARN_UNUSED_RESULT MaybeHandle<Object> ToUint32(Isolate* isolate,
+ Handle<Object> object,
+ uint32_t* out) {
if (object->IsUndefined(isolate)) {
*out = kMaxUInt32;
return object;
diff --git a/deps/v8/src/runtime/runtime-scopes.cc b/deps/v8/src/runtime/runtime-scopes.cc
index 3d2d7940a4..9483949674 100644
--- a/deps/v8/src/runtime/runtime-scopes.cc
+++ b/deps/v8/src/runtime/runtime-scopes.cc
@@ -196,19 +196,6 @@ RUNTIME_FUNCTION(Runtime_DeclareGlobals) {
CONVERT_ARG_HANDLE_CHECKED(FixedArray, declarations, 0);
CONVERT_SMI_ARG_CHECKED(flags, 1);
- CONVERT_ARG_HANDLE_CHECKED(FeedbackVector, feedback_vector, 2);
-
- return DeclareGlobals(isolate, declarations, flags, feedback_vector);
-}
-
-// TODO(ishell): merge this with Runtime::kDeclareGlobals once interpreter
-// is able to pass feedback vector.
-RUNTIME_FUNCTION(Runtime_DeclareGlobalsForInterpreter) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
-
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, declarations, 0);
- CONVERT_SMI_ARG_CHECKED(flags, 1);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, closure, 2);
Handle<FeedbackVector> feedback_vector(closure->feedback_vector(), isolate);
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index 6b2f3467fc..78de914b72 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -211,11 +211,6 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
// If the function has optimized code, ensure that we check for it and return.
if (function->HasOptimizedCode()) {
- if (!function->IsInterpreted()) {
- // For non I+TF path, install a shim which checks the optimization marker.
- function->set_code(
- isolate->builtins()->builtin(Builtins::kCheckOptimizationMarker));
- }
DCHECK(function->ChecksOptimizationMarker());
return isolate->heap()->undefined_value();
}
@@ -236,8 +231,14 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
: "non-concurrent");
}
- JSFunction::EnsureFeedbackVector(function);
+ // This function may not have been lazily compiled yet, even though its shared
+ // function has.
+ if (!function->is_compiled()) {
+ DCHECK(function->shared()->IsInterpreted());
+ function->set_code(*BUILTIN_CODE(isolate, InterpreterEntryTrampoline));
+ }
+ JSFunction::EnsureFeedbackVector(function);
function->MarkForOptimization(concurrency_mode);
return isolate->heap()->undefined_value();
@@ -753,8 +754,8 @@ RUNTIME_FUNCTION(Runtime_IsAsmWasmCode) {
// Doesn't have wasm data.
return isolate->heap()->false_value();
}
- if (function->shared()->code() !=
- isolate->builtins()->builtin(Builtins::kInstantiateAsmJs)) {
+ if (function->shared()->HasBuiltinId() &&
+ function->shared()->builtin_id() == Builtins::kInstantiateAsmJs) {
// Hasn't been compiled yet.
return isolate->heap()->false_value();
}
@@ -839,11 +840,24 @@ TYPED_ARRAYS(FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION)
#undef FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION
+RUNTIME_FUNCTION(Runtime_ArraySpeciesProtector) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(0, args.length());
+ return isolate->heap()->ToBoolean(isolate->IsArraySpeciesLookupChainIntact());
+}
-RUNTIME_FUNCTION(Runtime_SpeciesProtector) {
+RUNTIME_FUNCTION(Runtime_TypedArraySpeciesProtector) {
SealHandleScope shs(isolate);
DCHECK_EQ(0, args.length());
- return isolate->heap()->ToBoolean(isolate->IsSpeciesLookupChainIntact());
+ return isolate->heap()->ToBoolean(
+ isolate->IsTypedArraySpeciesLookupChainIntact());
+}
+
+RUNTIME_FUNCTION(Runtime_PromiseSpeciesProtector) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(0, args.length());
+ return isolate->heap()->ToBoolean(
+ isolate->IsPromiseSpeciesLookupChainIntact());
}
// Take a compiled wasm module, serialize it and copy the buffer into an array
@@ -977,16 +991,14 @@ RUNTIME_FUNCTION(Runtime_WasmTraceMemory) {
DCHECK(it.is_wasm());
WasmCompiledFrame* frame = WasmCompiledFrame::cast(it.frame());
- uint8_t* mem_start = reinterpret_cast<uint8_t*>(frame->wasm_instance()
- ->memory_object()
- ->array_buffer()
- ->allocation_base());
+ uint8_t* mem_start = reinterpret_cast<uint8_t*>(
+ frame->wasm_instance()->memory_object()->array_buffer()->backing_store());
int func_index = frame->function_index();
int pos = frame->position();
// TODO(titzer): eliminate dependency on WasmModule definition here.
int func_start =
frame->wasm_instance()->module()->functions[func_index].code.offset();
- wasm::ExecutionEngine eng = frame->wasm_code().is_liftoff()
+ wasm::ExecutionEngine eng = frame->wasm_code()->is_liftoff()
? wasm::ExecutionEngine::kLiftoff
: wasm::ExecutionEngine::kTurbofan;
wasm::TraceMemoryOperation(eng, info, func_index, pos - func_start,
@@ -999,15 +1011,9 @@ RUNTIME_FUNCTION(Runtime_IsLiftoffFunction) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
CHECK(WasmExportedFunction::IsWasmExportedFunction(*function));
- WasmCodeWrapper wrapper =
+ wasm::WasmCode* wasm_code =
WasmExportedFunction::cast(*function)->GetWasmCode();
- if (!wrapper.IsCodeObject()) {
- const wasm::WasmCode* wasm_code = wrapper.GetWasmCode();
- return isolate->heap()->ToBoolean(wasm_code->is_liftoff());
- } else {
- Handle<Code> wasm_code = wrapper.GetCode();
- return isolate->heap()->ToBoolean(!wasm_code->is_turbofanned());
- }
+ return isolate->heap()->ToBoolean(wasm_code->is_liftoff());
}
RUNTIME_FUNCTION(Runtime_CompleteInobjectSlackTracking) {
@@ -1025,12 +1031,7 @@ RUNTIME_FUNCTION(Runtime_FreezeWasmLazyCompilation) {
DisallowHeapAllocation no_gc;
CONVERT_ARG_CHECKED(WasmInstanceObject, instance, 0);
- WasmSharedModuleData* shared = instance->compiled_module()->shared();
- CHECK(shared->has_lazy_compilation_orchestrator());
- auto* orchestrator = Managed<wasm::LazyCompilationOrchestrator>::cast(
- shared->lazy_compilation_orchestrator())
- ->get();
- orchestrator->FreezeLazyCompilationForTesting();
+ instance->compiled_module()->GetNativeModule()->set_lazy_compile_frozen(true);
return isolate->heap()->undefined_value();
}
diff --git a/deps/v8/src/runtime/runtime-typedarray.cc b/deps/v8/src/runtime/runtime-typedarray.cc
index f8fd3cc622..66d88f9860 100644
--- a/deps/v8/src/runtime/runtime-typedarray.cc
+++ b/deps/v8/src/runtime/runtime-typedarray.cc
@@ -6,7 +6,7 @@
#include "src/arguments.h"
#include "src/elements.h"
-#include "src/factory.h"
+#include "src/heap/factory.h"
#include "src/messages.h"
#include "src/objects-inl.h"
#include "src/runtime/runtime.h"
diff --git a/deps/v8/src/runtime/runtime-wasm.cc b/deps/v8/src/runtime/runtime-wasm.cc
index 0b002d0ec6..9f17d0a78d 100644
--- a/deps/v8/src/runtime/runtime-wasm.cc
+++ b/deps/v8/src/runtime/runtime-wasm.cc
@@ -9,8 +9,8 @@
#include "src/compiler/wasm-compiler.h"
#include "src/conversions.h"
#include "src/debug/debug.h"
-#include "src/factory.h"
#include "src/frame-constants.h"
+#include "src/heap/factory.h"
#include "src/objects-inl.h"
#include "src/objects/frame-array-inl.h"
#include "src/trap-handler/trap-handler.h"
@@ -31,18 +31,13 @@ WasmInstanceObject* GetWasmInstanceOnStackTop(Isolate* isolate) {
const Address entry = Isolate::c_entry_fp(isolate->thread_local_top());
Address pc =
Memory::Address_at(entry + StandardFrameConstants::kCallerPCOffset);
- WasmInstanceObject* owning_instance = nullptr;
- if (FLAG_wasm_jit_to_native) {
- owning_instance = WasmInstanceObject::GetOwningInstance(
- isolate->wasm_engine()->code_manager()->LookupCode(pc));
- } else {
- owning_instance = WasmInstanceObject::GetOwningInstanceGC(
- isolate->inner_pointer_to_code_cache()->GetCacheEntry(pc)->code);
- }
+ WasmInstanceObject* owning_instance = WasmInstanceObject::GetOwningInstance(
+ isolate->wasm_engine()->code_manager()->LookupCode(pc));
CHECK_NOT_NULL(owning_instance);
return owning_instance;
}
+// TODO(titzer): rename to GetNativeContextFromWasmInstanceOnStackTop()
Context* GetWasmContextOnStackTop(Isolate* isolate) {
return GetWasmInstanceOnStackTop(isolate)
->compiled_module()
@@ -82,8 +77,8 @@ RUNTIME_FUNCTION(Runtime_WasmGrowMemory) {
DCHECK_NULL(isolate->context());
isolate->set_context(instance->compiled_module()->native_context());
- return *isolate->factory()->NewNumberFromInt(
- WasmInstanceObject::GrowMemory(isolate, instance, delta_pages));
+ return *isolate->factory()->NewNumberFromInt(WasmMemoryObject::Grow(
+ isolate, handle(instance->memory_object(), isolate), delta_pages));
}
RUNTIME_FUNCTION(Runtime_ThrowWasmError) {
@@ -223,7 +218,7 @@ RUNTIME_FUNCTION(Runtime_WasmExceptionSetElement) {
CHECK_LT(index, Smi::ToInt(values->length()));
CONVERT_SMI_ARG_CHECKED(value, 1);
auto* vals =
- reinterpret_cast<uint16_t*>(values->GetBuffer()->allocation_base());
+ reinterpret_cast<uint16_t*>(values->GetBuffer()->backing_store());
vals[index] = static_cast<uint16_t>(value);
}
}
@@ -292,19 +287,17 @@ RUNTIME_FUNCTION(Runtime_WasmStackGuard) {
return isolate->stack_guard()->HandleInterrupts();
}
-RUNTIME_FUNCTION(Runtime_WasmCompileLazy) {
- DCHECK_EQ(0, args.length());
+RUNTIME_FUNCTION_RETURN_PAIR(Runtime_WasmCompileLazy) {
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance_on_stack, 0);
+ // TODO(titzer): The location on the stack is not visited by the
+ // roots visitor because the type of the frame is a special
+ // WASM builtin. Reopen the handle in a handle scope as a workaround.
HandleScope scope(isolate);
+ Handle<WasmInstanceObject> instance(*instance_on_stack, isolate);
- if (FLAG_wasm_jit_to_native) {
- Address new_func = wasm::CompileLazy(isolate);
- // The alternative to this is having 2 lazy compile builtins. The builtins
- // are part of the snapshot, so the flag has no impact on the codegen there.
- return reinterpret_cast<Object*>(new_func - Code::kHeaderSize +
- kHeapObjectTag);
- } else {
- return *wasm::CompileLazyOnGCHeap(isolate);
- }
+ Address entrypoint = wasm::CompileLazy(isolate, instance);
+ return MakePair(reinterpret_cast<Object*>(entrypoint), *instance);
}
} // namespace internal
diff --git a/deps/v8/src/runtime/runtime.cc b/deps/v8/src/runtime/runtime.cc
index 26880cdafa..3ae82d41c5 100644
--- a/deps/v8/src/runtime/runtime.cc
+++ b/deps/v8/src/runtime/runtime.cc
@@ -98,6 +98,39 @@ void InitializeIntrinsicFunctionNames() {
} // namespace
+bool Runtime::IsNonReturning(FunctionId id) {
+ switch (id) {
+ case Runtime::kThrowUnsupportedSuperError:
+ case Runtime::kThrowConstructorNonCallableError:
+ case Runtime::kThrowStaticPrototypeError:
+ case Runtime::kThrowSuperAlreadyCalledError:
+ case Runtime::kThrowSuperNotCalled:
+ case Runtime::kReThrow:
+ case Runtime::kThrow:
+ case Runtime::kThrowApplyNonFunction:
+ case Runtime::kThrowCalledNonCallable:
+ case Runtime::kThrowConstructedNonConstructable:
+ case Runtime::kThrowConstructorReturnedNonObject:
+ case Runtime::kThrowInvalidStringLength:
+ case Runtime::kThrowInvalidTypedArrayAlignment:
+ case Runtime::kThrowIteratorResultNotAnObject:
+ case Runtime::kThrowThrowMethodMissing:
+ case Runtime::kThrowSymbolIteratorInvalid:
+ case Runtime::kThrowNotConstructor:
+ case Runtime::kThrowRangeError:
+ case Runtime::kThrowReferenceError:
+ case Runtime::kThrowStackOverflow:
+ case Runtime::kThrowSymbolAsyncIteratorInvalid:
+ case Runtime::kThrowTypeError:
+ case Runtime::kThrowConstAssignError:
+ case Runtime::kThrowWasmError:
+ case Runtime::kThrowWasmStackOverflow:
+ return true;
+ default:
+ return false;
+ }
+}
+
const Runtime::Function* Runtime::FunctionForName(const unsigned char* name,
int length) {
base::CallOnce(&initialize_function_name_map_once,
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index 2bfd280803..48a63d500d 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -36,33 +36,32 @@ namespace internal {
// A variable number of arguments is specified by a -1, additional restrictions
// are specified by inline comments
-#define FOR_EACH_INTRINSIC_ARRAY(F) \
- F(TransitionElementsKind, 2, 1) \
- F(RemoveArrayHoles, 2, 1) \
- F(MoveArrayContents, 2, 1) \
- F(EstimateNumberOfElements, 1, 1) \
- F(GetArrayKeys, 2, 1) \
- F(TrySliceSimpleNonFastElements, 3, 1) \
- F(NewArray, -1 /* >= 3 */, 1) \
- F(NormalizeElements, 1, 1) \
- F(GrowArrayElements, 2, 1) \
- F(HasComplexElements, 1, 1) \
- F(IsArray, 1, 1) \
- F(ArrayIsArray, 1, 1) \
- F(ArraySpeciesConstructor, 1, 1) \
- F(ArrayIncludes_Slow, 3, 1) \
- F(ArrayIndexOf, 3, 1) \
- F(SpreadIterablePrepare, 1, 1)
-
-#define FOR_EACH_INTRINSIC_ATOMICS(F) \
- F(AtomicsExchange, 3, 1) \
- F(AtomicsCompareExchange, 4, 1) \
- F(AtomicsAdd, 3, 1) \
- F(AtomicsSub, 3, 1) \
- F(AtomicsAnd, 3, 1) \
- F(AtomicsOr, 3, 1) \
- F(AtomicsXor, 3, 1) \
- F(AtomicsNumWaitersForTesting, 2, 1) \
+#define FOR_EACH_INTRINSIC_ARRAY(F) \
+ F(ArrayIncludes_Slow, 3, 1) \
+ F(ArrayIndexOf, 3, 1) \
+ F(ArrayIsArray, 1, 1) \
+ F(ArraySpeciesConstructor, 1, 1) \
+ F(EstimateNumberOfElements, 1, 1) \
+ F(GetArrayKeys, 2, 1) \
+ F(GrowArrayElements, 2, 1) \
+ F(HasComplexElements, 1, 1) \
+ F(IsArray, 1, 1) \
+ F(MoveArrayContents, 2, 1) \
+ F(NewArray, -1 /* >= 3 */, 1) \
+ F(NormalizeElements, 1, 1) \
+ F(RemoveArrayHoles, 2, 1) \
+ F(TransitionElementsKind, 2, 1) \
+ F(TrySliceSimpleNonFastElements, 3, 1)
+
+#define FOR_EACH_INTRINSIC_ATOMICS(F) \
+ F(AtomicsAdd, 3, 1) \
+ F(AtomicsAnd, 3, 1) \
+ F(AtomicsCompareExchange, 4, 1) \
+ F(AtomicsExchange, 3, 1) \
+ F(AtomicsNumWaitersForTesting, 2, 1) \
+ F(AtomicsOr, 3, 1) \
+ F(AtomicsSub, 3, 1) \
+ F(AtomicsXor, 3, 1) \
F(SetAllowAtomicsWait, 1, 1)
#define FOR_EACH_INTRINSIC_BIGINT(F) \
@@ -78,106 +77,108 @@ namespace internal {
F(ToBigInt, 1, 1)
#define FOR_EACH_INTRINSIC_CLASSES(F) \
- F(ThrowUnsupportedSuperError, 0, 1) \
- F(ThrowConstructorNonCallableError, 1, 1) \
- F(ThrowStaticPrototypeError, 0, 1) \
- F(ThrowSuperAlreadyCalledError, 0, 1) \
- F(ThrowSuperNotCalled, 0, 1) \
- F(ThrowNotSuperConstructor, 2, 1) \
- F(HomeObjectSymbol, 0, 1) \
F(DefineClass, -1 /* >= 3 */, 1) \
+ F(GetSuperConstructor, 1, 1) \
+ F(HomeObjectSymbol, 0, 1) \
F(LoadFromSuper, 3, 1) \
F(LoadKeyedFromSuper, 3, 1) \
- F(StoreToSuper_Strict, 4, 1) \
- F(StoreToSuper_Sloppy, 4, 1) \
- F(StoreKeyedToSuper_Strict, 4, 1) \
F(StoreKeyedToSuper_Sloppy, 4, 1) \
- F(GetSuperConstructor, 1, 1)
+ F(StoreKeyedToSuper_Strict, 4, 1) \
+ F(StoreToSuper_Sloppy, 4, 1) \
+ F(StoreToSuper_Strict, 4, 1) \
+ F(ThrowConstructorNonCallableError, 1, 1) \
+ F(ThrowNotSuperConstructor, 2, 1) \
+ F(ThrowStaticPrototypeError, 0, 1) \
+ F(ThrowSuperAlreadyCalledError, 0, 1) \
+ F(ThrowSuperNotCalled, 0, 1) \
+ F(ThrowUnsupportedSuperError, 0, 1)
#define FOR_EACH_INTRINSIC_COLLECTIONS(F) \
- F(TheHole, 0, 1) \
- F(SetGrow, 1, 1) \
- F(SetShrink, 1, 1) \
- F(SetIteratorClone, 1, 1) \
- F(MapShrink, 1, 1) \
- F(MapGrow, 1, 1) \
- F(MapIteratorClone, 1, 1) \
F(GetWeakMapEntries, 2, 1) \
- F(WeakCollectionDelete, 3, 1) \
- F(WeakCollectionSet, 4, 1) \
F(GetWeakSetValues, 2, 1) \
F(IsJSMap, 1, 1) \
F(IsJSSet, 1, 1) \
F(IsJSWeakMap, 1, 1) \
- F(IsJSWeakSet, 1, 1)
+ F(IsJSWeakSet, 1, 1) \
+ F(MapGrow, 1, 1) \
+ F(MapIteratorClone, 1, 1) \
+ F(MapShrink, 1, 1) \
+ F(SetGrow, 1, 1) \
+ F(SetIteratorClone, 1, 1) \
+ F(SetShrink, 1, 1) \
+ F(TheHole, 0, 1) \
+ F(WeakCollectionDelete, 3, 1) \
+ F(WeakCollectionSet, 4, 1)
#define FOR_EACH_INTRINSIC_COMPILER(F) \
+ F(CompileForOnStackReplacement, 1, 1) \
F(CompileLazy, 1, 1) \
F(CompileOptimized_Concurrent, 1, 1) \
- F(FunctionFirstExecution, 1, 1) \
F(CompileOptimized_NotConcurrent, 1, 1) \
F(EvictOptimizedCodeSlot, 1, 1) \
+ F(FunctionFirstExecution, 1, 1) \
+ F(InstantiateAsmJs, 4, 1) \
F(NotifyDeoptimized, 0, 1) \
- F(CompileForOnStackReplacement, 1, 1) \
- F(ResolvePossiblyDirectEval, 6, 1) \
- F(InstantiateAsmJs, 4, 1)
+ F(ResolvePossiblyDirectEval, 6, 1)
#define FOR_EACH_INTRINSIC_DATE(F) \
- F(IsDate, 1, 1) \
- F(DateCurrentTime, 0, 1)
+ F(DateCurrentTime, 0, 1) \
+ F(IsDate, 1, 1)
#define FOR_EACH_INTRINSIC_DEBUG(F) \
- F(HandleDebuggerStatement, 0, 1) \
- F(ScheduleBreak, 0, 1) \
- F(DebugGetInternalProperties, 1, 1) \
- F(DebugGetPropertyDetails, 2, 1) \
- F(DebugGetProperty, 2, 1) \
- F(DebugPropertyKindFromDetails, 1, 1) \
- F(DebugPropertyAttributesFromDetails, 1, 1) \
- F(CheckExecutionState, 1, 1) \
- F(GetFrameCount, 1, 1) \
- F(GetFrameDetails, 2, 1) \
- F(GetScopeCount, 2, 1) \
- F(GetScopeDetails, 4, 1) \
- F(GetAllScopesDetails, 4, 1) \
- F(GetFunctionScopeCount, 1, 1) \
- F(GetFunctionScopeDetails, 2, 1) \
- F(GetGeneratorScopeCount, 1, 1) \
- F(GetGeneratorScopeDetails, 2, 1) \
- F(SetScopeVariableValue, 6, 1) \
- F(GetBreakLocations, 1, 1) \
F(ChangeBreakOnException, 2, 1) \
- F(IsBreakOnException, 1, 1) \
- F(PrepareStep, 2, 1) \
+ F(CheckExecutionState, 1, 1) \
F(ClearStepping, 0, 1) \
+ F(CollectGarbage, 1, 1) \
+ F(DebugApplyInstrumentation, 1, 1) \
+ F(DebugAsyncFunctionPromiseCreated, 1, 1) \
+ F(DebugBreakAtEntry, 1, 1) \
+ F(DebugCollectCoverage, 0, 1) \
+ F(DebugConstructedBy, 2, 1) \
F(DebugEvaluate, 5, 1) \
F(DebugEvaluateGlobal, 2, 1) \
+ F(DebugGetInternalProperties, 1, 1) \
F(DebugGetLoadedScripts, 0, 1) \
- F(DebugReferencedBy, 3, 1) \
- F(DebugConstructedBy, 2, 1) \
+ F(DebugGetProperty, 2, 1) \
+ F(DebugGetPropertyDetails, 2, 1) \
F(DebugGetPrototype, 1, 1) \
+ F(DebugIsActive, 0, 1) \
+ F(DebugOnFunctionCall, 1, 1) \
+ F(DebugPopPromise, 0, 1) \
+ F(DebugPrepareStepInSuspendedGenerator, 0, 1) \
+ F(DebugPropertyAttributesFromDetails, 1, 1) \
+ F(DebugPropertyKindFromDetails, 1, 1) \
+ F(DebugPushPromise, 1, 1) \
+ F(DebugReferencedBy, 3, 1) \
F(DebugSetScriptSource, 2, 1) \
- F(FunctionGetInferredName, 1, 1) \
+ F(DebugToggleBlockCoverage, 1, 1) \
+ F(DebugTogglePreciseCoverage, 1, 1) \
F(FunctionGetDebugName, 1, 1) \
+ F(FunctionGetInferredName, 1, 1) \
+ F(GetAllScopesDetails, 4, 1) \
+ F(GetBreakLocations, 1, 1) \
F(GetDebugContext, 0, 1) \
- F(CollectGarbage, 1, 1) \
+ F(GetFrameCount, 1, 1) \
+ F(GetFrameDetails, 2, 1) \
+ F(GetFunctionScopeCount, 1, 1) \
+ F(GetFunctionScopeDetails, 2, 1) \
+ F(GetGeneratorScopeCount, 1, 1) \
+ F(GetGeneratorScopeDetails, 2, 1) \
F(GetHeapUsage, 0, 1) \
+ F(GetScopeCount, 2, 1) \
+ F(GetScopeDetails, 4, 1) \
F(GetScript, 1, 1) \
+ F(HandleDebuggerStatement, 0, 1) \
+ F(IncBlockCounter, 2, 1) \
+ F(IsBreakOnException, 1, 1) \
+ F(PrepareStep, 2, 1) \
+ F(ScheduleBreak, 0, 1) \
F(ScriptLineCount, 1, 1) \
- F(ScriptLocationFromLine, 4, 1) \
F(ScriptLocationFromLine2, 4, 1) \
- F(ScriptPositionInfo, 3, 1) \
+ F(ScriptLocationFromLine, 4, 1) \
F(ScriptPositionInfo2, 3, 1) \
- F(DebugOnFunctionCall, 1, 1) \
- F(DebugPrepareStepInSuspendedGenerator, 0, 1) \
- F(DebugPushPromise, 1, 1) \
- F(DebugPopPromise, 0, 1) \
- F(DebugAsyncFunctionPromiseCreated, 1, 1) \
- F(DebugIsActive, 0, 1) \
- F(DebugCollectCoverage, 0, 1) \
- F(DebugTogglePreciseCoverage, 1, 1) \
- F(DebugToggleBlockCoverage, 1, 1) \
- F(IncBlockCounter, 2, 1)
+ F(ScriptPositionInfo, 3, 1) \
+ F(SetScopeVariableValue, 6, 1)
#define FOR_EACH_INTRINSIC_ERROR(F) F(ErrorToString, 1, 1)
@@ -206,66 +207,66 @@ namespace internal {
F(InterpreterDeserializeLazy, 2, 1)
#define FOR_EACH_INTRINSIC_FUNCTION(F) \
+ F(Call, -1 /* >= 2 */, 1) \
+ F(FunctionGetContextData, 1, 1) \
F(FunctionGetName, 1, 1) \
F(FunctionGetScript, 1, 1) \
F(FunctionGetScriptId, 1, 1) \
- F(FunctionGetSourceCode, 1, 1) \
F(FunctionGetScriptSourcePosition, 1, 1) \
- F(FunctionGetContextData, 1, 1) \
+ F(FunctionGetSourceCode, 1, 1) \
F(FunctionIsAPIFunction, 1, 1) \
- F(SetCode, 2, 1) \
- F(SetNativeFlag, 1, 1) \
+ F(FunctionToString, 1, 1) \
F(IsConstructor, 1, 1) \
- F(Call, -1 /* >= 2 */, 1) \
F(IsFunction, 1, 1) \
- F(FunctionToString, 1, 1)
-
-#define FOR_EACH_INTRINSIC_GENERATOR(F) \
- F(CreateJSGeneratorObject, 2, 1) \
- F(GeneratorClose, 1, 1) \
- F(GeneratorGetFunction, 1, 1) \
- F(GeneratorGetReceiver, 1, 1) \
- F(GeneratorGetInputOrDebugPos, 1, 1) \
- F(AsyncFunctionAwaitCaught, 3, 1) \
- F(AsyncFunctionAwaitUncaught, 3, 1) \
- F(AsyncGeneratorResolve, 3, 1) \
- F(AsyncGeneratorReject, 2, 1) \
- F(AsyncGeneratorYield, 3, 1) \
- F(AsyncGeneratorAwaitCaught, 2, 1) \
- F(AsyncGeneratorAwaitUncaught, 2, 1) \
- F(GeneratorGetContinuation, 1, 1) \
- F(GeneratorGetSourcePosition, 1, 1) \
- F(GeneratorGetResumeMode, 1, 1) \
- F(AsyncGeneratorHasCatchHandlerForPC, 1, 1)
+ F(SetCode, 2, 1) \
+ F(SetNativeFlag, 1, 1)
+
+#define FOR_EACH_INTRINSIC_GENERATOR(F) \
+ F(AsyncFunctionAwaitCaught, 3, 1) \
+ F(AsyncFunctionAwaitUncaught, 3, 1) \
+ F(AsyncGeneratorAwaitCaught, 2, 1) \
+ F(AsyncGeneratorAwaitUncaught, 2, 1) \
+ F(AsyncGeneratorHasCatchHandlerForPC, 1, 1) \
+ F(AsyncGeneratorReject, 2, 1) \
+ F(AsyncGeneratorResolve, 3, 1) \
+ F(AsyncGeneratorYield, 3, 1) \
+ F(CreateJSGeneratorObject, 2, 1) \
+ F(GeneratorClose, 1, 1) \
+ F(GeneratorGetContinuation, 1, 1) \
+ F(GeneratorGetFunction, 1, 1) \
+ F(GeneratorGetInputOrDebugPos, 1, 1) \
+ F(GeneratorGetReceiver, 1, 1) \
+ F(GeneratorGetResumeMode, 1, 1) \
+ F(GeneratorGetSourcePosition, 1, 1)
#ifdef V8_INTL_SUPPORT
#define FOR_EACH_INTRINSIC_INTL(F) \
- F(CanonicalizeLanguageTag, 1, 1) \
F(AvailableLocalesOf, 1, 1) \
- F(GetDefaultICULocale, 0, 1) \
- F(IsInitializedIntlObject, 1, 1) \
- F(IsInitializedIntlObjectOfType, 2, 1) \
- F(MarkAsInitializedIntlObjectOfType, 2, 1) \
+ F(BreakIteratorAdoptText, 2, 1) \
+ F(BreakIteratorBreakType, 1, 1) \
+ F(BreakIteratorCurrent, 1, 1) \
+ F(BreakIteratorFirst, 1, 1) \
+ F(BreakIteratorNext, 1, 1) \
+ F(CanonicalizeLanguageTag, 1, 1) \
+ F(CreateBreakIterator, 3, 1) \
+ F(CreateCollator, 3, 1) \
F(CreateDateTimeFormat, 3, 1) \
- F(InternalDateFormat, 2, 1) \
- F(InternalDateFormatToParts, 2, 1) \
F(CreateNumberFormat, 3, 1) \
- F(InternalNumberFormat, 2, 1) \
+ F(CreatePluralRules, 3, 1) \
F(CurrencyDigits, 1, 1) \
- F(CreateCollator, 3, 1) \
+ F(DateCacheVersion, 0, 1) \
+ F(GetDefaultICULocale, 0, 1) \
F(InternalCompare, 3, 1) \
- F(CreatePluralRules, 3, 1) \
+ F(InternalDateFormat, 2, 1) \
+ F(InternalDateFormatToParts, 2, 1) \
+ F(InternalNumberFormat, 2, 1) \
+ F(IsInitializedIntlObject, 1, 1) \
+ F(IsInitializedIntlObjectOfType, 2, 1) \
+ F(MarkAsInitializedIntlObjectOfType, 2, 1) \
F(PluralRulesSelect, 2, 1) \
- F(CreateBreakIterator, 3, 1) \
- F(BreakIteratorAdoptText, 2, 1) \
- F(BreakIteratorFirst, 1, 1) \
- F(BreakIteratorNext, 1, 1) \
- F(BreakIteratorCurrent, 1, 1) \
- F(BreakIteratorBreakType, 1, 1) \
- F(StringToLowerCaseIntl, 1, 1) \
- F(StringToUpperCaseIntl, 1, 1) \
F(StringLocaleConvertCase, 3, 1) \
- F(DateCacheVersion, 0, 1)
+ F(StringToLowerCaseIntl, 1, 1) \
+ F(StringToUpperCaseIntl, 1, 1)
#else
#define FOR_EACH_INTRINSIC_INTL(F)
#endif
@@ -275,12 +276,14 @@ namespace internal {
F(AllocateInTargetSpace, 2, 1) \
F(AllocateSeqOneByteString, 1, 1) \
F(AllocateSeqTwoByteString, 1, 1) \
+ F(AllowDynamicFunction, 1, 1) \
F(CheckIsBootstrapping, 0, 1) \
F(CreateAsyncFromSyncIterator, 1, 1) \
F(CreateListFromArrayLike, 1, 1) \
+ F(CreateTemplateObject, 1, 1) \
F(DeserializeLazy, 1, 1) \
- F(GetAndResetRuntimeCallStats, -1 /* <= 2 */, 1) \
F(ExportFromRuntime, 1, 1) \
+ F(GetAndResetRuntimeCallStats, -1 /* <= 2 */, 1) \
F(IncrementUseCounter, 1, 1) \
F(IncrementUseCounterConstructorReturnNonUndefinedPrimitive, 0, 1) \
F(InstallToContext, 1, 1) \
@@ -291,9 +294,10 @@ namespace internal {
F(NewTypeError, 2, 1) \
F(OrdinaryHasInstance, 2, 1) \
F(PromoteScheduledException, 0, 1) \
+ F(ReportMessage, 1, 1) \
F(ReThrow, 1, 1) \
- F(RunMicrotasks, 0, 1) \
F(RunMicrotaskCallback, 2, 1) \
+ F(RunMicrotasks, 0, 1) \
F(StackGuard, 0, 1) \
F(Throw, 1, 1) \
F(ThrowApplyNonFunction, 1, 1) \
@@ -303,37 +307,34 @@ namespace internal {
F(ThrowInvalidStringLength, 0, 1) \
F(ThrowInvalidTypedArrayAlignment, 2, 1) \
F(ThrowIteratorResultNotAnObject, 1, 1) \
- F(ThrowThrowMethodMissing, 0, 1) \
- F(ThrowSymbolIteratorInvalid, 0, 1) \
F(ThrowNotConstructor, 1, 1) \
F(ThrowRangeError, -1 /* >= 1 */, 1) \
F(ThrowReferenceError, 1, 1) \
F(ThrowStackOverflow, 0, 1) \
F(ThrowSymbolAsyncIteratorInvalid, 0, 1) \
+ F(ThrowSymbolIteratorInvalid, 0, 1) \
+ F(ThrowThrowMethodMissing, 0, 1) \
F(ThrowTypeError, -1 /* >= 1 */, 1) \
F(Typeof, 1, 1) \
- F(UnwindAndFindExceptionHandler, 0, 1) \
- F(AllowDynamicFunction, 1, 1) \
- F(CreateTemplateObject, 1, 1) \
- F(ReportMessage, 1, 1)
+ F(UnwindAndFindExceptionHandler, 0, 1)
#define FOR_EACH_INTRINSIC_LITERALS(F) \
- F(CreateRegExpLiteral, 4, 1) \
+ F(CreateArrayLiteral, 4, 1) \
F(CreateObjectLiteral, 4, 1) \
- F(CreateArrayLiteral, 4, 1)
+ F(CreateRegExpLiteral, 4, 1)
#define FOR_EACH_INTRINSIC_LIVEEDIT(F) \
+ F(LiveEditCheckAndDropActivations, 3, 1) \
+ F(LiveEditCompareStrings, 2, 1) \
F(LiveEditFindSharedFunctionInfosForScript, 1, 1) \
- F(LiveEditGatherCompileInfo, 2, 1) \
- F(LiveEditReplaceScript, 3, 1) \
- F(LiveEditFunctionSourceUpdated, 2, 1) \
- F(LiveEditReplaceFunctionCode, 2, 1) \
F(LiveEditFixupScript, 2, 1) \
F(LiveEditFunctionSetScript, 2, 1) \
- F(LiveEditReplaceRefToNestedFunction, 3, 1) \
+ F(LiveEditFunctionSourceUpdated, 2, 1) \
+ F(LiveEditGatherCompileInfo, 2, 1) \
F(LiveEditPatchFunctionPositions, 2, 1) \
- F(LiveEditCheckAndDropActivations, 3, 1) \
- F(LiveEditCompareStrings, 2, 1) \
+ F(LiveEditReplaceFunctionCode, 2, 1) \
+ F(LiveEditReplaceRefToNestedFunction, 3, 1) \
+ F(LiveEditReplaceScript, 3, 1) \
F(LiveEditRestartFrame, 2, 1)
#define FOR_EACH_INTRINSIC_MATHS(F) F(GenerateRandomNumbers, 0, 1)
@@ -343,109 +344,109 @@ namespace internal {
F(GetImportMetaObject, 0, 1) \
F(GetModuleNamespace, 1, 1)
-#define FOR_EACH_INTRINSIC_NUMBERS(F) \
- F(IsValidSmi, 1, 1) \
- F(StringToNumber, 1, 1) \
- F(StringParseInt, 2, 1) \
- F(StringParseFloat, 1, 1) \
- F(NumberToStringSkipCache, 1, 1) \
- F(NumberToSmi, 1, 1) \
- F(SmiLexicographicCompare, 2, 1) \
- F(MaxSmi, 0, 1) \
- F(IsSmi, 1, 1) \
- F(GetHoleNaNUpper, 0, 1) \
- F(GetHoleNaNLower, 0, 1)
+#define FOR_EACH_INTRINSIC_NUMBERS(F) \
+ F(GetHoleNaNLower, 0, 1) \
+ F(GetHoleNaNUpper, 0, 1) \
+ F(IsSmi, 1, 1) \
+ F(IsValidSmi, 1, 1) \
+ F(MaxSmi, 0, 1) \
+ F(NumberToSmi, 1, 1) \
+ F(NumberToStringSkipCache, 1, 1) \
+ F(SmiLexicographicCompare, 2, 1) \
+ F(StringParseFloat, 1, 1) \
+ F(StringParseInt, 2, 1) \
+ F(StringToNumber, 1, 1)
#define FOR_EACH_INTRINSIC_OBJECT(F) \
F(AddDictionaryProperty, 3, 1) \
- F(GetPrototype, 1, 1) \
- F(ObjectKeys, 1, 1) \
- F(ObjectHasOwnProperty, 2, 1) \
- F(ObjectCreate, 2, 1) \
- F(InternalSetPrototype, 2, 1) \
- F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
- F(ObjectValues, 1, 1) \
- F(ObjectValuesSkipFastPath, 1, 1) \
- F(ObjectEntries, 1, 1) \
- F(ObjectEntriesSkipFastPath, 1, 1) \
- F(GetProperty, 2, 1) \
- F(KeyedGetProperty, 2, 1) \
- F(AddNamedProperty, 4, 1) \
- F(SetProperty, 4, 1) \
F(AddElement, 3, 1) \
- F(AppendElement, 2, 1) \
- F(DeleteProperty, 3, 1) \
- F(ShrinkPropertyDictionary, 1, 1) \
- F(HasProperty, 2, 1) \
- F(GetOwnPropertyKeys, 2, 1) \
- F(GetInterceptorInfo, 1, 1) \
- F(ToFastProperties, 1, 1) \
+ F(AddNamedProperty, 4, 1) \
+ F(AddPrivateField, 3, 1) \
F(AllocateHeapNumber, 0, 1) \
- F(NewObject, 2, 1) \
- F(CompleteInobjectSlackTrackingForMap, 1, 1) \
- F(TryMigrateInstance, 1, 1) \
- F(DefineAccessorPropertyUnchecked, 5, 1) \
- F(DefineDataPropertyInLiteral, 6, 1) \
- F(CollectTypeProfile, 3, 1) \
- F(HasFastPackedElements, 1, 1) \
- F(ValueOf, 1, 1) \
- F(IsJSReceiver, 1, 1) \
F(ClassOf, 1, 1) \
+ F(CollectTypeProfile, 3, 1) \
+ F(CompleteInobjectSlackTrackingForMap, 1, 1) \
F(CopyDataProperties, 2, 1) \
F(CopyDataPropertiesWithExcludedProperties, -1 /* >= 1 */, 1) \
+ F(CreateDataProperty, 3, 1) \
+ F(CreateIterResultObject, 2, 1) \
+ F(DefineAccessorPropertyUnchecked, 5, 1) \
+ F(DefineDataPropertyInLiteral, 6, 1) \
F(DefineGetterPropertyUnchecked, 4, 1) \
- F(DefineSetterPropertyUnchecked, 4, 1) \
F(DefineMethodsInternal, 3, 1) \
+ F(DefineSetterPropertyUnchecked, 4, 1) \
+ F(DeleteProperty, 3, 1) \
+ F(GetFunctionName, 1, 1) \
+ F(GetInterceptorInfo, 1, 1) \
+ F(GetOwnPropertyDescriptor, 2, 1) \
+ F(GetOwnPropertyKeys, 2, 1) \
+ F(GetProperty, 2, 1) \
+ F(GetPrototype, 1, 1) \
+ F(HasFastPackedElements, 1, 1) \
+ F(HasInPrototypeChain, 2, 1) \
+ F(HasProperty, 2, 1) \
+ F(InternalSetPrototype, 2, 1) \
+ F(IsJSReceiver, 1, 1) \
+ F(IterableToListCanBeElided, 1, 1) \
+ F(KeyedGetProperty, 2, 1) \
+ F(NewObject, 2, 1) \
+ F(ObjectCreate, 2, 1) \
+ F(ObjectEntries, 1, 1) \
+ F(ObjectEntriesSkipFastPath, 1, 1) \
+ F(ObjectHasOwnProperty, 2, 1) \
+ F(ObjectKeys, 1, 1) \
+ F(ObjectValues, 1, 1) \
+ F(ObjectValuesSkipFastPath, 1, 1) \
+ F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
+ F(SameValue, 2, 1) \
+ F(SameValueZero, 2, 1) \
+ F(SetProperty, 4, 1) \
+ F(ShrinkPropertyDictionary, 1, 1) \
+ F(ToFastProperties, 1, 1) \
+ F(ToInteger, 1, 1) \
+ F(ToLength, 1, 1) \
+ F(ToName, 1, 1) \
+ F(ToNumber, 1, 1) \
+ F(ToNumeric, 1, 1) \
F(ToObject, 1, 1) \
F(ToPrimitive, 1, 1) \
F(ToPrimitive_Number, 1, 1) \
- F(ToNumber, 1, 1) \
- F(ToNumeric, 1, 1) \
- F(ToInteger, 1, 1) \
- F(ToLength, 1, 1) \
F(ToString, 1, 1) \
- F(ToName, 1, 1) \
- F(SameValue, 2, 1) \
- F(SameValueZero, 2, 1) \
- F(HasInPrototypeChain, 2, 1) \
- F(CreateIterResultObject, 2, 1) \
- F(CreateDataProperty, 3, 1) \
- F(AddPrivateField, 3, 1) \
- F(IterableToListCanBeElided, 1, 1) \
- F(GetOwnPropertyDescriptor, 2, 1)
+ F(TryMigrateInstance, 1, 1) \
+ F(ValueOf, 1, 1)
#define FOR_EACH_INTRINSIC_OPERATORS(F) \
F(Add, 2, 1) \
F(Equal, 2, 1) \
- F(NotEqual, 2, 1) \
- F(StrictEqual, 2, 1) \
- F(StrictNotEqual, 2, 1) \
- F(LessThan, 2, 1) \
F(GreaterThan, 2, 1) \
+ F(GreaterThanOrEqual, 2, 1) \
+ F(LessThan, 2, 1) \
F(LessThanOrEqual, 2, 1) \
- F(GreaterThanOrEqual, 2, 1)
+ F(NotEqual, 2, 1) \
+ F(StrictEqual, 2, 1) \
+ F(StrictNotEqual, 2, 1)
#define FOR_EACH_INTRINSIC_PROMISE(F) \
F(EnqueueMicrotask, 1, 1) \
- F(PromiseHookInit, 2, 1) \
- F(PromiseHookBefore, 1, 1) \
F(PromiseHookAfter, 1, 1) \
+ F(PromiseHookBefore, 1, 1) \
+ F(PromiseHookInit, 2, 1) \
F(PromiseMarkAsHandled, 1, 1) \
F(PromiseRejectEventFromStack, 2, 1) \
- F(PromiseRevokeReject, 1, 1) \
F(PromiseResult, 1, 1) \
+ F(PromiseRevokeReject, 1, 1) \
F(PromiseStatus, 1, 1) \
F(RejectPromise, 3, 1) \
F(ResolvePromise, 2, 1)
-#define FOR_EACH_INTRINSIC_PROXY(F) \
- F(IsJSProxy, 1, 1) \
- F(JSProxyGetTarget, 1, 1) \
- F(JSProxyGetHandler, 1, 1) \
- F(GetPropertyWithReceiver, 2, 1) \
- F(CheckProxyHasTrap, 2, 1) \
- F(SetPropertyWithReceiver, 5, 1) \
- F(CheckProxyGetSetTrapResult, 2, 1)
+#define FOR_EACH_INTRINSIC_PROXY(F) \
+ F(CheckProxyGetSetTrapResult, 2, 1) \
+ F(CheckProxyHasTrap, 2, 1) \
+ F(GetPropertyWithReceiver, 2, 1) \
+ F(IsJSProxy, 1, 1) \
+ F(JSProxyGetHandler, 1, 1) \
+ F(JSProxyGetTarget, 1, 1) \
+ F(SetPropertyWithReceiver, 5, 1)
#define FOR_EACH_INTRINSIC_REGEXP(F) \
F(IsRegExp, 1, 1) \
@@ -459,60 +460,59 @@ namespace internal {
F(StringSplit, 3, 1)
#define FOR_EACH_INTRINSIC_SCOPES(F) \
- F(ThrowConstAssignError, 0, 1) \
- F(DeclareGlobals, 3, 1) \
- F(DeclareGlobalsForInterpreter, 3, 1) \
F(DeclareEvalFunction, 2, 1) \
F(DeclareEvalVar, 1, 1) \
- F(NewSloppyArguments_Generic, 1, 1) \
- F(NewStrictArguments, 1, 1) \
- F(NewRestParameter, 1, 1) \
- F(NewSloppyArguments, 3, 1) \
+ F(DeclareGlobals, 3, 1) \
+ F(DeleteLookupSlot, 1, 1) \
+ F(LoadLookupSlot, 1, 1) \
+ F(LoadLookupSlotInsideTypeof, 1, 1) \
F(NewArgumentsElements, 3, 1) \
F(NewClosure, 2, 1) \
F(NewClosure_Tenured, 2, 1) \
- F(NewScriptContext, 2, 1) \
F(NewFunctionContext, 2, 1) \
+ F(NewRestParameter, 1, 1) \
+ F(NewScriptContext, 2, 1) \
+ F(NewSloppyArguments, 3, 1) \
+ F(NewSloppyArguments_Generic, 1, 1) \
+ F(NewStrictArguments, 1, 1) \
+ F(PushBlockContext, 2, 1) \
+ F(PushCatchContext, 4, 1) \
F(PushModuleContext, 3, 1) \
F(PushWithContext, 3, 1) \
- F(PushCatchContext, 4, 1) \
- F(PushBlockContext, 2, 1) \
- F(DeleteLookupSlot, 1, 1) \
- F(LoadLookupSlot, 1, 1) \
- F(LoadLookupSlotInsideTypeof, 1, 1) \
F(StoreLookupSlot_Sloppy, 2, 1) \
F(StoreLookupSlot_SloppyHoisting, 2, 1) \
- F(StoreLookupSlot_Strict, 2, 1)
+ F(StoreLookupSlot_Strict, 2, 1) \
+ F(ThrowConstAssignError, 0, 1)
#define FOR_EACH_INTRINSIC_STRINGS(F) \
+ F(FlattenString, 1, 1) \
F(GetSubstitution, 5, 1) \
- F(StringReplaceOneCharWithString, 3, 1) \
+ F(InternalizeString, 1, 1) \
+ F(SparseJoinWithSeparator, 3, 1) \
+ F(StringAdd, 2, 1) \
+ F(StringBuilderConcat, 3, 1) \
+ F(StringBuilderJoin, 3, 1) \
+ F(StringCharCodeAt, 2, 1) \
+ F(StringCharFromCode, 1, 1) \
+ F(StringEqual, 2, 1) \
+ F(StringGreaterThan, 2, 1) \
+ F(StringGreaterThanOrEqual, 2, 1) \
F(StringIncludes, 3, 1) \
- F(StringTrim, 2, 1) \
F(StringIndexOf, 3, 1) \
F(StringIndexOfUnchecked, 3, 1) \
F(StringLastIndexOf, 2, 1) \
- F(StringSubstring, 3, 1) \
- F(StringAdd, 2, 1) \
- F(InternalizeString, 1, 1) \
- F(StringCharCodeAt, 2, 1) \
- F(StringBuilderConcat, 3, 1) \
- F(StringBuilderJoin, 3, 1) \
- F(SparseJoinWithSeparator, 3, 1) \
- F(StringToArray, 2, 1) \
F(StringLessThan, 2, 1) \
F(StringLessThanOrEqual, 2, 1) \
- F(StringGreaterThan, 2, 1) \
- F(StringGreaterThanOrEqual, 2, 1) \
- F(StringEqual, 2, 1) \
+ F(StringMaxLength, 0, 1) \
F(StringNotEqual, 2, 1) \
- F(FlattenString, 1, 1) \
- F(StringCharFromCode, 1, 1) \
- F(StringMaxLength, 0, 1)
+ F(StringReplaceOneCharWithString, 3, 1) \
+ F(StringSubstring, 3, 1) \
+ F(StringToArray, 2, 1) \
+ F(StringTrim, 2, 1)
#define FOR_EACH_INTRINSIC_SYMBOL(F) \
- F(CreatePrivateSymbol, -1 /* <= 1 */, 1) \
F(CreatePrivateFieldSymbol, 0, 1) \
+ F(CreatePrivateSymbol, -1 /* <= 1 */, 1) \
F(SymbolDescription, 1, 1) \
F(SymbolDescriptiveString, 1, 1) \
F(SymbolIsPrivate, 1, 1)
@@ -556,11 +556,6 @@ namespace internal {
F(HasFixedUint8ClampedElements, 1, 1) \
F(HasFixedUint8Elements, 1, 1) \
F(HasHoleyElements, 1, 1) \
- F(IsJSError, 1, 1) \
- F(IsJSGeneratorObject, 1, 1) \
- F(IsJSMapIterator, 1, 1) \
- F(IsScriptWrapper, 1, 1) \
- F(IsJSSetIterator, 1, 1) \
F(HasObjectElements, 1, 1) \
F(HasSloppyArgumentsElements, 1, 1) \
F(HasSmiElements, 1, 1) \
@@ -570,7 +565,12 @@ namespace internal {
F(InNewSpace, 1, 1) \
F(IsAsmWasmCode, 1, 1) \
F(IsConcurrentRecompilationSupported, 0, 1) \
+ F(IsJSError, 1, 1) \
+ F(IsJSGeneratorObject, 1, 1) \
+ F(IsJSMapIterator, 1, 1) \
+ F(IsJSSetIterator, 1, 1) \
F(IsLiftoffFunction, 1, 1) \
+ F(IsScriptWrapper, 1, 1) \
F(IsWasmCode, 1, 1) \
F(IsWasmTrapHandlerEnabled, 0, 1) \
F(NativeScriptsCount, 0, 1) \
@@ -587,7 +587,9 @@ namespace internal {
F(SetForceSlowPath, 1, 1) \
F(SetWasmCompileControls, 2, 1) \
F(SetWasmInstantiateControls, 0, 1) \
- F(SpeciesProtector, 0, 1) \
+ F(ArraySpeciesProtector, 0, 1) \
+ F(TypedArraySpeciesProtector, 0, 1) \
+ F(PromiseSpeciesProtector, 0, 1) \
F(SystemBreak, 0, 1) \
F(TraceEnter, 0, 1) \
F(TraceExit, 1, 1) \
@@ -600,31 +602,31 @@ namespace internal {
#define FOR_EACH_INTRINSIC_TYPEDARRAY(F) \
F(ArrayBufferNeuter, 1, 1) \
- F(TypedArrayCopyElements, 3, 1) \
F(ArrayBufferViewWasNeutered, 1, 1) \
- F(TypedArrayGetLength, 1, 1) \
+ F(IsTypedArray, 1, 1) \
+ F(TypedArrayCopyElements, 3, 1) \
F(TypedArrayGetBuffer, 1, 1) \
- F(TypedArraySortFast, 1, 1) \
+ F(TypedArrayGetLength, 1, 1) \
F(TypedArraySet, 2, 1) \
- F(IsTypedArray, 1, 1)
+ F(TypedArraySortFast, 1, 1)
#define FOR_EACH_INTRINSIC_WASM(F) \
- F(WasmGrowMemory, 1, 1) \
F(ThrowWasmError, 1, 1) \
F(ThrowWasmStackOverflow, 0, 1) \
- F(WasmThrowTypeError, 0, 1) \
- F(WasmThrowCreate, 2, 1) \
- F(WasmThrow, 0, 1) \
- F(WasmGetExceptionRuntimeId, 0, 1) \
- F(WasmExceptionSetElement, 2, 1) \
F(WasmExceptionGetElement, 1, 1) \
+ F(WasmExceptionSetElement, 2, 1) \
+ F(WasmGetExceptionRuntimeId, 0, 1) \
+ F(WasmGrowMemory, 1, 1) \
F(WasmRunInterpreter, 2, 1) \
F(WasmStackGuard, 0, 1) \
- F(WasmCompileLazy, 0, 1)
+ F(WasmThrow, 0, 1) \
+ F(WasmThrowCreate, 2, 1) \
+ F(WasmThrowTypeError, 0, 1)
#define FOR_EACH_INTRINSIC_RETURN_PAIR(F) \
- F(LoadLookupSlotForCall, 1, 2) \
- F(DebugBreakOnBytecode, 1, 2)
+ F(WasmCompileLazy, 1, 2) \
+ F(DebugBreakOnBytecode, 1, 2) \
+ F(LoadLookupSlotForCall, 1, 2)
// Most intrinsics are implemented in the runtime/ directory, but ICs are
// implemented in ic.cc for now.
@@ -642,10 +644,10 @@ namespace internal {
F(StoreGlobalIC_Miss, 4, 1) \
F(StoreGlobalIC_Slow, 5, 1) \
F(StoreIC_Miss, 5, 1) \
+ F(StoreInArrayLiteralIC_Slow, 5, 1) \
F(StorePropertyWithInterceptor, 5, 1)
#define FOR_EACH_INTRINSIC_RETURN_OBJECT(F) \
- FOR_EACH_INTRINSIC_IC(F) \
FOR_EACH_INTRINSIC_ARRAY(F) \
FOR_EACH_INTRINSIC_ATOMICS(F) \
FOR_EACH_INTRINSIC_BIGINT(F) \
@@ -656,11 +658,12 @@ namespace internal {
FOR_EACH_INTRINSIC_DEBUG(F) \
FOR_EACH_INTRINSIC_ERROR(F) \
FOR_EACH_INTRINSIC_FORIN(F) \
- FOR_EACH_INTRINSIC_INTERPRETER(F) \
FOR_EACH_INTRINSIC_FUNCTION(F) \
FOR_EACH_INTRINSIC_GENERATOR(F) \
- FOR_EACH_INTRINSIC_INTL(F) \
+ FOR_EACH_INTRINSIC_IC(F) \
FOR_EACH_INTRINSIC_INTERNAL(F) \
+ FOR_EACH_INTRINSIC_INTERPRETER(F) \
+ FOR_EACH_INTRINSIC_INTL(F) \
FOR_EACH_INTRINSIC_LITERALS(F) \
FOR_EACH_INTRINSIC_LIVEEDIT(F) \
FOR_EACH_INTRINSIC_MATHS(F) \
@@ -727,6 +730,12 @@ class Runtime : public AllStatic {
static const int kNotFound = -1;
+ // Checks whether the runtime function with the given {id} never returns
+ // to its caller normally, i.e. whether it always raises an exception.
+ // More specifically: the C++ implementation always returns the
+ // Heap::exception sentinel.
+ static bool IsNonReturning(FunctionId id);
+
// Get the intrinsic function with the given name.
static const Function* FunctionForName(const unsigned char* name, int length);
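A minimal standalone sketch (not part of this patch; the FunctionId values and helpers below are made up) of how a non-returning predicate like this is typically consumed by a code generator, which can then drop the fall-through path after the call:

// Illustrative sketch only -- not V8 code.
#include <cstdio>

enum class FunctionId { kAdd, kToObject, kThrowTypeError };

// Hypothetical stand-in for Runtime::IsNonReturning: true when the runtime
// call always raises an exception instead of returning a value.
bool IsNonReturning(FunctionId id) {
  return id == FunctionId::kThrowTypeError;
}

void EmitRuntimeCall(FunctionId id) {
  std::printf("call runtime %d\n", static_cast<int>(id));
  if (IsNonReturning(id)) {
    // No value ever comes back; code after the call is unreachable.
    std::printf("  (no return path emitted)\n");
    return;
  }
  std::printf("  store result\n");
}

int main() {
  EmitRuntimeCall(FunctionId::kAdd);
  EmitRuntimeCall(FunctionId::kThrowTypeError);
}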
@@ -739,22 +748,22 @@ class Runtime : public AllStatic {
// Get the runtime intrinsic function table.
static const Function* RuntimeFunctionTable(Isolate* isolate);
- MUST_USE_RESULT static Maybe<bool> DeleteObjectProperty(
+ V8_WARN_UNUSED_RESULT static Maybe<bool> DeleteObjectProperty(
Isolate* isolate, Handle<JSReceiver> receiver, Handle<Object> key,
LanguageMode language_mode);
- MUST_USE_RESULT static MaybeHandle<Object> SetObjectProperty(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> SetObjectProperty(
Isolate* isolate, Handle<Object> object, Handle<Object> key,
Handle<Object> value, LanguageMode language_mode);
- MUST_USE_RESULT static MaybeHandle<Object> GetObjectProperty(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> GetObjectProperty(
Isolate* isolate, Handle<Object> object, Handle<Object> key,
bool* is_found_out = nullptr);
- MUST_USE_RESULT static MaybeHandle<JSArray> GetInternalProperties(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSArray> GetInternalProperties(
Isolate* isolate, Handle<Object>);
- MUST_USE_RESULT static MaybeHandle<Object> ThrowIteratorError(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> ThrowIteratorError(
Isolate* isolate, Handle<Object> object);
};
diff --git a/deps/v8/src/s390/assembler-s390-inl.h b/deps/v8/src/s390/assembler-s390-inl.h
index eac58186d5..0c5f5ffed6 100644
--- a/deps/v8/src/s390/assembler-s390-inl.h
+++ b/deps/v8/src/s390/assembler-s390-inl.h
@@ -98,7 +98,8 @@ Address RelocInfo::target_address() {
Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_) ||
- rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE);
+ IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
+ IsOffHeapTarget(rmode_));
// Read the address of the word containing the target_address in an
// instruction stream.
@@ -174,11 +175,30 @@ Address RelocInfo::target_external_reference() {
return Assembler::target_address_at(pc_, constant_pool_);
}
+void RelocInfo::set_target_external_reference(
+ Address target, ICacheFlushMode icache_flush_mode) {
+ DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
+ Assembler::set_target_address_at(pc_, constant_pool_, target,
+ icache_flush_mode);
+}
+
+void RelocInfo::set_wasm_code_table_entry(Address target,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(rmode_ == RelocInfo::WASM_CODE_TABLE_ENTRY);
+ Assembler::set_target_address_at(pc_, constant_pool_, target,
+ icache_flush_mode);
+}
+
Address RelocInfo::target_runtime_entry(Assembler* origin) {
DCHECK(IsRuntimeEntry(rmode_));
return target_address();
}
+Address RelocInfo::target_off_heap_target() {
+ DCHECK(IsOffHeapTarget(rmode_));
+ return Assembler::target_address_at(pc_, constant_pool_);
+}
+
void RelocInfo::set_target_runtime_entry(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
@@ -217,6 +237,8 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
visitor->VisitInternalReference(host(), this);
} else if (IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(host(), this);
+ } else if (RelocInfo::IsOffHeapTarget(mode)) {
+ visitor->VisitOffHeapTarget(host(), this);
}
}
diff --git a/deps/v8/src/s390/assembler-s390.cc b/deps/v8/src/s390/assembler-s390.cc
index 56870fd7c0..d0b1afa801 100644
--- a/deps/v8/src/s390/assembler-s390.cc
+++ b/deps/v8/src/s390/assembler-s390.cc
@@ -371,6 +371,7 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset();
desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+ desc->constant_pool_size = 0;
desc->origin = this;
desc->unwinding_info_size = 0;
desc->unwinding_info = nullptr;
@@ -2130,7 +2131,7 @@ void Assembler::GrowBuffer(int needed) {
// Some internal data structures overflow for very large buffers,
// they must ensure that kMaximalBufferSize is not too large.
if (desc.buffer_size > kMaximalBufferSize) {
- V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
+ V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
}
// Set up new buffer.
diff --git a/deps/v8/src/s390/assembler-s390.h b/deps/v8/src/s390/assembler-s390.h
index b0cc5b8cc4..ce3f04c996 100644
--- a/deps/v8/src/s390/assembler-s390.h
+++ b/deps/v8/src/s390/assembler-s390.h
@@ -261,8 +261,8 @@ class Register : public RegisterBase<Register, kRegAfterLast> {
explicit constexpr Register(int code) : RegisterBase(code) {}
};
-static_assert(IS_TRIVIALLY_COPYABLE(Register) &&
- sizeof(Register) == sizeof(int),
+ASSERT_TRIVIALLY_COPYABLE(Register);
+static_assert(sizeof(Register) == sizeof(int),
"Register can efficiently be passed by value");
#define DEFINE_REGISTER(R) \
@@ -303,8 +303,8 @@ class DoubleRegister : public RegisterBase<DoubleRegister, kDoubleAfterLast> {
explicit constexpr DoubleRegister(int code) : RegisterBase(code) {}
};
-static_assert(IS_TRIVIALLY_COPYABLE(DoubleRegister) &&
- sizeof(DoubleRegister) == sizeof(int),
+ASSERT_TRIVIALLY_COPYABLE(DoubleRegister);
+static_assert(sizeof(DoubleRegister) == sizeof(int),
"DoubleRegister can efficiently be passed by value");
typedef DoubleRegister FloatRegister;
diff --git a/deps/v8/src/s390/code-stubs-s390.cc b/deps/v8/src/s390/code-stubs-s390.cc
index 91396bb597..c8b2415332 100644
--- a/deps/v8/src/s390/code-stubs-s390.cc
+++ b/deps/v8/src/s390/code-stubs-s390.cc
@@ -153,43 +153,30 @@ void MathPowStub::Generate(MacroAssembler* masm) {
const Register scratch2 = r9;
Label call_runtime, done, int_exponent;
- if (exponent_type() == TAGGED) {
- // Base is already in double_base.
- __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
- __ LoadDouble(double_exponent,
- FieldMemOperand(exponent, HeapNumber::kValueOffset));
- }
+ // Detect integer exponents stored as double.
+ __ TryDoubleToInt32Exact(scratch, double_exponent, scratch2, double_scratch);
+ __ beq(&int_exponent, Label::kNear);
- if (exponent_type() != INTEGER) {
- // Detect integer exponents stored as double.
- __ TryDoubleToInt32Exact(scratch, double_exponent, scratch2,
- double_scratch);
- __ beq(&int_exponent, Label::kNear);
-
- __ push(r14);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(0, 2, scratch);
- __ MovToFloatParameters(double_base, double_exponent);
- __ CallCFunction(
- ExternalReference::power_double_double_function(isolate()), 0, 2);
- }
- __ pop(r14);
- __ MovFromFloatResult(double_result);
- __ b(&done);
+ __ push(r14);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(0, 2, scratch);
+ __ MovToFloatParameters(double_base, double_exponent);
+ __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
+ 0, 2);
}
+ __ pop(r14);
+ __ MovFromFloatResult(double_result);
+ __ b(&done);
// Calculate power with integer exponent.
__ bind(&int_exponent);
// Get two copies of exponent in the registers scratch and exponent.
- if (exponent_type() == INTEGER) {
- __ LoadRR(scratch, exponent);
- } else {
- // Exponent has previously been stored into scratch as untagged integer.
- __ LoadRR(exponent, scratch);
- }
+ // Exponent has previously been stored into scratch as untagged integer.
+ __ LoadRR(exponent, scratch);
+
__ ldr(double_scratch, double_base); // Back up base.
__ LoadImmP(scratch2, Operand(1));
__ ConvertIntToDouble(double_result, scratch2);
diff --git a/deps/v8/src/s390/codegen-s390.cc b/deps/v8/src/s390/codegen-s390.cc
index ecec9cb408..badecb461e 100644
--- a/deps/v8/src/s390/codegen-s390.cc
+++ b/deps/v8/src/s390/codegen-s390.cc
@@ -7,6 +7,7 @@
#include <memory>
#include "src/codegen.h"
+#include "src/isolate.h"
#include "src/macro-assembler.h"
#include "src/s390/simulator-s390.h"
diff --git a/deps/v8/src/s390/frame-constants-s390.h b/deps/v8/src/s390/frame-constants-s390.h
index 54638f56bc..f1c9febbb1 100644
--- a/deps/v8/src/s390/frame-constants-s390.h
+++ b/deps/v8/src/s390/frame-constants-s390.h
@@ -10,36 +10,38 @@ namespace internal {
class EntryFrameConstants : public AllStatic {
public:
- static const int kCallerFPOffset =
+ static constexpr int kCallerFPOffset =
-(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
};
class ExitFrameConstants : public TypedFrameConstants {
public:
- static const int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
- static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+ static constexpr int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+ static constexpr int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
DEFINE_TYPED_FRAME_SIZES(2);
// The caller fields are below the frame pointer on the stack.
- static const int kCallerFPOffset = 0 * kPointerSize;
+ static constexpr int kCallerFPOffset = 0 * kPointerSize;
// The calling JS function is below FP.
- static const int kCallerPCOffset = 1 * kPointerSize;
+ static constexpr int kCallerPCOffset = 1 * kPointerSize;
// FP-relative displacement of the caller's SP. It points just
// below the saved PC.
- static const int kCallerSPDisplacement = 2 * kPointerSize;
+ static constexpr int kCallerSPDisplacement = 2 * kPointerSize;
};
class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
- static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
- static const int kLastParameterOffset = +2 * kPointerSize;
- static const int kFunctionOffset = StandardFrameConstants::kFunctionOffset;
+ static constexpr int kLocal0Offset =
+ StandardFrameConstants::kExpressionsOffset;
+ static constexpr int kLastParameterOffset = +2 * kPointerSize;
+ static constexpr int kFunctionOffset =
+ StandardFrameConstants::kFunctionOffset;
// Caller SP-relative.
- static const int kParam0Offset = -2 * kPointerSize;
- static const int kReceiverOffset = -1 * kPointerSize;
+ static constexpr int kParam0Offset = -2 * kPointerSize;
+ static constexpr int kReceiverOffset = -1 * kPointerSize;
};
} // namespace internal
diff --git a/deps/v8/src/s390/macro-assembler-s390.cc b/deps/v8/src/s390/macro-assembler-s390.cc
index 50db39c6b5..ad074026e1 100644
--- a/deps/v8/src/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/s390/macro-assembler-s390.cc
@@ -161,7 +161,7 @@ void TurboAssembler::Call(Register target) {
}
void MacroAssembler::CallJSEntry(Register target) {
- DCHECK(target == ip);
+ DCHECK(target == r4);
Call(target);
}
@@ -1216,29 +1216,14 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
- Label skip_hook, call_hook;
-
- ExternalReference debug_is_active =
- ExternalReference::debug_is_active_address(isolate());
- mov(r6, Operand(debug_is_active));
- tm(MemOperand(r6), Operand::Zero());
- bne(&skip_hook);
+ Label skip_hook;
  ExternalReference debug_hook_active =
      ExternalReference::debug_hook_on_function_call_address(isolate());
  mov(r6, Operand(debug_hook_active));
tm(MemOperand(r6), Operand::Zero());
- beq(&call_hook);
-
- LoadP(r6, FieldMemOperand(fun, JSFunction::kSharedFunctionInfoOffset));
- LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kDebugInfoOffset));
- JumpIfSmi(r6, &skip_hook);
- LoadP(r6, FieldMemOperand(r6, DebugInfo::kFlagsOffset));
- SmiUntag(r0, r6);
- tmll(r0, Operand(DebugInfo::kBreakAtEntry));
- beq(&skip_hook);
+ bne(&skip_hook);
- bind(&call_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -1538,12 +1523,20 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
-void MacroAssembler::JumpToInstructionStream(const InstructionStream* stream) {
- intptr_t bytes_address = reinterpret_cast<intptr_t>(stream->bytes());
- mov(kOffHeapTrampolineRegister, Operand(bytes_address));
+void MacroAssembler::JumpToInstructionStream(Address entry) {
+ mov(kOffHeapTrampolineRegister,
+ Operand(reinterpret_cast<intptr_t>(entry), RelocInfo::OFF_HEAP_TARGET));
Jump(kOffHeapTrampolineRegister);
}
+void MacroAssembler::LoadWeakValue(Register out, Register in,
+ Label* target_if_cleared) {
+ CmpP(in, Operand(kClearedWeakHeapObject));
+ beq(target_if_cleared);
+
+ AndP(out, in, Operand(~kWeakHeapObjectMask));
+}
+
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
DCHECK(value > 0 && is_int8(value));
@@ -1661,6 +1654,18 @@ void MacroAssembler::AssertFixedArray(Register object) {
}
}
+void MacroAssembler::AssertConstructor(Register object, Register scratch) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ TestIfSmi(object);
+ Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor);
+ LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ tm(FieldMemOperand(scratch, Map::kBitFieldOffset),
+ Operand(Map::IsConstructorBit::kMask));
+ Check(ne, AbortReason::kOperandIsNotAConstructor);
+ }
+}
+
void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@@ -4288,6 +4293,10 @@ void TurboAssembler::ResetSpeculationPoisonRegister() {
mov(kSpeculationPoisonRegister, Operand(-1));
}
+void TurboAssembler::ComputeCodeStartAddress(Register dst) {
+ larl(dst, Operand(-pc_offset() / 2));
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/s390/macro-assembler-s390.h b/deps/v8/src/s390/macro-assembler-s390.h
index 1c3ea3fc54..7c123b9c5b 100644
--- a/deps/v8/src/s390/macro-assembler-s390.h
+++ b/deps/v8/src/s390/macro-assembler-s390.h
@@ -989,10 +989,16 @@ class TurboAssembler : public Assembler {
// High bits must be identical to fit into an 32-bit integer
cgfr(value, value);
}
- void SmiUntag(Register reg) { SmiUntag(reg, reg); }
+ void SmiUntag(Register reg, int scale = 0) { SmiUntag(reg, reg, scale); }
- void SmiUntag(Register dst, Register src) {
- ShiftRightArithP(dst, src, Operand(kSmiShift));
+ void SmiUntag(Register dst, Register src, int scale = 0) {
+ if (scale > kSmiShift) {
+ ShiftLeftP(dst, src, Operand(scale - kSmiShift));
+ } else if (scale < kSmiShift) {
+ ShiftRightArithP(dst, src, Operand(kSmiShift - scale));
+ } else {
+ // do nothing
+ }
}
// Activation support.
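As a side note on the scaled SmiUntag above: untag-then-scale folds into a single shift. A standalone sketch (plain integers, kSmiShift assumed to be 1, not V8 code) demonstrating the equivalence the branches rely on:

// Illustrative sketch only -- not V8 code.
#include <cassert>
#include <cstdint>

constexpr int kSmiShift = 1;  // assumption: 32-bit-style Smi encoding

int64_t UntagThenScale(int64_t smi, int scale) {
  return (smi >> kSmiShift) << scale;  // two shifts: untag, then scale
}

int64_t FoldedUntag(int64_t smi, int scale) {
  if (scale > kSmiShift) return smi << (scale - kSmiShift);
  if (scale < kSmiShift) return smi >> (kSmiShift - scale);
  return smi;  // scale == kSmiShift: the shifts cancel
}

int main() {
  int64_t smi = 42 << kSmiShift;  // tagged representation of 42
  for (int scale = 0; scale <= 3; ++scale) {
    assert(UntagThenScale(smi, scale) == FoldedUntag(smi, scale));
  }
}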
@@ -1005,6 +1011,7 @@ class TurboAssembler : public Assembler {
Label* condition_met);
void ResetSpeculationPoisonRegister();
+ void ComputeCodeStartAddress(Register dst);
private:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
@@ -1088,7 +1095,7 @@ class MacroAssembler : public TurboAssembler {
bool builtin_exit_frame = false);
// Generates a trampoline to jump to the off-heap instruction stream.
- void JumpToInstructionStream(const InstructionStream* stream);
+ void JumpToInstructionStream(Address entry);
// Compare the object in a register to a value and jump if they are equal.
void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) {
@@ -1109,6 +1116,10 @@ class MacroAssembler : public TurboAssembler {
Register scratch, DoubleRegister double_scratch);
// ---------------------------------------------------------------------------
+ // In-place weak references.
+ void LoadWeakValue(Register out, Register in, Label* target_if_cleared);
+
+ // ---------------------------------------------------------------------------
// StatsCounter support
void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
@@ -1228,6 +1239,10 @@ class MacroAssembler : public TurboAssembler {
// Abort execution if argument is not a FixedArray, enabled via --debug-code.
void AssertFixedArray(Register object);
+ // Abort execution if argument is not a Constructor, enabled via --debug-code.
+ void AssertConstructor(Register object, Register scratch);
+
+ // Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
// Abort execution if argument is not a JSBoundFunction,
diff --git a/deps/v8/src/simulator-base.h b/deps/v8/src/simulator-base.h
index 47a6b1a52c..9564af8a2e 100644
--- a/deps/v8/src/simulator-base.h
+++ b/deps/v8/src/simulator-base.h
@@ -9,6 +9,7 @@
#include "src/assembler.h"
#include "src/globals.h"
+#include "src/isolate.h"
#if defined(USE_SIMULATOR)
diff --git a/deps/v8/src/snapshot/builtin-deserializer-allocator.h b/deps/v8/src/snapshot/builtin-deserializer-allocator.h
index eb04b54025..207f02811a 100644
--- a/deps/v8/src/snapshot/builtin-deserializer-allocator.h
+++ b/deps/v8/src/snapshot/builtin-deserializer-allocator.h
@@ -57,6 +57,20 @@ class BuiltinDeserializerAllocator final {
void MoveToNextChunk(AllocationSpace space) { UNREACHABLE(); }
void SetAlignment(AllocationAlignment alignment) { UNREACHABLE(); }
+ void set_next_reference_is_weak(bool next_reference_is_weak) {
+ next_reference_is_weak_ = next_reference_is_weak;
+ }
+
+ bool GetAndClearNextReferenceIsWeak() {
+ bool saved = next_reference_is_weak_;
+ next_reference_is_weak_ = false;
+ return saved;
+ }
+
+#ifdef DEBUG
+ bool next_reference_is_weak() const { return next_reference_is_weak_; }
+#endif
+
HeapObject* GetMap(uint32_t index) { UNREACHABLE(); }
HeapObject* GetLargeObject(uint32_t index) { UNREACHABLE(); }
HeapObject* GetObject(AllocationSpace space, uint32_t chunk_index,
@@ -123,6 +137,8 @@ class BuiltinDeserializerAllocator final {
// deserialization.
Address handler_allocation_ = nullptr;
+ bool next_reference_is_weak_ = false;
+
DISALLOW_COPY_AND_ASSIGN(BuiltinDeserializerAllocator)
};
diff --git a/deps/v8/src/snapshot/builtin-deserializer.cc b/deps/v8/src/snapshot/builtin-deserializer.cc
index e8e086fca3..8e3ad8c20c 100644
--- a/deps/v8/src/snapshot/builtin-deserializer.cc
+++ b/deps/v8/src/snapshot/builtin-deserializer.cc
@@ -113,8 +113,10 @@ Code* BuiltinDeserializer::DeserializeBuiltin(int builtin_id) {
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_builtin_code) {
+ CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
+ OFStream os(tracing_scope.file());
+
DCHECK(isolate()->builtins()->is_initialized());
- OFStream os(stdout);
code->Disassemble(Builtins::name(builtin_id), os);
os << std::flush;
}
@@ -131,7 +133,9 @@ Code* BuiltinDeserializer::DeserializeHandler(Bytecode bytecode,
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_builtin_code) {
- OFStream os(stdout);
+ CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
+ OFStream os(tracing_scope.file());
+
code->Disassemble(Bytecodes::ToString(bytecode), os);
os << std::flush;
}
@@ -157,8 +161,16 @@ Code* BuiltinDeserializer::DeserializeBuiltinRaw(int builtin_id) {
// Flush the instruction cache.
Code* code = Code::cast(o);
- Assembler::FlushICache(code->instruction_start(), code->instruction_size());
-
+ Assembler::FlushICache(code->raw_instruction_start(),
+ code->raw_instruction_size());
+
+ PROFILE(isolate(), CodeCreateEvent(CodeEventListener::BUILTIN_TAG,
+ AbstractCode::cast(code),
+ Builtins::name(builtin_id)));
+ LOG_CODE_EVENT(isolate(),
+ CodeLinePosInfoRecordEvent(
+ code->raw_instruction_start(),
+ ByteArray::cast(code->source_position_table())));
return code;
}
@@ -181,8 +193,16 @@ Code* BuiltinDeserializer::DeserializeHandlerRaw(Bytecode bytecode,
// Flush the instruction cache.
Code* code = Code::cast(o);
- Assembler::FlushICache(code->instruction_start(), code->instruction_size());
+ Assembler::FlushICache(code->raw_instruction_start(),
+ code->raw_instruction_size());
+ const char* handler_name =
+ isolate()->interpreter()->LookupNameOfBytecodeHandler(code);
+ if (handler_name == nullptr) {
+ handler_name = "UnknownBytecodeHandler";
+ }
+ PROFILE(isolate(), CodeCreateEvent(CodeEventListener::HANDLER_TAG,
+ AbstractCode::cast(code), handler_name));
return code;
}
diff --git a/deps/v8/src/snapshot/builtin-serializer-allocator.cc b/deps/v8/src/snapshot/builtin-serializer-allocator.cc
index dbb5789721..af33e1ee49 100644
--- a/deps/v8/src/snapshot/builtin-serializer-allocator.cc
+++ b/deps/v8/src/snapshot/builtin-serializer-allocator.cc
@@ -49,14 +49,12 @@ void BuiltinSerializerAllocator::OutputStatistics() {
PrintF(" Spaces (bytes):\n");
- STATIC_ASSERT(NEW_SPACE == 0);
- for (int space = 0; space < kNumberOfSpaces; space++) {
+ for (int space = FIRST_SPACE; space < kNumberOfSpaces; space++) {
PrintF("%16s", AllocationSpaceName(static_cast<AllocationSpace>(space)));
}
PrintF("\n");
- STATIC_ASSERT(NEW_SPACE == 0);
- for (int space = 0; space < kNumberOfSpaces; space++) {
+ for (int space = FIRST_SPACE; space < kNumberOfSpaces; space++) {
uint32_t space_size = (space == CODE_SPACE) ? virtual_chunk_size_ : 0;
PrintF("%16d", space_size);
}
diff --git a/deps/v8/src/snapshot/code-serializer.cc b/deps/v8/src/snapshot/code-serializer.cc
index 8126e9ee2c..2697e9dce4 100644
--- a/deps/v8/src/snapshot/code-serializer.cc
+++ b/deps/v8/src/snapshot/code-serializer.cc
@@ -13,11 +13,8 @@
#include "src/objects-inl.h"
#include "src/snapshot/object-deserializer.h"
#include "src/snapshot/snapshot.h"
-#include "src/trap-handler/trap-handler.h"
#include "src/version.h"
#include "src/visitors.h"
-#include "src/wasm/wasm-module.h"
-#include "src/wasm/wasm-objects-inl.h"
namespace v8 {
namespace internal {
@@ -33,31 +30,49 @@ ScriptData::ScriptData(const byte* data, int length)
}
}
-ScriptData* CodeSerializer::Serialize(Isolate* isolate,
- Handle<SharedFunctionInfo> info,
- Handle<String> source) {
+// static
+ScriptCompiler::CachedData* CodeSerializer::Serialize(
+ Handle<SharedFunctionInfo> info, Handle<String> source) {
+ Isolate* isolate = info->GetIsolate();
+ TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute");
+ HistogramTimerScope histogram_timer(isolate->counters()->compile_serialize());
+ RuntimeCallTimerScope runtimeTimer(isolate,
+ RuntimeCallCounterId::kCompileSerialize);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileSerialize");
+
base::ElapsedTimer timer;
if (FLAG_profile_deserialization) timer.Start();
+ Handle<Script> script(Script::cast(info->script()), isolate);
if (FLAG_trace_serializer) {
PrintF("[Serializing from");
Object* script = info->script();
- if (script->IsScript()) Script::cast(script)->name()->ShortPrint();
+ Script::cast(script)->name()->ShortPrint();
PrintF("]\n");
}
+ // TODO(7110): Enable serialization of Asm modules once the AsmWasmData is
+ // context independent.
+ if (script->ContainsAsmModule()) return nullptr;
+ if (isolate->debug()->is_loaded()) return nullptr;
// Serialize code object.
CodeSerializer cs(isolate, SerializedCodeData::SourceHash(source));
DisallowHeapAllocation no_gc;
cs.reference_map()->AddAttachedReference(*source);
- ScriptData* ret = cs.Serialize(info);
+ ScriptData* script_data = cs.Serialize(info);
if (FLAG_profile_deserialization) {
double ms = timer.Elapsed().InMillisecondsF();
- int length = ret->length();
+ int length = script_data->length();
PrintF("[Serializing to %d bytes took %0.3f ms]\n", length, ms);
}
- return ret;
+ ScriptCompiler::CachedData* result =
+ new ScriptCompiler::CachedData(script_data->data(), script_data->length(),
+ ScriptCompiler::CachedData::BufferOwned);
+ script_data->ReleaseDataOwnership();
+ delete script_data;
+
+ return result;
}
ScriptData* CodeSerializer::Serialize(Handle<HeapObject> obj) {
@@ -73,6 +88,31 @@ ScriptData* CodeSerializer::Serialize(Handle<HeapObject> obj) {
return data.GetScriptData();
}
+bool CodeSerializer::SerializeReadOnlyObject(HeapObject* obj,
+ HowToCode how_to_code,
+ WhereToPoint where_to_point,
+ int skip) {
+ PagedSpace* read_only_space = isolate()->heap()->read_only_space();
+ if (!read_only_space->Contains(obj)) return false;
+
+ // For objects in RO_SPACE, never serialize the object, but instead create a
+ // back reference that encodes the page number as the chunk_index and the
+ // offset within the page as the chunk_offset.
+ Address address = obj->address();
+ Page* page = Page::FromAddress(address);
+ uint32_t chunk_index = 0;
+ for (Page* p : *read_only_space) {
+ if (p == page) break;
+ ++chunk_index;
+ }
+ uint32_t chunk_offset = static_cast<uint32_t>(page->Offset(address));
+ SerializerReference back_reference =
+ SerializerReference::BackReference(RO_SPACE, chunk_index, chunk_offset);
+ reference_map()->Add(obj, back_reference);
+ CHECK(SerializeBackReference(obj, how_to_code, where_to_point, skip));
+ return true;
+}
+
void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
if (SerializeHotObject(obj, how_to_code, where_to_point, skip)) return;
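The read-only-space handling above encodes an object as a (page index, offset within page) pair instead of serializing it. A standalone sketch (not V8 code; the page size and addresses are made up) of that encode/decode round-trip:

// Illustrative sketch only -- not V8 code.
#include <cassert>
#include <cstdint>
#include <vector>

constexpr uint32_t kPageSize = 0x1000;

struct BackReference {
  uint32_t chunk_index;   // index of the page in the read-only space
  uint32_t chunk_offset;  // offset of the object within that page
};

BackReference Encode(const std::vector<uintptr_t>& pages, uintptr_t address) {
  for (uint32_t i = 0; i < pages.size(); ++i) {
    if (address >= pages[i] && address < pages[i] + kPageSize) {
      return {i, static_cast<uint32_t>(address - pages[i])};
    }
  }
  assert(false && "address not in read-only space");
  return {};
}

uintptr_t Decode(const std::vector<uintptr_t>& pages, BackReference ref) {
  return pages[ref.chunk_index] + ref.chunk_offset;
}

int main() {
  std::vector<uintptr_t> pages = {0x10000, 0x20000, 0x30000};
  uintptr_t object = 0x20040;  // lives on the second page
  BackReference ref = Encode(pages, object);
  assert(ref.chunk_index == 1 && ref.chunk_offset == 0x40);
  assert(Decode(pages, ref) == object);
}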
@@ -85,6 +125,8 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
if (SerializeBackReference(obj, how_to_code, where_to_point, skip)) return;
+ if (SerializeReadOnlyObject(obj, how_to_code, where_to_point, skip)) return;
+
FlushSkip(skip);
if (obj->IsCode()) {
@@ -250,95 +292,12 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
PROFILE(isolate, CodeCreateEvent(CodeEventListener::SCRIPT_TAG,
result->abstract_code(), *result, name));
}
- return scope.CloseAndEscape(result);
-}
-
-WasmCompiledModuleSerializer::WasmCompiledModuleSerializer(
- Isolate* isolate, uint32_t source_hash, Handle<Context> native_context,
- Handle<SeqOneByteString> module_bytes)
- : CodeSerializer(isolate, source_hash) {
- reference_map()->AddAttachedReference(*isolate->native_context());
- reference_map()->AddAttachedReference(*module_bytes);
-}
-
-std::unique_ptr<ScriptData> WasmCompiledModuleSerializer::SerializeWasmModule(
- Isolate* isolate, Handle<FixedArray> input) {
- Handle<WasmCompiledModule> compiled_module =
- Handle<WasmCompiledModule>::cast(input);
- WasmCompiledModuleSerializer wasm_cs(
- isolate, 0, isolate->native_context(),
- handle(compiled_module->shared()->module_bytes()));
- ScriptData* data = wasm_cs.Serialize(compiled_module);
- return std::unique_ptr<ScriptData>(data);
-}
-MaybeHandle<FixedArray> WasmCompiledModuleSerializer::DeserializeWasmModule(
- Isolate* isolate, ScriptData* data, Vector<const byte> wire_bytes) {
- MaybeHandle<FixedArray> nothing;
- if (!wasm::IsWasmCodegenAllowed(isolate, isolate->native_context())) {
- return nothing;
+ if (isolate->NeedsSourcePositionsForProfiling()) {
+ Handle<Script> script(Script::cast(result->script()), isolate);
+ Script::InitLineEnds(script);
}
- SerializedCodeData::SanityCheckResult sanity_check_result =
- SerializedCodeData::CHECK_SUCCESS;
-
- const SerializedCodeData scd = SerializedCodeData::FromCachedData(
- isolate, data, 0, &sanity_check_result);
-
- if (sanity_check_result != SerializedCodeData::CHECK_SUCCESS) {
- return nothing;
- }
-
- // TODO(6792): No longer needed once WebAssembly code is off heap.
- CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
- MaybeHandle<WasmCompiledModule> maybe_result =
- ObjectDeserializer::DeserializeWasmCompiledModule(isolate, &scd,
- wire_bytes);
-
- Handle<WasmCompiledModule> result;
- if (!maybe_result.ToHandle(&result)) return nothing;
-
- WasmCompiledModule::ReinitializeAfterDeserialization(isolate, result);
- DCHECK(WasmCompiledModule::IsWasmCompiledModule(*result));
- return result;
-}
-
-void WasmCompiledModuleSerializer::SerializeCodeObject(
- Code* code_object, HowToCode how_to_code, WhereToPoint where_to_point) {
- Code::Kind kind = code_object->kind();
- switch (kind) {
- case Code::WASM_FUNCTION:
- case Code::JS_TO_WASM_FUNCTION: {
- // TODO(6792): No longer needed once WebAssembly code is off heap.
- CodeSpaceMemoryModificationScope modification_scope(isolate()->heap());
- // Because the trap handler index is not meaningful across copies and
- // serializations, we need to serialize it as kInvalidIndex. We do this by
- // saving the old value, setting the index to kInvalidIndex and then
- // restoring the old value.
- const int old_trap_handler_index =
- code_object->trap_handler_index()->value();
- code_object->set_trap_handler_index(
- Smi::FromInt(trap_handler::kInvalidIndex));
-
- // Just serialize the code_object.
- SerializeGeneric(code_object, how_to_code, where_to_point);
- code_object->set_trap_handler_index(Smi::FromInt(old_trap_handler_index));
- break;
- }
- case Code::WASM_INTERPRETER_ENTRY:
- case Code::WASM_TO_JS_FUNCTION:
- case Code::WASM_TO_WASM_FUNCTION:
- // Serialize the illegal builtin instead. On instantiation of a
- // deserialized module, these will be replaced again.
- SerializeBuiltinReference(*BUILTIN_CODE(isolate(), Illegal), how_to_code,
- where_to_point, 0);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-bool WasmCompiledModuleSerializer::ElideObject(Object* obj) {
- return obj->IsWeakCell() || obj->IsForeign() || obj->IsBreakPointInfo();
+ return scope.CloseAndEscape(result);
}
class Checksum {
diff --git a/deps/v8/src/snapshot/code-serializer.h b/deps/v8/src/snapshot/code-serializer.h
index 8dd5131eb1..8e97f47f2f 100644
--- a/deps/v8/src/snapshot/code-serializer.h
+++ b/deps/v8/src/snapshot/code-serializer.h
@@ -45,13 +45,12 @@ class ScriptData {
class CodeSerializer : public Serializer<> {
public:
- static ScriptData* Serialize(Isolate* isolate,
- Handle<SharedFunctionInfo> info,
- Handle<String> source);
+ static ScriptCompiler::CachedData* Serialize(Handle<SharedFunctionInfo> info,
+ Handle<String> source);
ScriptData* Serialize(Handle<HeapObject> obj);
- MUST_USE_RESULT static MaybeHandle<SharedFunctionInfo> Deserialize(
+ V8_WARN_UNUSED_RESULT static MaybeHandle<SharedFunctionInfo> Deserialize(
Isolate* isolate, ScriptData* cached_data, Handle<String> source);
const std::vector<uint32_t>* stub_keys() const { return &stub_keys_; }
@@ -79,31 +78,15 @@ class CodeSerializer : public Serializer<> {
void SerializeCodeStub(Code* code_stub, HowToCode how_to_code,
WhereToPoint where_to_point);
+ bool SerializeReadOnlyObject(HeapObject* obj, HowToCode how_to_code,
+ WhereToPoint where_to_point, int skip);
+
DisallowHeapAllocation no_gc_;
uint32_t source_hash_;
std::vector<uint32_t> stub_keys_;
DISALLOW_COPY_AND_ASSIGN(CodeSerializer);
};
-class WasmCompiledModuleSerializer : public CodeSerializer {
- public:
- static std::unique_ptr<ScriptData> SerializeWasmModule(
- Isolate* isolate, Handle<FixedArray> compiled_module);
- static MaybeHandle<FixedArray> DeserializeWasmModule(
- Isolate* isolate, ScriptData* data, Vector<const byte> wire_bytes);
-
- protected:
- void SerializeCodeObject(Code* code_object, HowToCode how_to_code,
- WhereToPoint where_to_point) override;
- bool ElideObject(Object* obj) override;
-
- private:
- WasmCompiledModuleSerializer(Isolate* isolate, uint32_t source_hash,
- Handle<Context> native_context,
- Handle<SeqOneByteString> module_bytes);
- DISALLOW_COPY_AND_ASSIGN(WasmCompiledModuleSerializer);
-};
-
// Wrapper around ScriptData to provide code-serializer-specific functionality.
class SerializedCodeData : public SerializedData {
public:
diff --git a/deps/v8/src/snapshot/default-deserializer-allocator.cc b/deps/v8/src/snapshot/default-deserializer-allocator.cc
index 5b34bfa540..37d57286bc 100644
--- a/deps/v8/src/snapshot/default-deserializer-allocator.cc
+++ b/deps/v8/src/snapshot/default-deserializer-allocator.cc
@@ -122,9 +122,8 @@ HeapObject* DefaultDeserializerAllocator::GetObject(AllocationSpace space,
void DefaultDeserializerAllocator::DecodeReservation(
std::vector<SerializedData::Reservation> res) {
- DCHECK_EQ(0, reservations_[NEW_SPACE].size());
- STATIC_ASSERT(NEW_SPACE == 0);
- int current_space = NEW_SPACE;
+ DCHECK_EQ(0, reservations_[FIRST_SPACE].size());
+ int current_space = FIRST_SPACE;
for (auto& r : res) {
reservations_[current_space].push_back({r.chunk_size(), NULL, NULL});
if (r.is_last()) current_space++;
@@ -135,7 +134,7 @@ void DefaultDeserializerAllocator::DecodeReservation(
bool DefaultDeserializerAllocator::ReserveSpace() {
#ifdef DEBUG
- for (int i = NEW_SPACE; i < kNumberOfSpaces; ++i) {
+ for (int i = FIRST_SPACE; i < kNumberOfSpaces; ++i) {
DCHECK_GT(reservations_[i].size(), 0);
}
#endif // DEBUG
@@ -153,8 +152,6 @@ bool DefaultDeserializerAllocator::ReserveSpace() {
bool DefaultDeserializerAllocator::ReserveSpace(
StartupDeserializer* startup_deserializer,
BuiltinDeserializer* builtin_deserializer) {
- const int first_space = NEW_SPACE;
- const int last_space = SerializerDeserializer::kNumberOfSpaces;
Isolate* isolate = startup_deserializer->isolate();
// Create a set of merged reservations to reserve space in one go.
@@ -163,7 +160,7 @@ bool DefaultDeserializerAllocator::ReserveSpace(
// Instead, we manually determine the required code-space.
Heap::Reservation merged_reservations[kNumberOfSpaces];
- for (int i = first_space; i < last_space; i++) {
+ for (int i = FIRST_SPACE; i < kNumberOfSpaces; i++) {
merged_reservations[i] =
startup_deserializer->allocator()->reservations_[i];
}
@@ -206,12 +203,12 @@ bool DefaultDeserializerAllocator::ReserveSpace(
// Write back startup reservations.
- for (int i = first_space; i < last_space; i++) {
+ for (int i = FIRST_SPACE; i < kNumberOfSpaces; i++) {
startup_deserializer->allocator()->reservations_[i].swap(
merged_reservations[i]);
}
- for (int i = first_space; i < kNumberOfPreallocatedSpaces; i++) {
+ for (int i = FIRST_SPACE; i < kNumberOfPreallocatedSpaces; i++) {
startup_deserializer->allocator()->high_water_[i] =
startup_deserializer->allocator()->reservations_[i][0].start;
}
diff --git a/deps/v8/src/snapshot/default-deserializer-allocator.h b/deps/v8/src/snapshot/default-deserializer-allocator.h
index 124c637fc6..e6a5ba3fdc 100644
--- a/deps/v8/src/snapshot/default-deserializer-allocator.h
+++ b/deps/v8/src/snapshot/default-deserializer-allocator.h
@@ -36,6 +36,20 @@ class DefaultDeserializerAllocator final {
next_alignment_ = static_cast<AllocationAlignment>(alignment);
}
+ void set_next_reference_is_weak(bool next_reference_is_weak) {
+ next_reference_is_weak_ = next_reference_is_weak;
+ }
+
+ bool GetAndClearNextReferenceIsWeak() {
+ bool saved = next_reference_is_weak_;
+ next_reference_is_weak_ = false;
+ return saved;
+ }
+
+#ifdef DEBUG
+ bool next_reference_is_weak() const { return next_reference_is_weak_; }
+#endif
+
HeapObject* GetMap(uint32_t index);
HeapObject* GetLargeObject(uint32_t index);
HeapObject* GetObject(AllocationSpace space, uint32_t chunk_index,
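The weak-reference plumbing added here is a one-shot flag: a kWeakPrefix byte in the snapshot stream arms it, and the very next materialized reference consumes it. A standalone sketch (not V8 code) of that pattern:

// Illustrative sketch only -- not V8 code.
#include <cassert>
#include <cstdint>
#include <vector>

class WeakPrefixTracker {
 public:
  void set_next_reference_is_weak(bool weak) { next_is_weak_ = weak; }
  bool GetAndClearNextReferenceIsWeak() {
    bool saved = next_is_weak_;
    next_is_weak_ = false;  // one-shot: only the next reference is affected
    return saved;
  }

 private:
  bool next_is_weak_ = false;
};

enum class Ref : uint8_t { kWeakPrefix, kObject };

int main() {
  WeakPrefixTracker tracker;
  std::vector<Ref> stream = {Ref::kObject, Ref::kWeakPrefix, Ref::kObject};
  std::vector<bool> weak_bits;
  for (Ref r : stream) {
    if (r == Ref::kWeakPrefix) {
      tracker.set_next_reference_is_weak(true);
      continue;
    }
    weak_bits.push_back(tracker.GetAndClearNextReferenceIsWeak());
  }
  // Only the reference immediately after the prefix is marked weak.
  assert(weak_bits.size() == 2 && !weak_bits[0] && weak_bits[1]);
}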
@@ -80,6 +94,7 @@ class DefaultDeserializerAllocator final {
// The alignment of the next allocation.
AllocationAlignment next_alignment_ = kWordAligned;
+ bool next_reference_is_weak_ = false;
// All required maps are pre-allocated during reservation. {next_map_index_}
// stores the index of the next map to return from allocation.
diff --git a/deps/v8/src/snapshot/default-serializer-allocator.cc b/deps/v8/src/snapshot/default-serializer-allocator.cc
index b8cc55ff2b..c00f059704 100644
--- a/deps/v8/src/snapshot/default-serializer-allocator.cc
+++ b/deps/v8/src/snapshot/default-serializer-allocator.cc
@@ -70,6 +70,11 @@ bool DefaultSerializerAllocator::BackReferenceIsAlreadyAllocated(
return reference.large_object_index() < seen_large_objects_index_;
} else if (space == MAP_SPACE) {
return reference.map_index() < num_maps_;
+ } else if (space == RO_SPACE &&
+ serializer_->isolate()->heap()->deserialization_complete()) {
+ // If not deserializing the isolate itself, then we create BackReferences
+ // for all RO_SPACE objects without ever allocating.
+ return true;
} else {
size_t chunk_index = reference.chunk_index();
if (chunk_index == completed_chunks_[space].size()) {
@@ -86,8 +91,7 @@ std::vector<SerializedData::Reservation>
DefaultSerializerAllocator::EncodeReservations() const {
std::vector<SerializedData::Reservation> out;
- STATIC_ASSERT(NEW_SPACE == 0);
- for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
+ for (int i = FIRST_SPACE; i < kNumberOfPreallocatedSpaces; i++) {
for (size_t j = 0; j < completed_chunks_[i].size(); j++) {
out.emplace_back(completed_chunks_[i][j]);
}
@@ -114,14 +118,12 @@ void DefaultSerializerAllocator::OutputStatistics() {
PrintF(" Spaces (bytes):\n");
- STATIC_ASSERT(NEW_SPACE == 0);
- for (int space = 0; space < kNumberOfSpaces; space++) {
+ for (int space = FIRST_SPACE; space < kNumberOfSpaces; space++) {
PrintF("%16s", AllocationSpaceName(static_cast<AllocationSpace>(space)));
}
PrintF("\n");
- STATIC_ASSERT(NEW_SPACE == 0);
- for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
+ for (int space = FIRST_SPACE; space < kNumberOfPreallocatedSpaces; space++) {
size_t s = pending_chunk_[space];
for (uint32_t chunk_size : completed_chunks_[space]) s += chunk_size;
PrintF("%16" PRIuS, s);
diff --git a/deps/v8/src/snapshot/deserializer.cc b/deps/v8/src/snapshot/deserializer.cc
index d1e200ef1e..6436228b20 100644
--- a/deps/v8/src/snapshot/deserializer.cc
+++ b/deps/v8/src/snapshot/deserializer.cc
@@ -7,9 +7,11 @@
#include "src/assembler-inl.h"
#include "src/isolate.h"
#include "src/objects/hash-table.h"
+#include "src/objects/maybe-object.h"
#include "src/objects/string.h"
#include "src/snapshot/builtin-deserializer-allocator.h"
#include "src/snapshot/natives.h"
+#include "src/snapshot/snapshot.h"
namespace v8 {
namespace internal {
@@ -20,7 +22,7 @@ void Deserializer<AllocatorT>::Initialize(Isolate* isolate) {
DCHECK_NOT_NULL(isolate);
isolate_ = isolate;
DCHECK_NULL(external_reference_table_);
- external_reference_table_ = ExternalReferenceTable::instance(isolate);
+ external_reference_table_ = isolate->heap()->external_reference_table();
#ifdef DEBUG
// Count the number of external references registered through the API.
num_api_references_ = 0;
@@ -69,7 +71,8 @@ void Deserializer<AllocatorT>::VisitRootPointers(Root root,
// The space must be new space. Any other space would cause ReadChunk to try
// to update the remembered set using nullptr as the address.
- ReadData(start, end, NEW_SPACE, nullptr);
+ ReadData(reinterpret_cast<MaybeObject**>(start),
+ reinterpret_cast<MaybeObject**>(end), NEW_SPACE, nullptr);
}
template <class AllocatorT>
@@ -97,8 +100,9 @@ void Deserializer<AllocatorT>::DeserializeDeferredObjects() {
HeapObject* object = GetBackReferencedObject(space);
int size = source_.GetInt() << kPointerSizeLog2;
Address obj_address = object->address();
- Object** start = reinterpret_cast<Object**>(obj_address + kPointerSize);
- Object** end = reinterpret_cast<Object**>(obj_address + size);
+ MaybeObject** start =
+ reinterpret_cast<MaybeObject**>(obj_address + kPointerSize);
+ MaybeObject** end = reinterpret_cast<MaybeObject**>(obj_address + size);
bool filled = ReadData(start, end, space, obj_address);
CHECK(filled);
DCHECK(CanBeDeferred(object));
@@ -216,7 +220,7 @@ HeapObject* Deserializer<AllocatorT>::PostProcessNewObject(HeapObject* obj,
FixedTypedArrayBase* elements =
FixedTypedArrayBase::cast(typed_array->elements());
// Must be off-heap layout.
- DCHECK_NULL(elements->base_pointer());
+ DCHECK(!typed_array->is_on_heap());
void* pointer_with_offset = reinterpret_cast<void*>(
reinterpret_cast<intptr_t>(elements->external_pointer()) +
@@ -231,7 +235,6 @@ HeapObject* Deserializer<AllocatorT>::PostProcessNewObject(HeapObject* obj,
void* backing_store = off_heap_backing_stores_[store_index->value()];
buffer->set_backing_store(backing_store);
- buffer->set_allocation_base(backing_store);
isolate_->heap()->RegisterNewArrayBuffer(buffer);
}
} else if (obj->IsFixedTypedArrayBase()) {
@@ -250,6 +253,7 @@ HeapObject* Deserializer<AllocatorT>::PostProcessNewObject(HeapObject* obj,
interpreter::Interpreter::InterruptBudget());
bytecode_array->set_osr_loop_nesting_level(0);
}
+
// Check alignment.
DCHECK_EQ(0, Heap::GetFillToAlign(obj->address(),
HeapObject::RequiredAlignment(obj->map())));
@@ -277,6 +281,18 @@ HeapObject* Deserializer<AllocatorT>::GetBackReferencedObject(int space) {
case MAP_SPACE:
obj = allocator()->GetMap(back_reference.map_index());
break;
+ case RO_SPACE:
+ if (isolate()->heap()->deserialization_complete()) {
+ PagedSpace* read_only_space = isolate()->heap()->read_only_space();
+ Page* page = read_only_space->FirstPage();
+ for (uint32_t i = 0; i < back_reference.chunk_index(); ++i) {
+ page = page->next_page();
+ }
+ Address address = page->OffsetToAddress(back_reference.chunk_offset());
+ obj = HeapObject::FromAddress(address);
+ break;
+ }
+ V8_FALLTHROUGH;
default:
obj = allocator()->GetObject(static_cast<AllocationSpace>(space),
back_reference.chunk_index(),
@@ -289,6 +305,7 @@ HeapObject* Deserializer<AllocatorT>::GetBackReferencedObject(int space) {
}
hot_objects_.Add(obj);
+ DCHECK(!HasWeakHeapObjectTag(obj));
return obj;
}
@@ -298,8 +315,9 @@ HeapObject* Deserializer<AllocatorT>::GetBackReferencedObject(int space) {
// written very late, which means the FreeSpace map is not set up by the
// time we need to use it to mark the space at the end of a page free.
template <class AllocatorT>
-void Deserializer<AllocatorT>::ReadObject(int space_number,
- Object** write_back) {
+void Deserializer<AllocatorT>::ReadObject(
+ int space_number, MaybeObject** write_back,
+ HeapObjectReferenceType reference_type) {
const int size = source_.GetInt() << kObjectAlignmentBits;
Address address =
@@ -307,15 +325,18 @@ void Deserializer<AllocatorT>::ReadObject(int space_number,
HeapObject* obj = HeapObject::FromAddress(address);
isolate_->heap()->OnAllocationEvent(obj, size);
- Object** current = reinterpret_cast<Object**>(address);
- Object** limit = current + (size >> kPointerSizeLog2);
+ MaybeObject** current = reinterpret_cast<MaybeObject**>(address);
+ MaybeObject** limit = current + (size >> kPointerSizeLog2);
if (ReadData(current, limit, space_number, address)) {
// Only post process if object content has not been deferred.
obj = PostProcessNewObject(obj, space_number);
}
- Object* write_back_obj = obj;
+ MaybeObject* write_back_obj =
+ reference_type == HeapObjectReferenceType::STRONG
+ ? HeapObjectReference::Strong(obj)
+ : HeapObjectReference::Weak(obj);
UnalignedCopy(write_back, &write_back_obj);
#ifdef DEBUG
if (obj->IsCode()) {
@@ -328,15 +349,18 @@ void Deserializer<AllocatorT>::ReadObject(int space_number,
template <class AllocatorT>
Object* Deserializer<AllocatorT>::ReadDataSingle() {
- Object* o;
- Object** start = &o;
- Object** end = start + 1;
+ MaybeObject* o;
+ MaybeObject** start = &o;
+ MaybeObject** end = start + 1;
int source_space = NEW_SPACE;
Address current_object = nullptr;
CHECK(ReadData(start, end, source_space, current_object));
-
- return o;
+ HeapObject* heap_object;
+ bool success = o->ToStrongHeapObject(&heap_object);
+ DCHECK(success);
+ USE(success);
+ return heap_object;
}
static void NoExternalReferencesCallback() {
@@ -348,8 +372,8 @@ static void NoExternalReferencesCallback() {
}
template <class AllocatorT>
-bool Deserializer<AllocatorT>::ReadData(Object** current, Object** limit,
- int source_space,
+bool Deserializer<AllocatorT>::ReadData(MaybeObject** current,
+ MaybeObject** limit, int source_space,
Address current_object_address) {
Isolate* const isolate = isolate_;
// Write barrier support costs around 1% in startup time. In fact there
@@ -386,6 +410,8 @@ bool Deserializer<AllocatorT>::ReadData(Object** current, Object** limit,
CASE_STATEMENT(where, how, within, MAP_SPACE) \
V8_FALLTHROUGH; \
CASE_STATEMENT(where, how, within, LO_SPACE) \
+ V8_FALLTHROUGH; \
+ CASE_STATEMENT(where, how, within, RO_SPACE) \
CASE_BODY(where, how, within, kAnyOldSpace)
#define FOUR_CASES(byte_code) \
@@ -445,12 +471,6 @@ bool Deserializer<AllocatorT>::ReadData(Object** current, Object** limit,
SINGLE_CASE(kPartialSnapshotCache, kPlain, kStartOfObject, 0)
SINGLE_CASE(kPartialSnapshotCache, kFromCode, kStartOfObject, 0)
SINGLE_CASE(kPartialSnapshotCache, kFromCode, kInnerPointer, 0)
- // Find an external reference and write a pointer to it to the current
- // object.
- SINGLE_CASE(kExternalReference, kPlain, kStartOfObject, 0)
- // Find an external reference and write a pointer to it in the current
- // code object.
- SINGLE_CASE(kExternalReference, kFromCode, kStartOfObject, 0)
// Find an object in the attached references and write a pointer to it to
// the current object.
SINGLE_CASE(kAttachedReference, kPlain, kStartOfObject, 0)
@@ -467,11 +487,26 @@ bool Deserializer<AllocatorT>::ReadData(Object** current, Object** limit,
case kSkip: {
int size = source_.GetInt();
- current = reinterpret_cast<Object**>(
+ current = reinterpret_cast<MaybeObject**>(
reinterpret_cast<intptr_t>(current) + size);
break;
}
+ // Find an external reference and write a pointer to it to the current
+ // object.
+ case kExternalReference + kPlain + kStartOfObject:
+ current = reinterpret_cast<MaybeObject**>(ReadExternalReferenceCase(
+ kPlain, isolate, reinterpret_cast<void**>(current),
+ current_object_address));
+ break;
+ // Find an external reference and write a pointer to it in the current
+ // code object.
+ case kExternalReference + kFromCode + kStartOfObject:
+ current = reinterpret_cast<MaybeObject**>(ReadExternalReferenceCase(
+ kFromCode, isolate, reinterpret_cast<void**>(current),
+ current_object_address));
+ break;
+
case kInternalReferenceEncoded:
case kInternalReference: {
// Internal reference address is not encoded via skip, but by offset
@@ -480,8 +515,9 @@ bool Deserializer<AllocatorT>::ReadData(Object** current, Object** limit,
int target_offset = source_.GetInt();
Code* code =
Code::cast(HeapObject::FromAddress(current_object_address));
- DCHECK(0 <= pc_offset && pc_offset <= code->instruction_size());
- DCHECK(0 <= target_offset && target_offset <= code->instruction_size());
+ DCHECK(0 <= pc_offset && pc_offset <= code->raw_instruction_size());
+ DCHECK(0 <= target_offset &&
+ target_offset <= code->raw_instruction_size());
Address pc = code->entry() + pc_offset;
Address target = code->entry() + target_offset;
Assembler::deserialization_set_target_internal_reference_at(
@@ -491,6 +527,40 @@ bool Deserializer<AllocatorT>::ReadData(Object** current, Object** limit,
break;
}
+ case kOffHeapTarget: {
+#ifdef V8_EMBEDDED_BUILTINS
+ int skip = source_.GetInt();
+ int builtin_index = source_.GetInt();
+ DCHECK(Builtins::IsBuiltinId(builtin_index));
+
+ current = reinterpret_cast<MaybeObject**>(
+ reinterpret_cast<Address>(current) + skip);
+
+ CHECK_NOT_NULL(isolate->embedded_blob());
+ EmbeddedData d = EmbeddedData::FromBlob();
+ const uint8_t* address = d.InstructionStartOfBuiltin(builtin_index);
+ CHECK_NOT_NULL(address);
+
+ if (RelocInfo::OffHeapTargetIsCodedSpecially()) {
+ Address location_of_branch_data = reinterpret_cast<Address>(current);
+ Assembler::deserialization_set_special_target_at(
+ location_of_branch_data,
+ Code::cast(HeapObject::FromAddress(current_object_address)),
+ const_cast<Address>(address));
+ location_of_branch_data += Assembler::kSpecialTargetSize;
+ current = reinterpret_cast<MaybeObject**>(location_of_branch_data);
+ } else {
+ MaybeObject* o =
+ reinterpret_cast<MaybeObject*>(const_cast<uint8_t*>(address));
+ UnalignedCopy(current, &o);
+ current++;
+ }
+#else
+ UNREACHABLE();
+#endif
+ break;
+ }
+
case kNop:
break;
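For kOffHeapTarget above, the snapshot carries only a builtin index; deserialization maps it back to an address inside the embedded instruction blob. A standalone sketch (not V8 code; the blob layout and offsets are made up):

// Illustrative sketch only -- not V8 code.
#include <cassert>
#include <cstdint>

struct EmbeddedBlob {
  const uint8_t* start;
  const uint32_t* instruction_offsets;  // one entry per builtin
  const uint8_t* InstructionStartOfBuiltin(int builtin_index) const {
    return start + instruction_offsets[builtin_index];
  }
};

int main() {
  static const uint8_t blob[64] = {};
  static const uint32_t offsets[] = {0, 16, 48};
  EmbeddedBlob embedded{blob, offsets};
  // A serialized kOffHeapTarget record would carry the index (here: 2);
  // the deserializer rewrites it as a real instruction address.
  const uint8_t* target = embedded.InstructionStartOfBuiltin(2);
  assert(target == blob + 48);
}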
@@ -502,8 +572,8 @@ bool Deserializer<AllocatorT>::ReadData(Object** current, Object** limit,
case kDeferred: {
// Deferred can only occur right after the heap object header.
- DCHECK(current == reinterpret_cast<Object**>(current_object_address +
- kPointerSize));
+ DCHECK_EQ(current, reinterpret_cast<MaybeObject**>(
+ current_object_address + kPointerSize));
HeapObject* obj = HeapObject::FromAddress(current_object_address);
// If the deferred object is a map, its instance type may be used
// during deserialization. Initialize it with a temporary value.
@@ -522,7 +592,7 @@ bool Deserializer<AllocatorT>::ReadData(Object** current, Object** limit,
int size_in_bytes = source_.GetInt();
byte* raw_data_out = reinterpret_cast<byte*>(current);
source_.CopyRaw(raw_data_out, size_in_bytes);
- current = reinterpret_cast<Object**>(
+ current = reinterpret_cast<MaybeObject**>(
reinterpret_cast<intptr_t>(current) + size_in_bytes);
break;
}
@@ -538,8 +608,9 @@ bool Deserializer<AllocatorT>::ReadData(Object** current, Object** limit,
case kVariableRepeat: {
int repeats = source_.GetInt();
- Object* object = current[-1];
+ MaybeObject* object = current[-1];
DCHECK(!isolate->heap()->InNewSpace(object));
+ DCHECK(!allocator()->next_reference_is_weak());
for (int i = 0; i < repeats; i++) UnalignedCopy(current++, &object);
break;
}
@@ -557,7 +628,7 @@ bool Deserializer<AllocatorT>::ReadData(Object** current, Object** limit,
case kApiReference: {
int skip = source_.GetInt();
- current = reinterpret_cast<Object**>(
+ current = reinterpret_cast<MaybeObject**>(
reinterpret_cast<Address>(current) + skip);
uint32_t reference_id = static_cast<uint32_t>(source_.GetInt());
Address address;
@@ -575,6 +646,11 @@ bool Deserializer<AllocatorT>::ReadData(Object** current, Object** limit,
break;
}
+ case kWeakPrefix:
+ DCHECK(!allocator()->next_reference_is_weak());
+ allocator()->set_next_reference_is_weak(true);
+ break;
+
case kAlignmentPrefix:
case kAlignmentPrefix + 1:
case kAlignmentPrefix + 2: {
@@ -588,7 +664,7 @@ bool Deserializer<AllocatorT>::ReadData(Object** current, Object** limit,
SIXTEEN_CASES(kRootArrayConstantsWithSkip)
SIXTEEN_CASES(kRootArrayConstantsWithSkip + 16) {
int skip = source_.GetInt();
- current = reinterpret_cast<Object**>(
+ current = reinterpret_cast<MaybeObject**>(
reinterpret_cast<intptr_t>(current) + skip);
V8_FALLTHROUGH;
}
@@ -597,8 +673,10 @@ bool Deserializer<AllocatorT>::ReadData(Object** current, Object** limit,
SIXTEEN_CASES(kRootArrayConstants + 16) {
int id = data & kRootArrayConstantsMask;
Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(id);
- Object* object = isolate->heap()->root(root_index);
+ MaybeObject* object =
+ MaybeObject::FromObject(isolate->heap()->root(root_index));
DCHECK(!isolate->heap()->InNewSpace(object));
+ DCHECK(!allocator()->next_reference_is_weak());
UnalignedCopy(current++, &object);
break;
}
@@ -607,7 +685,7 @@ bool Deserializer<AllocatorT>::ReadData(Object** current, Object** limit,
FOUR_CASES(kHotObjectWithSkip)
FOUR_CASES(kHotObjectWithSkip + 4) {
int skip = source_.GetInt();
- current = reinterpret_cast<Object**>(
+ current = reinterpret_cast<MaybeObject**>(
reinterpret_cast<Address>(current) + skip);
V8_FALLTHROUGH;
}
@@ -616,12 +694,18 @@ bool Deserializer<AllocatorT>::ReadData(Object** current, Object** limit,
FOUR_CASES(kHotObject + 4) {
int index = data & kHotObjectMask;
Object* hot_object = hot_objects_.Get(index);
- UnalignedCopy(current, &hot_object);
+ MaybeObject* hot_maybe_object = MaybeObject::FromObject(hot_object);
+ if (allocator()->GetAndClearNextReferenceIsWeak()) {
+ hot_maybe_object = MaybeObject::MakeWeak(hot_maybe_object);
+ }
+
+ UnalignedCopy(current, &hot_maybe_object);
if (write_barrier_needed && isolate->heap()->InNewSpace(hot_object)) {
Address current_address = reinterpret_cast<Address>(current);
isolate->heap()->RecordWrite(
HeapObject::FromAddress(current_object_address),
- reinterpret_cast<Object**>(current_address), hot_object);
+ reinterpret_cast<MaybeObject**>(current_address),
+ hot_maybe_object);
}
current++;
break;
@@ -634,14 +718,15 @@ bool Deserializer<AllocatorT>::ReadData(Object** current, Object** limit,
byte* raw_data_out = reinterpret_cast<byte*>(current);
int size_in_bytes = (data - kFixedRawDataStart) << kPointerSizeLog2;
source_.CopyRaw(raw_data_out, size_in_bytes);
- current = reinterpret_cast<Object**>(raw_data_out + size_in_bytes);
+ current = reinterpret_cast<MaybeObject**>(raw_data_out + size_in_bytes);
break;
}
STATIC_ASSERT(kNumberOfFixedRepeat == 16);
SIXTEEN_CASES(kFixedRepeat) {
int repeats = data - kFixedRepeatStart;
- Object* object;
+ MaybeObject* object;
+ DCHECK(!allocator()->next_reference_is_weak());
UnalignedCopy(&object, current - 1);
DCHECK(!isolate->heap()->InNewSpace(object));
for (int i = 0; i < repeats; i++) UnalignedCopy(current++, &object);
@@ -684,30 +769,57 @@ int FixupJSConstructStub(Isolate* isolate, int builtin_id) {
} // namespace
template <class AllocatorT>
+void** Deserializer<AllocatorT>::ReadExternalReferenceCase(
+ HowToCode how, Isolate* isolate, void** current,
+ Address current_object_address) {
+ int skip = source_.GetInt();
+ current = reinterpret_cast<void**>(reinterpret_cast<Address>(current) + skip);
+ uint32_t reference_id = static_cast<uint32_t>(source_.GetInt());
+ Address address = external_reference_table_->address(reference_id);
+
+ if (how == kFromCode) {
+ Address location_of_branch_data = reinterpret_cast<Address>(current);
+ Assembler::deserialization_set_special_target_at(
+ location_of_branch_data,
+ Code::cast(HeapObject::FromAddress(current_object_address)), address);
+ location_of_branch_data += Assembler::kSpecialTargetSize;
+ current = reinterpret_cast<void**>(location_of_branch_data);
+ } else {
+ void* new_current = reinterpret_cast<void**>(address);
+ UnalignedCopy(current, &new_current);
+ ++current;
+ }
+ return current;
+}
+
+template <class AllocatorT>
template <int where, int how, int within, int space_number_if_any>
-Object** Deserializer<AllocatorT>::ReadDataCase(Isolate* isolate,
- Object** current,
- Address current_object_address,
- byte data,
- bool write_barrier_needed) {
+MaybeObject** Deserializer<AllocatorT>::ReadDataCase(
+ Isolate* isolate, MaybeObject** current, Address current_object_address,
+ byte data, bool write_barrier_needed) {
bool emit_write_barrier = false;
bool current_was_incremented = false;
int space_number = space_number_if_any == kAnyOldSpace ? (data & kSpaceMask)
: space_number_if_any;
+ HeapObjectReferenceType reference_type = HeapObjectReferenceType::STRONG;
if (where == kNewObject && how == kPlain && within == kStartOfObject) {
- ReadObject(space_number, current);
+ if (allocator()->GetAndClearNextReferenceIsWeak()) {
+ reference_type = HeapObjectReferenceType::WEAK;
+ }
+ ReadObject(space_number, current, reference_type);
emit_write_barrier = (space_number == NEW_SPACE);
} else {
Object* new_object = nullptr; /* May not be a real Object pointer. */
if (where == kNewObject) {
- ReadObject(space_number, &new_object);
+ ReadObject(space_number, reinterpret_cast<MaybeObject**>(&new_object),
+ HeapObjectReferenceType::STRONG);
} else if (where == kBackref) {
emit_write_barrier = (space_number == NEW_SPACE);
new_object = GetBackReferencedObject(data & kSpaceMask);
} else if (where == kBackrefWithSkip) {
int skip = source_.GetInt();
- current =
- reinterpret_cast<Object**>(reinterpret_cast<Address>(current) + skip);
+ current = reinterpret_cast<MaybeObject**>(
+ reinterpret_cast<Address>(current) + skip);
emit_write_barrier = (space_number == NEW_SPACE);
new_object = GetBackReferencedObject(data & kSpaceMask);
} else if (where == kRootArray) {
@@ -720,13 +832,6 @@ Object** Deserializer<AllocatorT>::ReadDataCase(Isolate* isolate,
int cache_index = source_.GetInt();
new_object = isolate->partial_snapshot_cache()->at(cache_index);
emit_write_barrier = isolate->heap()->InNewSpace(new_object);
- } else if (where == kExternalReference) {
- int skip = source_.GetInt();
- current =
- reinterpret_cast<Object**>(reinterpret_cast<Address>(current) + skip);
- uint32_t reference_id = static_cast<uint32_t>(source_.GetInt());
- Address address = external_reference_table_->address(reference_id);
- new_object = reinterpret_cast<Object*>(address);
} else if (where == kAttachedReference) {
int index = source_.GetInt();
new_object = *attached_objects_[index];
@@ -744,26 +849,31 @@ Object** Deserializer<AllocatorT>::ReadDataCase(Isolate* isolate,
// At this point, new_object may still be uninitialized, thus the
// unchecked Code cast.
new_object = reinterpret_cast<Object*>(
- reinterpret_cast<Code*>(new_object)->instruction_start());
+ reinterpret_cast<Code*>(new_object)->raw_instruction_start());
} else if (new_object->IsCode()) {
new_object = reinterpret_cast<Object*>(
- Code::cast(new_object)->instruction_start());
+ Code::cast(new_object)->raw_instruction_start());
} else {
Cell* cell = Cell::cast(new_object);
new_object = reinterpret_cast<Object*>(cell->ValueAddress());
}
}
if (how == kFromCode) {
+ DCHECK(!allocator()->next_reference_is_weak());
Address location_of_branch_data = reinterpret_cast<Address>(current);
Assembler::deserialization_set_special_target_at(
location_of_branch_data,
Code::cast(HeapObject::FromAddress(current_object_address)),
reinterpret_cast<Address>(new_object));
location_of_branch_data += Assembler::kSpecialTargetSize;
- current = reinterpret_cast<Object**>(location_of_branch_data);
+ current = reinterpret_cast<MaybeObject**>(location_of_branch_data);
current_was_incremented = true;
} else {
- UnalignedCopy(current, &new_object);
+ MaybeObject* new_maybe_object = MaybeObject::FromObject(new_object);
+ if (allocator()->GetAndClearNextReferenceIsWeak()) {
+ new_maybe_object = MaybeObject::MakeWeak(new_maybe_object);
+ }
+ UnalignedCopy(current, &new_maybe_object);
}
}
if (emit_write_barrier && write_barrier_needed) {
@@ -771,8 +881,8 @@ Object** Deserializer<AllocatorT>::ReadDataCase(Isolate* isolate,
SLOW_DCHECK(isolate->heap()->ContainsSlow(current_object_address));
isolate->heap()->RecordWrite(
HeapObject::FromAddress(current_object_address),
- reinterpret_cast<Object**>(current_address),
- *reinterpret_cast<Object**>(current_address));
+ reinterpret_cast<MaybeObject**>(current_address),
+ *reinterpret_cast<MaybeObject**>(current_address));
}
if (!current_was_incremented) {
current++;
diff --git a/deps/v8/src/snapshot/deserializer.h b/deps/v8/src/snapshot/deserializer.h
index cd563e46a1..d3b57b2137 100644
--- a/deps/v8/src/snapshot/deserializer.h
+++ b/deps/v8/src/snapshot/deserializer.h
@@ -101,7 +101,9 @@ class Deserializer : public SerializerDeserializer {
void Synchronize(VisitorSynchronization::SyncTag tag) override;
- void UnalignedCopy(Object** dest, Object** src) {
+ template <typename T>
+ void UnalignedCopy(T** dest, T** src) {
+ DCHECK(!allocator()->next_reference_is_weak());
memcpy(dest, src, sizeof(*src));
}
@@ -110,17 +112,24 @@ class Deserializer : public SerializerDeserializer {
// of the object we are writing into, or nullptr if we are not writing into an
// object, i.e. if we are writing a series of tagged values that are not on
// the heap. Return false if the object content has been deferred.
- bool ReadData(Object** start, Object** end, int space,
+ bool ReadData(MaybeObject** start, MaybeObject** end, int space,
Address object_address);
// A helper function for ReadData, templatized on the bytecode for efficiency.
// Returns the new value of {current}.
template <int where, int how, int within, int space_number_if_any>
- inline Object** ReadDataCase(Isolate* isolate, Object** current,
- Address current_object_address, byte data,
- bool write_barrier_needed);
+ inline MaybeObject** ReadDataCase(Isolate* isolate, MaybeObject** current,
+ Address current_object_address, byte data,
+ bool write_barrier_needed);
- void ReadObject(int space_number, Object** write_back);
+ // A helper function for ReadData for reading external references.
+ // Returns the new value of {current}.
+ inline void** ReadExternalReferenceCase(HowToCode how, Isolate* isolate,
+ void** current,
+ Address current_object_address);
+
+ void ReadObject(int space_number, MaybeObject** write_back,
+ HeapObjectReferenceType reference_type);
// Special handling for serialized code like hooking up internalized strings.
HeapObject* PostProcessNewObject(HeapObject* obj, int space);
@@ -171,7 +180,7 @@ class StringTableInsertionKey : public StringTableKey {
bool IsMatch(Object* string) override;
- MUST_USE_RESULT Handle<String> AsHandle(Isolate* isolate) override;
+ V8_WARN_UNUSED_RESULT Handle<String> AsHandle(Isolate* isolate) override;
private:
uint32_t ComputeHashField(String* string);
diff --git a/deps/v8/src/snapshot/embedded-empty.cc b/deps/v8/src/snapshot/embedded-empty.cc
new file mode 100644
index 0000000000..3294a6788e
--- /dev/null
+++ b/deps/v8/src/snapshot/embedded-empty.cc
@@ -0,0 +1,24 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Used for building without embedded data.
+
+#include <cstdint>
+
+namespace v8 {
+namespace internal {
+
+#ifdef V8_EMBEDDED_BUILTINS
+const uint8_t* DefaultEmbeddedBlob() { return nullptr; }
+uint32_t DefaultEmbeddedBlobSize() { return 0; }
+
+#ifdef V8_MULTI_SNAPSHOTS
+const uint8_t* TrustedEmbeddedBlob() { return nullptr; }
+uint32_t TrustedEmbeddedBlobSize() { return 0; }
+#endif
+
+#endif
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/snapshot/macros.h b/deps/v8/src/snapshot/macros.h
new file mode 100644
index 0000000000..5ea6917c16
--- /dev/null
+++ b/deps/v8/src/snapshot/macros.h
@@ -0,0 +1,78 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_MACROS_H_
+#define V8_SNAPSHOT_MACROS_H_
+
+#include "include/v8config.h"
+
+// .byte portability macros.
+
+#if defined(V8_OS_MACOSX) // MACOSX
+#define V8_ASM_MANGLE_LABEL "_"
+#define V8_ASM_RODATA_SECTION ".const_data\n"
+#define V8_ASM_TEXT_SECTION ".text\n"
+#define V8_ASM_DECLARE(NAME) ".private_extern " V8_ASM_MANGLE_LABEL NAME "\n"
+#elif defined(V8_OS_AIX) // AIX
+#define V8_ASM_RODATA_SECTION ".csect[RO]\n"
+#define V8_ASM_TEXT_SECTION ".csect .text[PR]\n"
+#define V8_ASM_MANGLE_LABEL ""
+#define V8_ASM_DECLARE(NAME) ".globl " V8_ASM_MANGLE_LABEL NAME "\n"
+#elif defined(V8_OS_WIN) // WIN
+#if defined(V8_TARGET_ARCH_X64)
+#define V8_ASM_MANGLE_LABEL ""
+#else
+#define V8_ASM_MANGLE_LABEL "_"
+#endif
+#define V8_ASM_RODATA_SECTION ".section .rodata\n"
+#define V8_ASM_TEXT_SECTION ".section .text\n"
+#define V8_ASM_DECLARE(NAME)
+#else // !MACOSX && !WIN && !AIX
+#define V8_ASM_MANGLE_LABEL ""
+#define V8_ASM_RODATA_SECTION ".section .rodata\n"
+#define V8_ASM_TEXT_SECTION ".section .text\n"
+#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)
+#define V8_ASM_DECLARE(NAME) ".global " V8_ASM_MANGLE_LABEL NAME "\n"
+#else
+#define V8_ASM_DECLARE(NAME) ".local " V8_ASM_MANGLE_LABEL NAME "\n"
+#endif
+#endif
+
+// Align to kCodeAlignment.
+#define V8_ASM_BALIGN32 ".balign 32\n"
+#define V8_ASM_LABEL(NAME) V8_ASM_MANGLE_LABEL NAME ":\n"
+
+// clang-format off
+#if defined(V8_OS_AIX)
+
+#define V8_EMBEDDED_TEXT_HEADER(LABEL) \
+ __asm__(V8_ASM_DECLARE(#LABEL) \
+ ".csect " #LABEL "[DS]\n" \
+ #LABEL ":\n" \
+ ".llong ." #LABEL ", TOC[tc0], 0\n" \
+ V8_ASM_TEXT_SECTION \
+ "." #LABEL ":\n");
+
+#define V8_EMBEDDED_RODATA_HEADER(LABEL) \
+ __asm__(V8_ASM_RODATA_SECTION \
+ V8_ASM_DECLARE(#LABEL) \
+ ".align 5\n" \
+ V8_ASM_LABEL(#LABEL));
+
+#else
+
+#define V8_EMBEDDED_TEXT_HEADER(LABEL) \
+ __asm__(V8_ASM_TEXT_SECTION \
+ V8_ASM_DECLARE(#LABEL) \
+ V8_ASM_BALIGN32 \
+ V8_ASM_LABEL(#LABEL));
+
+#define V8_EMBEDDED_RODATA_HEADER(LABEL) \
+ __asm__(V8_ASM_RODATA_SECTION \
+ V8_ASM_DECLARE(#LABEL) \
+ V8_ASM_BALIGN32 \
+ V8_ASM_LABEL(#LABEL));
+
+#endif // #if defined(V8_OS_AIX)
+#endif // V8_SNAPSHOT_MACROS_H_
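// Editorial illustration, not part of the patch: on the generic ELF branch
// above (the final #else, non-MIPS), and using the label name mksnapshot emits
// for the default variant, V8_EMBEDDED_TEXT_HEADER(v8_Default_embedded_blob_)
// concatenates the section, declare, align and label fragments into a single
// top-level asm statement roughly equivalent to:
__asm__(".section .text\n"
        ".local v8_Default_embedded_blob_\n"
        ".balign 32\n"
        "v8_Default_embedded_blob_:\n");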
diff --git a/deps/v8/src/snapshot/mksnapshot.cc b/deps/v8/src/snapshot/mksnapshot.cc
index 33c6c4a115..c862d63090 100644
--- a/deps/v8/src/snapshot/mksnapshot.cc
+++ b/deps/v8/src/snapshot/mksnapshot.cc
@@ -13,13 +13,25 @@
#include "src/msan.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/partial-serializer.h"
+#include "src/snapshot/snapshot.h"
#include "src/snapshot/startup-serializer.h"
+namespace {
class SnapshotWriter {
public:
SnapshotWriter()
: snapshot_cpp_path_(nullptr), snapshot_blob_path_(nullptr) {}
+#ifdef V8_EMBEDDED_BUILTINS
+ void SetEmbeddedFile(const char* embedded_cpp_file) {
+ embedded_cpp_path_ = embedded_cpp_file;
+ }
+
+ void SetEmbeddedVariant(const char* embedded_variant) {
+ embedded_variant_ = embedded_variant;
+ }
+#endif
+
void SetSnapshotFile(const char* snapshot_cpp_file) {
snapshot_cpp_path_ = snapshot_cpp_file;
}
@@ -39,6 +51,12 @@ class SnapshotWriter {
MaybeWriteStartupBlob(blob_vector);
}
+#ifdef V8_EMBEDDED_BUILTINS
+ void WriteEmbedded(const i::EmbeddedData* blob) const {
+ MaybeWriteEmbeddedFile(blob);
+ }
+#endif
+
private:
void MaybeWriteStartupBlob(const i::Vector<const i::byte>& blob) const {
if (!snapshot_blob_path_) return;
@@ -58,14 +76,14 @@ class SnapshotWriter {
FILE* fp = GetFileDescriptorOrDie(snapshot_cpp_path_);
- WriteFilePrefix(fp);
- WriteData(fp, blob);
- WriteFileSuffix(fp);
+ WriteSnapshotFilePrefix(fp);
+ WriteSnapshotFileData(fp, blob);
+ WriteSnapshotFileSuffix(fp);
fclose(fp);
}
- static void WriteFilePrefix(FILE* fp) {
+ static void WriteSnapshotFilePrefix(FILE* fp) {
fprintf(fp, "// Autogenerated snapshot file. Do not edit.\n\n");
fprintf(fp, "#include \"src/v8.h\"\n");
fprintf(fp, "#include \"src/base/platform/platform.h\"\n\n");
@@ -74,7 +92,7 @@ class SnapshotWriter {
fprintf(fp, "namespace internal {\n\n");
}
- static void WriteFileSuffix(FILE* fp) {
+ static void WriteSnapshotFileSuffix(FILE* fp) {
fprintf(fp, "const v8::StartupData* Snapshot::DefaultSnapshotBlob() {\n");
fprintf(fp, " return &blob;\n");
fprintf(fp, "}\n\n");
@@ -82,17 +100,18 @@ class SnapshotWriter {
fprintf(fp, "} // namespace v8\n");
}
- static void WriteData(FILE* fp, const i::Vector<const i::byte>& blob) {
+ static void WriteSnapshotFileData(FILE* fp,
+ const i::Vector<const i::byte>& blob) {
fprintf(fp, "static const byte blob_data[] = {\n");
- WriteSnapshotData(fp, blob);
+ WriteBinaryContentsAsCArray(fp, blob);
fprintf(fp, "};\n");
fprintf(fp, "static const int blob_size = %d;\n", blob.length());
fprintf(fp, "static const v8::StartupData blob =\n");
fprintf(fp, "{ (const char*) blob_data, blob_size };\n");
}
- static void WriteSnapshotData(FILE* fp,
- const i::Vector<const i::byte>& blob) {
+ static void WriteBinaryContentsAsCArray(
+ FILE* fp, const i::Vector<const i::byte>& blob) {
for (int i = 0; i < blob.length(); i++) {
if ((i & 0x1F) == 0x1F) fprintf(fp, "\n");
if (i > 0) fprintf(fp, ",");
@@ -101,6 +120,95 @@ class SnapshotWriter {
fprintf(fp, "\n");
}
+#ifdef V8_EMBEDDED_BUILTINS
+ void MaybeWriteEmbeddedFile(const i::EmbeddedData* blob) const {
+ if (embedded_cpp_path_ == nullptr) return;
+
+ FILE* fp = GetFileDescriptorOrDie(embedded_cpp_path_);
+
+ WriteEmbeddedFilePrefix(fp);
+ WriteEmbeddedFileData(fp, blob, embedded_variant_);
+ WriteEmbeddedFileSuffix(fp, embedded_variant_);
+
+ fclose(fp);
+ }
+
+ static void WriteEmbeddedFilePrefix(FILE* fp) {
+ fprintf(fp, "// Autogenerated file. Do not edit.\n\n");
+ fprintf(fp, "#include <cstdint>\n\n");
+ fprintf(fp, "#include \"src/snapshot/macros.h\"\n\n");
+ fprintf(fp, "namespace v8 {\n");
+ fprintf(fp, "namespace internal {\n\n");
+ fprintf(fp, "namespace {\n\n");
+ }
+
+ static void WriteEmbeddedFileSuffix(FILE* fp, const char* embedded_variant) {
+ fprintf(fp, "} // namespace\n\n");
+ fprintf(fp,
+ "const uint8_t* %sEmbeddedBlob() { return "
+ "v8_%s_embedded_blob_; }\n",
+ embedded_variant, embedded_variant);
+ fprintf(fp,
+ "uint32_t %sEmbeddedBlobSize() { return "
+ "v8_embedded_blob_size_; }\n\n",
+ embedded_variant);
+ fprintf(fp, "} // namespace internal\n");
+ fprintf(fp, "} // namespace v8\n");
+ }
+
+ static void WriteEmbeddedFileData(FILE* fp, const i::EmbeddedData* blob,
+ const char* embedded_variant) {
+ // Note: On some platforms (observed on mac64), inserting labels into the
+ // .byte stream causes the compiler to reorder symbols, invalidating stored
+ // offsets.
+ // We either need to avoid doing so, or stop relying on our own offset table
+ // and directly reference symbols instead. A further complication is that the
+ // Chrome build process on Mac verifies the order of symbols present in the
+ // binary.
+ // For now, the straightforward solution is to emit a pure .byte stream.
+ fprintf(fp, "V8_EMBEDDED_TEXT_HEADER(v8_%s_embedded_blob_)\n",
+ embedded_variant);
+ WriteBinaryContentsAsByteDirective(fp, blob->data(), blob->size());
+ fprintf(fp, "extern \"C\" const uint8_t v8_%s_embedded_blob_[];\n",
+ embedded_variant);
+ fprintf(fp, "static const uint32_t v8_embedded_blob_size_ = %d;\n\n",
+ blob->size());
+ }
+
+ static void WriteBinaryContentsAsByteDirective(FILE* fp, const uint8_t* data,
+ uint32_t size) {
+ static const int kTextWidth = 80;
+ int current_line_length = 0;
+ int printed_chars;
+
+ fprintf(fp, "__asm__(\n");
+ for (uint32_t i = 0; i < size; i++) {
+ if (current_line_length == 0) {
+ printed_chars = fprintf(fp, "%s", " \".byte ");
+ DCHECK_LT(0, printed_chars);
+ current_line_length += printed_chars;
+ } else {
+ printed_chars = fprintf(fp, ",");
+ DCHECK_EQ(1, printed_chars);
+ current_line_length += printed_chars;
+ }
+
+ printed_chars = fprintf(fp, "0x%02x", data[i]);
+ DCHECK_LT(0, printed_chars);
+ current_line_length += printed_chars;
+
+ if (current_line_length + strlen(",0xFF\\n\"") > kTextWidth) {
+ fprintf(fp, "\\n\"\n");
+ current_line_length = 0;
+ }
+ }
+
+ if (current_line_length != 0) fprintf(fp, "\\n\"\n");
+ fprintf(fp, ");\n");
+ }
+#endif
+
static FILE* GetFileDescriptorOrDie(const char* filename) {
FILE* fp = v8::base::OS::FOpen(filename, "wb");
if (fp == nullptr) {
@@ -110,6 +218,10 @@ class SnapshotWriter {
return fp;
}
+#ifdef V8_EMBEDDED_BUILTINS
+ const char* embedded_cpp_path_ = nullptr;
+ const char* embedded_variant_ = "Default";
+#endif
const char* snapshot_cpp_path_;
const char* snapshot_blob_path_;
};
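// Editorial sketch, not part of the patch: with the default embedded_variant_
// ("Default") and purely hypothetical byte values and blob size, the writer
// above produces an embedded file shaped roughly as follows; only the overall
// structure follows from the fprintf calls, the concrete data is invented here.

// Autogenerated file. Do not edit.

#include <cstdint>

#include "src/snapshot/macros.h"

namespace v8 {
namespace internal {

namespace {

V8_EMBEDDED_TEXT_HEADER(v8_Default_embedded_blob_)
__asm__(
 ".byte 0x55,0x48,0x89,0xe5,0x90,0x90,0x90,0x90\n"
 ".byte 0xc3,0x90,0x90,0x90,0x90,0x90,0x90,0x90\n"
);
extern "C" const uint8_t v8_Default_embedded_blob_[];
static const uint32_t v8_embedded_blob_size_ = 16;

}  // namespace

const uint8_t* DefaultEmbeddedBlob() { return v8_Default_embedded_blob_; }
uint32_t DefaultEmbeddedBlobSize() { return v8_embedded_blob_size_; }

}  // namespace internal
}  // namespace v8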
@@ -139,8 +251,117 @@ char* GetExtraCode(char* filename, const char* description) {
return chars;
}
+bool RunExtraCode(v8::Isolate* isolate, v8::Local<v8::Context> context,
+ const char* utf8_source, const char* name) {
+ v8::base::ElapsedTimer timer;
+ timer.Start();
+ v8::Context::Scope context_scope(context);
+ v8::TryCatch try_catch(isolate);
+ v8::Local<v8::String> source_string;
+ if (!v8::String::NewFromUtf8(isolate, utf8_source, v8::NewStringType::kNormal)
+ .ToLocal(&source_string)) {
+ return false;
+ }
+ v8::Local<v8::String> resource_name =
+ v8::String::NewFromUtf8(isolate, name, v8::NewStringType::kNormal)
+ .ToLocalChecked();
+ v8::ScriptOrigin origin(resource_name);
+ v8::ScriptCompiler::Source source(source_string, origin);
+ v8::Local<v8::Script> script;
+ if (!v8::ScriptCompiler::Compile(context, &source).ToLocal(&script))
+ return false;
+ if (script->Run(context).IsEmpty()) return false;
+ if (i::FLAG_profile_deserialization) {
+ i::PrintF("Executing custom snapshot script %s took %0.3f ms\n", name,
+ timer.Elapsed().InMillisecondsF());
+ }
+ timer.Stop();
+ CHECK(!try_catch.HasCaught());
+ return true;
+}
+
+v8::StartupData CreateSnapshotDataBlob(v8::SnapshotCreator* snapshot_creator,
+ const char* script_source = nullptr) {
+ // Create a new isolate and a new context from scratch, optionally run
+ // a script to embed, and serialize to create a snapshot blob.
+ v8::StartupData result = {nullptr, 0};
+ v8::base::ElapsedTimer timer;
+ timer.Start();
+ {
+ v8::Isolate* isolate = snapshot_creator->GetIsolate();
+ {
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ if (script_source != nullptr &&
+ !RunExtraCode(isolate, context, script_source, "<embedded>")) {
+ return result;
+ }
+ snapshot_creator->SetDefaultContext(context);
+ }
+ result = snapshot_creator->CreateBlob(
+ v8::SnapshotCreator::FunctionCodeHandling::kClear);
+ }
+
+ if (i::FLAG_profile_deserialization) {
+ i::PrintF("Creating snapshot took %0.3f ms\n",
+ timer.Elapsed().InMillisecondsF());
+ }
+ timer.Stop();
+ return result;
+}
+
+v8::StartupData WarmUpSnapshotDataBlob(v8::SnapshotCreator* snapshot_creator,
+ const char* warmup_source) {
+ CHECK_NOT_NULL(warmup_source);
+ // Use the following steps to create a warmed-up snapshot blob from a cold one:
+ // - Create a new isolate from the cold snapshot.
+ // - Create a new context to run the warmup script. This will trigger
+ // compilation of executed functions.
+ // - Create a new context. This context will be unpolluted.
+ // - Serialize the isolate and the second context into a new snapshot blob.
+ v8::StartupData result = {nullptr, 0};
+ v8::base::ElapsedTimer timer;
+ timer.Start();
+ {
+ v8::Isolate* isolate = snapshot_creator->GetIsolate();
+ {
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ if (!RunExtraCode(isolate, context, warmup_source, "<warm-up>")) {
+ return result;
+ }
+ }
+ {
+ v8::HandleScope handle_scope(isolate);
+ isolate->ContextDisposedNotification(false);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ snapshot_creator->SetDefaultContext(context);
+ }
+ result = snapshot_creator->CreateBlob(
+ v8::SnapshotCreator::FunctionCodeHandling::kKeep);
+ }
+
+ if (i::FLAG_profile_deserialization) {
+ i::PrintF("Warming up snapshot took %0.3f ms\n",
+ timer.Elapsed().InMillisecondsF());
+ }
+ timer.Stop();
+ return result;
+}
+
+#ifdef V8_EMBEDDED_BUILTINS
+void WriteEmbeddedFile(v8::SnapshotCreator* creator, SnapshotWriter* writer) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(creator->GetIsolate());
+ isolate->PrepareEmbeddedBlobForSerialization();
+ i::EmbeddedData embedded_blob = i::EmbeddedData::FromBlob();
+ writer->WriteEmbedded(&embedded_blob);
+}
+#endif // V8_EMBEDDED_BUILTINS
+} // namespace
int main(int argc, char** argv) {
+ v8::base::EnsureConsoleOutput();
+
// Make mksnapshot runs predictable to create reproducible snapshots.
i::FLAG_predictable = true;
@@ -164,19 +385,39 @@ int main(int argc, char** argv) {
SnapshotWriter writer;
if (i::FLAG_startup_src) writer.SetSnapshotFile(i::FLAG_startup_src);
if (i::FLAG_startup_blob) writer.SetStartupBlobFile(i::FLAG_startup_blob);
+#ifdef V8_EMBEDDED_BUILTINS
+ if (i::FLAG_embedded_src) writer.SetEmbeddedFile(i::FLAG_embedded_src);
+ if (i::FLAG_embedded_variant)
+ writer.SetEmbeddedVariant(i::FLAG_embedded_variant);
+#endif
+
+ std::unique_ptr<char> embed_script(
+ GetExtraCode(argc >= 2 ? argv[1] : nullptr, "embedding"));
+ std::unique_ptr<char> warmup_script(
+ GetExtraCode(argc >= 3 ? argv[2] : nullptr, "warm up"));
- char* embed_script =
- GetExtraCode(argc >= 2 ? argv[1] : nullptr, "embedding");
- v8::StartupData blob = v8::V8::CreateSnapshotDataBlob(embed_script);
- delete[] embed_script;
+ v8::StartupData blob;
+ {
+ v8::SnapshotCreator snapshot_creator;
+#ifdef V8_EMBEDDED_BUILTINS
+ // This process is a bit tricky since we might go on to make a second
+ // snapshot if a warmup script is passed. In that case, create the first
+ // snapshot without off-heap trampolines and only move code off-heap for
+ // the warmed-up snapshot.
+ if (!warmup_script) WriteEmbeddedFile(&snapshot_creator, &writer);
+#endif
+ blob = CreateSnapshotDataBlob(&snapshot_creator, embed_script.get());
+ }
- char* warmup_script =
- GetExtraCode(argc >= 3 ? argv[2] : nullptr, "warm up");
if (warmup_script) {
+ CHECK(blob.raw_size > 0 && blob.data != nullptr);
v8::StartupData cold = blob;
- blob = v8::V8::WarmUpSnapshotDataBlob(cold, warmup_script);
+ v8::SnapshotCreator snapshot_creator(nullptr, &cold);
+#ifdef V8_EMBEDDED_BUILTINS
+ WriteEmbeddedFile(&snapshot_creator, &writer);
+#endif
+ blob = WarmUpSnapshotDataBlob(&snapshot_creator, warmup_script.get());
delete[] cold.data;
- delete[] warmup_script;
}
CHECK(blob.data);
diff --git a/deps/v8/src/snapshot/object-deserializer.cc b/deps/v8/src/snapshot/object-deserializer.cc
index bd8757e318..1769f9ca24 100644
--- a/deps/v8/src/snapshot/object-deserializer.cc
+++ b/deps/v8/src/snapshot/object-deserializer.cc
@@ -59,7 +59,7 @@ ObjectDeserializer::DeserializeWasmCompiledModule(
if (!d.Deserialize(isolate).ToHandle(&result))
return MaybeHandle<WasmCompiledModule>();
- if (!result->IsFixedArray()) return MaybeHandle<WasmCompiledModule>();
+ if (!result->IsWasmCompiledModule()) return MaybeHandle<WasmCompiledModule>();
// Cast without type checks, as the module wrapper is not there yet.
return handle(static_cast<WasmCompiledModule*>(*result), isolate);
@@ -93,7 +93,8 @@ void ObjectDeserializer::
for (Code* code : new_code_objects()) {
// Record all references to embedded objects in the new code object.
isolate()->heap()->RecordWritesIntoCode(code);
- Assembler::FlushICache(code->instruction_start(), code->instruction_size());
+ Assembler::FlushICache(code->raw_instruction_start(),
+ code->raw_instruction_size());
}
}
@@ -102,9 +103,10 @@ void ObjectDeserializer::CommitPostProcessedObjects() {
StringTable::EnsureCapacityForDeserialization(
isolate(), static_cast<int>(new_internalized_strings().size()));
for (Handle<String> string : new_internalized_strings()) {
+ DisallowHeapAllocation no_gc;
StringTableInsertionKey key(*string);
DCHECK_NULL(StringTable::ForwardStringIfExists(isolate(), &key, *string));
- StringTable::LookupKey(isolate(), &key);
+ StringTable::AddKeyNoResize(isolate(), &key);
}
Heap* heap = isolate()->heap();
@@ -113,7 +115,8 @@ void ObjectDeserializer::CommitPostProcessedObjects() {
// Assign a new script id to avoid collision.
script->set_id(isolate()->heap()->NextScriptId());
// Add script to list.
- Handle<Object> list = WeakFixedArray::Add(factory->script_list(), script);
+ Handle<Object> list =
+ FixedArrayOfWeakCells::Add(factory->script_list(), script);
heap->SetRootScriptList(*list);
}
}
diff --git a/deps/v8/src/snapshot/partial-deserializer.cc b/deps/v8/src/snapshot/partial-deserializer.cc
index 6446f5e93f..afa6aa5fc5 100644
--- a/deps/v8/src/snapshot/partial-deserializer.cc
+++ b/deps/v8/src/snapshot/partial-deserializer.cc
@@ -31,7 +31,7 @@ MaybeHandle<Object> PartialDeserializer::Deserialize(
v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer) {
Initialize(isolate);
if (!allocator()->ReserveSpace()) {
- V8::FatalProcessOutOfMemory("PartialDeserializer");
+ V8::FatalProcessOutOfMemory(isolate, "PartialDeserializer");
}
AddAttachedObject(global_proxy);
diff --git a/deps/v8/src/snapshot/partial-serializer.cc b/deps/v8/src/snapshot/partial-serializer.cc
index 6661d9799f..8b4c9d8d92 100644
--- a/deps/v8/src/snapshot/partial-serializer.cc
+++ b/deps/v8/src/snapshot/partial-serializer.cc
@@ -53,10 +53,7 @@ void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
DCHECK(!ObjectIsBytecodeHandler(obj)); // Only referenced in dispatch table.
- BuiltinReferenceSerializationMode mode =
- startup_serializer_->clear_function_code() ? kCanonicalizeCompileLazy
- : kDefault;
- if (SerializeBuiltinReference(obj, how_to_code, where_to_point, skip, mode)) {
+ if (SerializeBuiltinReference(obj, how_to_code, where_to_point, skip)) {
return;
}
if (SerializeHotObject(obj, how_to_code, where_to_point, skip)) return;
@@ -104,6 +101,13 @@ void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
}
}
+ if (obj->IsJSFunction()) {
+ // Unconditionally reset the JSFunction to its SFI's code, since we can't
+ // serialize optimized code anyway.
+ JSFunction* closure = JSFunction::cast(obj);
+ closure->set_code(closure->shared()->GetCode());
+ }
+
CheckRehashability(obj);
// Object has not yet been serialized. Serialize it here.
diff --git a/deps/v8/src/snapshot/serializer-common.cc b/deps/v8/src/snapshot/serializer-common.cc
index d928b02ba1..d4f0c9eff7 100644
--- a/deps/v8/src/snapshot/serializer-common.cc
+++ b/deps/v8/src/snapshot/serializer-common.cc
@@ -22,7 +22,7 @@ ExternalReferenceEncoder::ExternalReferenceEncoder(Isolate* isolate) {
map_ = new AddressToIndexHashMap();
isolate->set_external_reference_map(map_);
// Add V8's external references.
- ExternalReferenceTable* table = ExternalReferenceTable::instance(isolate);
+ ExternalReferenceTable* table = isolate->heap()->external_reference_table();
for (uint32_t i = 0; i < table->size(); ++i) {
Address addr = table->address(i);
// Ignore duplicate references.
@@ -86,7 +86,7 @@ const char* ExternalReferenceEncoder::NameOfAddress(Isolate* isolate,
Address address) const {
Maybe<uint32_t> maybe_index = map_->Get(address);
if (maybe_index.IsNothing()) return "<unknown>";
- return ExternalReferenceTable::instance(isolate)->name(
+ return isolate->heap()->external_reference_table()->name(
maybe_index.FromJust());
}
@@ -98,6 +98,11 @@ void SerializedData::AllocateData(uint32_t size) {
DCHECK(IsAligned(reinterpret_cast<intptr_t>(data_), kPointerAlignment));
}
+// static
+uint32_t SerializedData::ComputeMagicNumber(Isolate* isolate) {
+ return ComputeMagicNumber(isolate->heap()->external_reference_table());
+}
+
// The partial snapshot cache is terminated by undefined. We visit the
// partial snapshot...
// - during deserialization to populate it.
diff --git a/deps/v8/src/snapshot/serializer-common.h b/deps/v8/src/snapshot/serializer-common.h
index f68694d5b8..26c2da2247 100644
--- a/deps/v8/src/snapshot/serializer-common.h
+++ b/deps/v8/src/snapshot/serializer-common.h
@@ -117,67 +117,55 @@ class SerializerDeserializer : public RootVisitor {
const std::vector<CallHandlerInfo*>& call_handler_infos);
#define UNUSED_SERIALIZER_BYTE_CODES(V) \
- V(0x1d) \
- V(0x1e) \
- V(0x55) \
- V(0x56) \
- V(0x57) \
- V(0x75) \
+ V(0x18) \
+ V(0x3d) \
+ V(0x3e) \
+ V(0x3f) \
+ V(0x58) \
+ V(0x59) \
+ V(0x5a) \
+ V(0x5b) \
+ V(0x5c) \
+ V(0x5d) \
+ V(0x5e) \
+ V(0x5f) \
+ V(0x67) \
V(0x76) \
- V(0x77) \
V(0x78) \
V(0x79) \
V(0x7a) \
V(0x7b) \
V(0x7c) \
- V(0x7d) \
- V(0x7e) \
- V(0x7f) \
- V(0xf0) \
- V(0xf1) \
- V(0xf2) \
- V(0xf3) \
- V(0xf4) \
- V(0xf5) \
- V(0xf6) \
- V(0xf7) \
- V(0xf8) \
- V(0xf9) \
- V(0xfa) \
- V(0xfb) \
- V(0xfc) \
- V(0xfd) \
- V(0xfe) \
- V(0xff)
+ V(0x7d)
// ---------- byte code range 0x00..0x7f ----------
// Byte codes in this range represent Where, HowToCode and WhereToPoint.
// Where the pointed-to object can be found:
// The static assert below will trigger when the number of preallocated spaces
// changes. If that happens, update the bytecode ranges in the comments below.
- STATIC_ASSERT(5 == kNumberOfSpaces);
+ STATIC_ASSERT(6 == kNumberOfSpaces);
enum Where {
- // 0x00..0x04 Allocate new object, in specified space.
+ // 0x00..0x05 Allocate new object, in specified space.
kNewObject = 0x00,
- // 0x08..0x0c Reference to previous object from space.
+ // 0x08..0x0d Reference to previous object from space.
kBackref = 0x08,
- // 0x10..0x14 Reference to previous object from space after skip.
+ // 0x10..0x15 Reference to previous object from space after skip.
kBackrefWithSkip = 0x10,
- // 0x05 Root array item.
- kRootArray = 0x05,
// 0x06 Object in the partial snapshot cache.
kPartialSnapshotCache = 0x06,
// 0x07 External reference referenced by id.
kExternalReference = 0x07,
- // 0x0d Object provided in the attached list.
- kAttachedReference = 0x0d,
// 0x0e Builtin code referenced by index.
kBuiltin = 0x0e,
+ // 0x16 Root array item.
+ kRootArray = 0x16,
+ // 0x17 Object provided in the attached list.
+ kAttachedReference = 0x17,
// 0x0f Misc, see below (incl. 0x2f, 0x4f, 0x6f).
- // 0x15..0x1f Misc, see below (incl. 0x35..0x3f, 0x55..0x5f, 0x75..0x7f).
+ // 0x18..0x1f Misc, see below (incl. 0x38..0x3f, 0x58..0x5f, 0x78..0x7f).
};
static const int kWhereMask = 0x1f;
@@ -213,41 +201,38 @@ class SerializerDeserializer : public RootVisitor {
static const int kNextChunk = 0x4f;
// Deferring object content.
static const int kDeferred = 0x6f;
- // Alignment prefixes 0x15..0x17
- static const int kAlignmentPrefix = 0x15;
+ // Alignment prefixes 0x19..0x1b
+ static const int kAlignmentPrefix = 0x19;
// A tag emitted at strategic points in the snapshot to delineate sections.
// If the deserializer does not find these at the expected moments then it
// is an indication that the snapshot and the VM do not fit together.
// Examine the build process for architecture, version or configuration
// mismatches.
- static const int kSynchronize = 0x18;
+ static const int kSynchronize = 0x1c;
// Repeats of variable length.
- static const int kVariableRepeat = 0x19;
+ static const int kVariableRepeat = 0x1d;
// Raw data of variable length.
- static const int kVariableRawCode = 0x1a;
- static const int kVariableRawData = 0x1b;
// Used for embedder-allocated backing stores for TypedArrays.
- static const int kOffHeapBackingStore = 0x1c;
+ static const int kOffHeapBackingStore = 0x1e;
// Used for embedder-provided serialization data for embedder fields.
static const int kEmbedderFieldsData = 0x1f;
- // Internal reference encoded as offsets of pc and target from code entry.
- static const int kInternalReference = 0x35;
- static const int kInternalReferenceEncoded = 0x36;
-
// Used to encode external references provided through the API.
- static const int kApiReference = 0x37;
+ static const int kApiReference = 0x38;
- // 8 hot (recently seen or back-referenced) objects with optional skip.
- static const int kNumberOfHotObjects = 8;
- STATIC_ASSERT(kNumberOfHotObjects == HotObjectsList::kSize);
- // 0x38..0x3f
- static const int kHotObject = 0x38;
- // 0x58..0x5f
- static const int kHotObjectWithSkip = 0x58;
- static const int kHotObjectMask = 0x07;
+ static const int kVariableRawCode = 0x39;
+ static const int kVariableRawData = 0x3a;
+
+ static const int kInternalReference = 0x3b;
+ static const int kInternalReferenceEncoded = 0x3c;
+
+ // In-place weak references
+ static const int kWeakPrefix = 0x7e;
+
+ // Encodes an off-heap instruction stream target.
+ static const int kOffHeapTarget = 0x7f;
// ---------- byte code range 0x80..0xff ----------
// First 32 root array items.
@@ -271,6 +256,15 @@ class SerializerDeserializer : public RootVisitor {
static const int kFixedRepeat = 0xe0;
static const int kFixedRepeatStart = kFixedRepeat - 1;
+ // 8 hot (recently seen or back-referenced) objects with optional skip.
+ static const int kNumberOfHotObjects = 8;
+ STATIC_ASSERT(kNumberOfHotObjects == HotObjectsList::kSize);
+ // 0xf0..0xf7
+ static const int kHotObject = 0xf0;
+ // 0xf8..0xff
+ static const int kHotObjectWithSkip = 0xf8;
+ static const int kHotObjectMask = 0x07;
+
// ---------- special values ----------
static const int kAnyOldSpace = -1;
@@ -334,9 +328,7 @@ class SerializedData {
void AllocateData(uint32_t size);
- static uint32_t ComputeMagicNumber(Isolate* isolate) {
- return ComputeMagicNumber(ExternalReferenceTable::instance(isolate));
- }
+ static uint32_t ComputeMagicNumber(Isolate* isolate);
void SetMagicNumber(Isolate* isolate) {
SetHeaderValue(kMagicNumberOffset, ComputeMagicNumber(isolate));
diff --git a/deps/v8/src/snapshot/serializer.cc b/deps/v8/src/snapshot/serializer.cc
index b477227154..c093707e5e 100644
--- a/deps/v8/src/snapshot/serializer.cc
+++ b/deps/v8/src/snapshot/serializer.cc
@@ -10,6 +10,7 @@
#include "src/objects/map.h"
#include "src/snapshot/builtin-serializer-allocator.h"
#include "src/snapshot/natives.h"
+#include "src/snapshot/snapshot.h"
namespace v8 {
namespace internal {
@@ -185,7 +186,7 @@ bool Serializer<AllocatorT>::SerializeBackReference(HeapObject* obj,
template <class AllocatorT>
bool Serializer<AllocatorT>::SerializeBuiltinReference(
HeapObject* obj, HowToCode how_to_code, WhereToPoint where_to_point,
- int skip, BuiltinReferenceSerializationMode mode) {
+ int skip) {
if (!obj->IsCode()) return false;
Code* code = Code::cast(obj);
@@ -197,11 +198,6 @@ bool Serializer<AllocatorT>::SerializeBuiltinReference(
DCHECK_LT(builtin_index, Builtins::builtin_count);
DCHECK_LE(0, builtin_index);
- if (mode == kCanonicalizeCompileLazy &&
- code->is_interpreter_trampoline_builtin()) {
- builtin_index = static_cast<int>(Builtins::kCompileLazy);
- }
-
if (FLAG_trace_serializer) {
PrintF(" Encoding builtin reference: %s\n",
isolate()->builtins()->name(builtin_index));
@@ -402,9 +398,7 @@ void Serializer<AllocatorT>::ObjectSerializer::SerializeJSTypedArray() {
FixedTypedArrayBase::cast(typed_array->elements());
if (!typed_array->WasNeutered()) {
- bool off_heap = elements->base_pointer() == nullptr;
-
- if (off_heap) {
+ if (!typed_array->is_on_heap()) {
// Explicitly serialize the backing store now.
JSArrayBuffer* buffer = JSArrayBuffer::cast(typed_array->buffer());
CHECK(buffer->byte_length()->IsSmi());
@@ -450,6 +444,7 @@ void Serializer<AllocatorT>::ObjectSerializer::SerializeJSArrayBuffer() {
buffer->set_backing_store(Smi::FromInt(ref));
}
SerializeObject();
+ buffer->set_backing_store(backing_store);
}
template <class AllocatorT>
@@ -670,12 +665,12 @@ void Serializer<AllocatorT>::ObjectSerializer::SerializeContent(Map* map,
// For code objects, output raw bytes first.
OutputCode(size);
// Then iterate references via reloc info.
- object_->IterateBody(map->instance_type(), size, this);
+ object_->IterateBody(map, size, this);
// Finally skip to the end.
serializer_->FlushSkip(SkipTo(object_->address() + size));
} else {
// For other objects, iterate references first.
- object_->IterateBody(map->instance_type(), size, this);
+ object_->IterateBody(map, size, this);
// Then output data payload, if any.
OutputRawData(object_->address() + size);
}
@@ -685,23 +680,37 @@ template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::VisitPointers(HeapObject* host,
Object** start,
Object** end) {
- Object** current = start;
- while (current < end) {
- while (current < end && (*current)->IsSmi()) current++;
- if (current < end) OutputRawData(reinterpret_cast<Address>(current));
+ VisitPointers(host, reinterpret_cast<MaybeObject**>(start),
+ reinterpret_cast<MaybeObject**>(end));
+}
- while (current < end && !(*current)->IsSmi()) {
- HeapObject* current_contents = HeapObject::cast(*current);
+template <class AllocatorT>
+void Serializer<AllocatorT>::ObjectSerializer::VisitPointers(
+ HeapObject* host, MaybeObject** start, MaybeObject** end) {
+ MaybeObject** current = start;
+ while (current < end) {
+ while (current < end &&
+ ((*current)->IsSmi() || (*current)->IsClearedWeakHeapObject())) {
+ current++;
+ }
+ if (current < end) {
+ OutputRawData(reinterpret_cast<Address>(current));
+ }
+ HeapObject* current_contents;
+ HeapObjectReferenceType reference_type;
+ while (current < end && (*current)->ToStrongOrWeakHeapObject(
+ &current_contents, &reference_type)) {
int root_index = serializer_->root_index_map()->Lookup(current_contents);
// Repeats are not subject to the write barrier so we can only use
// immortal immovable root members. They are never in new space.
if (current != start && root_index != RootIndexMap::kInvalidRootIndex &&
Heap::RootIsImmortalImmovable(root_index) &&
- current_contents == current[-1]) {
+ *current == current[-1]) {
+ DCHECK_EQ(reference_type, HeapObjectReferenceType::STRONG);
DCHECK(!serializer_->isolate()->heap()->InNewSpace(current_contents));
int repeat_count = 1;
while (&current[repeat_count] < end - 1 &&
- current[repeat_count] == current_contents) {
+ current[repeat_count] == *current) {
repeat_count++;
}
current += repeat_count;
@@ -713,6 +722,9 @@ void Serializer<AllocatorT>::ObjectSerializer::VisitPointers(HeapObject* host,
sink_->Put(kFixedRepeatStart + repeat_count, "FixedRepeat");
}
} else {
+ if (reference_type == HeapObjectReferenceType::WEAK) {
+ sink_->Put(kWeakPrefix, "WeakReference");
+ }
serializer_->SerializeObject(current_contents, kPlain, kStartOfObject,
0);
bytes_processed_so_far_ += kPointerSize;
@@ -782,9 +794,9 @@ void Serializer<AllocatorT>::ObjectSerializer::VisitInternalReference(
intptr_t pc_offset = rinfo->target_internal_reference_address() - entry;
intptr_t target_offset = rinfo->target_internal_reference() - entry;
DCHECK(0 <= pc_offset &&
- pc_offset <= Code::cast(object_)->instruction_size());
+ pc_offset <= Code::cast(object_)->raw_instruction_size());
DCHECK(0 <= target_offset &&
- target_offset <= Code::cast(object_)->instruction_size());
+ target_offset <= Code::cast(object_)->raw_instruction_size());
sink_->Put(rinfo->rmode() == RelocInfo::INTERNAL_REFERENCE
? kInternalReference
: kInternalReferenceEncoded,
@@ -808,6 +820,29 @@ void Serializer<AllocatorT>::ObjectSerializer::VisitRuntimeEntry(
}
template <class AllocatorT>
+void Serializer<AllocatorT>::ObjectSerializer::VisitOffHeapTarget(
+ Code* host, RelocInfo* rinfo) {
+#ifdef V8_EMBEDDED_BUILTINS
+ {
+ STATIC_ASSERT(EmbeddedData::kTableSize == Builtins::builtin_count);
+ CHECK(Builtins::IsEmbeddedBuiltin(host));
+ Address addr = rinfo->target_off_heap_target();
+ CHECK_NOT_NULL(addr);
+ CHECK_NOT_NULL(
+ InstructionStream::TryLookupCode(serializer_->isolate(), addr));
+ }
+
+ int skip = SkipTo(rinfo->target_address_address());
+ sink_->Put(kOffHeapTarget, "OffHeapTarget");
+ sink_->PutInt(skip, "SkipB4OffHeapTarget");
+ sink_->PutInt(host->builtin_index(), "builtin index");
+ bytes_processed_so_far_ += rinfo->target_address_size();
+#else
+ UNREACHABLE();
+#endif
+}
+
+template <class AllocatorT>
void Serializer<AllocatorT>::ObjectSerializer::VisitCodeTarget(
Code* host, RelocInfo* rinfo) {
int skip = SkipTo(rinfo->target_address_address());
diff --git a/deps/v8/src/snapshot/serializer.h b/deps/v8/src/snapshot/serializer.h
index 586c8802c0..9f8db6ccfe 100644
--- a/deps/v8/src/snapshot/serializer.h
+++ b/deps/v8/src/snapshot/serializer.h
@@ -117,12 +117,8 @@ class CodeAddressMap : public CodeEventLogger {
const char* name, int length) override {
address_to_name_map_.Insert(code->address(), name, length);
}
- void LogRecordedBuffer(const InstructionStream* stream, const char* name,
- int length) override {
- address_to_name_map_.Insert(stream->bytes(), name, length);
- }
- void LogRecordedBuffer(wasm::WasmCode* code, const char* name,
+ void LogRecordedBuffer(const wasm::WasmCode* code, const char* name,
int length) override {
UNREACHABLE();
}
@@ -193,17 +189,10 @@ class Serializer : public SerializerDeserializer {
bool SerializeBackReference(HeapObject* obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip);
- // Determines whether the interpreter trampoline is replaced by CompileLazy.
- enum BuiltinReferenceSerializationMode {
- kDefault,
- kCanonicalizeCompileLazy,
- };
-
// Returns true if the object was successfully serialized as a builtin
// reference.
- bool SerializeBuiltinReference(
- HeapObject* obj, HowToCode how_to_code, WhereToPoint where_to_point,
- int skip, BuiltinReferenceSerializationMode mode = kDefault);
+ bool SerializeBuiltinReference(HeapObject* obj, HowToCode how_to_code,
+ WhereToPoint where_to_point, int skip);
// Returns true if the given heap object is a bytecode handler code object.
bool ObjectIsBytecodeHandler(HeapObject* obj) const;
@@ -301,12 +290,15 @@ class Serializer<AllocatorT>::ObjectSerializer : public ObjectVisitor {
void SerializeObject();
void SerializeDeferred();
void VisitPointers(HeapObject* host, Object** start, Object** end) override;
+ void VisitPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) override;
void VisitEmbeddedPointer(Code* host, RelocInfo* target) override;
void VisitExternalReference(Foreign* host, Address* p) override;
void VisitExternalReference(Code* host, RelocInfo* rinfo) override;
void VisitInternalReference(Code* host, RelocInfo* rinfo) override;
void VisitCodeTarget(Code* host, RelocInfo* target) override;
void VisitRuntimeEntry(Code* host, RelocInfo* reloc) override;
+ void VisitOffHeapTarget(Code* host, RelocInfo* target) override;
private:
void SerializePrologue(AllocationSpace space, int size, Map* map);
diff --git a/deps/v8/src/snapshot/snapshot-common.cc b/deps/v8/src/snapshot/snapshot-common.cc
index 534339b2e5..a33c468bb8 100644
--- a/deps/v8/src/snapshot/snapshot-common.cc
+++ b/deps/v8/src/snapshot/snapshot-common.cc
@@ -8,6 +8,8 @@
#include "src/api.h"
#include "src/base/platform/platform.h"
+#include "src/callable.h"
+#include "src/interface-descriptors.h"
#include "src/objects-inl.h"
#include "src/snapshot/builtin-deserializer.h"
#include "src/snapshot/builtin-serializer.h"
@@ -123,6 +125,10 @@ Code* Snapshot::DeserializeBuiltin(Isolate* isolate, int builtin_id) {
void Snapshot::EnsureAllBuiltinsAreDeserialized(Isolate* isolate) {
if (!FLAG_lazy_deserialization) return;
+ if (FLAG_trace_lazy_deserialization) {
+ PrintF("Forcing eager builtin deserialization\n");
+ }
+
Builtins* builtins = isolate->builtins();
for (int i = 0; i < Builtins::builtin_count; i++) {
if (!Builtins::IsLazy(i)) continue;
@@ -139,6 +145,29 @@ void Snapshot::EnsureAllBuiltinsAreDeserialized(Isolate* isolate) {
}
// static
+Code* Snapshot::EnsureBuiltinIsDeserialized(Isolate* isolate,
+ Handle<SharedFunctionInfo> shared) {
+ DCHECK(FLAG_lazy_deserialization);
+
+ int builtin_id = shared->builtin_id();
+
+ // We should never lazily deserialize DeserializeLazy.
+ DCHECK_NE(Builtins::kDeserializeLazy, builtin_id);
+
+ // Look up code from builtins list.
+ Code* code = isolate->builtins()->builtin(builtin_id);
+
+ // Deserialize if builtin is not on the list.
+ if (code->builtin_index() != builtin_id) {
+ DCHECK_EQ(code->builtin_index(), Builtins::kDeserializeLazy);
+ code = Snapshot::DeserializeBuiltin(isolate, builtin_id);
+ DCHECK_EQ(builtin_id, code->builtin_index());
+ DCHECK_EQ(code, isolate->builtins()->builtin(builtin_id));
+ }
+ return code;
+}
+
+// static
Code* Snapshot::DeserializeHandler(Isolate* isolate,
interpreter::Bytecode bytecode,
interpreter::OperandScale operand_scale) {
@@ -264,6 +293,133 @@ v8::StartupData Snapshot::CreateSnapshotBlob(
return result;
}
+#ifdef V8_EMBEDDED_BUILTINS
+namespace {
+bool BuiltinAliasesOffHeapTrampolineRegister(Isolate* isolate, Code* code) {
+ DCHECK(Builtins::IsIsolateIndependent(code->builtin_index()));
+ switch (Builtins::KindOf(code->builtin_index())) {
+ case Builtins::CPP:
+ case Builtins::TFC:
+ case Builtins::TFH:
+ case Builtins::TFJ:
+ case Builtins::TFS:
+ break;
+ case Builtins::API:
+ case Builtins::ASM:
+ // TODO(jgruber): Extend checks to remaining kinds.
+ return false;
+ }
+
+ Callable callable = Builtins::CallableFor(
+ isolate, static_cast<Builtins::Name>(code->builtin_index()));
+ CallInterfaceDescriptor descriptor = callable.descriptor();
+
+ if (descriptor.ContextRegister() == kOffHeapTrampolineRegister) {
+ return true;
+ }
+
+ for (int i = 0; i < descriptor.GetRegisterParameterCount(); i++) {
+ Register reg = descriptor.GetRegisterParameter(i);
+ if (reg == kOffHeapTrampolineRegister) return true;
+ }
+
+ return false;
+}
+} // namespace
+
+// static
+EmbeddedData EmbeddedData::FromIsolate(Isolate* isolate) {
+ Builtins* builtins = isolate->builtins();
+
+ // Store instruction stream lengths and offsets.
+ std::vector<uint32_t> lengths(kTableSize);
+ std::vector<uint32_t> offsets(kTableSize);
+
+ bool saw_unsafe_builtin = false;
+ uint32_t raw_data_size = 0;
+ for (int i = 0; i < Builtins::builtin_count; i++) {
+ Code* code = builtins->builtin(i);
+
+ if (Builtins::IsIsolateIndependent(i)) {
+ DCHECK(!Builtins::IsLazy(i));
+
+ // Sanity-check that the given builtin is process-independent and does not
+ // use the trampoline register in its calling convention.
+ if (!code->IsProcessIndependent()) {
+ saw_unsafe_builtin = true;
+ fprintf(stderr, "%s is not process-independent.\n", Builtins::name(i));
+ }
+ if (BuiltinAliasesOffHeapTrampolineRegister(isolate, code)) {
+ saw_unsafe_builtin = true;
+ fprintf(stderr, "%s aliases the off-heap trampoline register.\n",
+ Builtins::name(i));
+ }
+
+ uint32_t length = static_cast<uint32_t>(code->raw_instruction_size());
+
+ DCHECK_EQ(0, raw_data_size % kCodeAlignment);
+ offsets[i] = raw_data_size;
+ lengths[i] = length;
+
+ // Align the start of each instruction stream.
+ raw_data_size += RoundUp<kCodeAlignment>(length);
+ } else {
+ offsets[i] = raw_data_size;
+ lengths[i] = 0;
+ }
+ }
+ CHECK(!saw_unsafe_builtin);
+
+ const uint32_t blob_size = RawDataOffset() + raw_data_size;
+ uint8_t* blob = new uint8_t[blob_size];
+ std::memset(blob, 0, blob_size);
+
+ // Write the offsets and length tables.
+ DCHECK_EQ(OffsetsSize(), sizeof(offsets[0]) * offsets.size());
+ std::memcpy(blob + OffsetsOffset(), offsets.data(), OffsetsSize());
+
+ DCHECK_EQ(LengthsSize(), sizeof(lengths[0]) * lengths.size());
+ std::memcpy(blob + LengthsOffset(), lengths.data(), LengthsSize());
+
+ // Write the raw data section.
+ for (int i = 0; i < Builtins::builtin_count; i++) {
+ if (!Builtins::IsIsolateIndependent(i)) continue;
+ Code* code = builtins->builtin(i);
+ uint32_t offset = offsets[i];
+ uint8_t* dst = blob + RawDataOffset() + offset;
+ DCHECK_LE(RawDataOffset() + offset + code->raw_instruction_size(),
+ blob_size);
+ std::memcpy(dst, code->raw_instruction_start(),
+ code->raw_instruction_size());
+ }
+
+ return {blob, blob_size};
+}
+
+EmbeddedData EmbeddedData::FromBlob() {
+ const uint8_t* data = Isolate::CurrentEmbeddedBlob();
+ uint32_t size = Isolate::CurrentEmbeddedBlobSize();
+ DCHECK_NOT_NULL(data);
+ DCHECK_LT(0, size);
+ return {data, size};
+}
+
+const uint8_t* EmbeddedData::InstructionStartOfBuiltin(int i) const {
+ DCHECK(Builtins::IsBuiltinId(i));
+
+ const uint32_t* offsets = Offsets();
+ const uint8_t* result = RawData() + offsets[i];
+ DCHECK_LT(result, data_ + size_);
+ return result;
+}
+
+uint32_t EmbeddedData::InstructionSizeOfBuiltin(int i) const {
+ DCHECK(Builtins::IsBuiltinId(i));
+ const uint32_t* lengths = Lengths();
+ return lengths[i];
+}
+#endif
+
uint32_t Snapshot::ExtractNumContexts(const v8::StartupData* data) {
CHECK_LT(kNumberOfContextsOffset, data->raw_size);
uint32_t num_contexts = GetHeaderValue(data, kNumberOfContextsOffset);
diff --git a/deps/v8/src/snapshot/snapshot.h b/deps/v8/src/snapshot/snapshot.h
index 23d6e3689f..b86a4ac9f9 100644
--- a/deps/v8/src/snapshot/snapshot.h
+++ b/deps/v8/src/snapshot/snapshot.h
@@ -79,6 +79,62 @@ class BuiltinSnapshotData final : public SnapshotData {
// ... list of builtins offsets
};
+#ifdef V8_EMBEDDED_BUILTINS
+class EmbeddedData final {
+ public:
+ static EmbeddedData FromIsolate(Isolate* isolate);
+ static EmbeddedData FromBlob();
+
+ const uint8_t* data() const { return data_; }
+ uint32_t size() const { return size_; }
+
+ void Dispose() { delete[] data_; }
+
+ const uint8_t* InstructionStartOfBuiltin(int i) const;
+ uint32_t InstructionSizeOfBuiltin(int i) const;
+
+ bool ContainsBuiltin(int i) const { return InstructionSizeOfBuiltin(i) > 0; }
+
+ // Padded with kCodeAlignment.
+ uint32_t PaddedInstructionSizeOfBuiltin(int i) const {
+ return RoundUp<kCodeAlignment>(InstructionSizeOfBuiltin(i));
+ }
+
+ // The layout of the blob is as follows:
+ //
+ // [0] offset of instruction stream 0
+ // ... offsets
+ // [N] length of instruction stream 0
+ // ... lengths
+ // ... instruction streams
+
+ static constexpr uint32_t kTableSize = Builtins::builtin_count;
+ static constexpr uint32_t OffsetsOffset() { return 0; }
+ static constexpr uint32_t OffsetsSize() { return kUInt32Size * kTableSize; }
+ static constexpr uint32_t LengthsOffset() {
+ return OffsetsOffset() + OffsetsSize();
+ }
+ static constexpr uint32_t LengthsSize() { return kUInt32Size * kTableSize; }
+ static constexpr uint32_t RawDataOffset() {
+ return RoundUp<kCodeAlignment>(LengthsOffset() + LengthsSize());
+ }
+
+ private:
+ EmbeddedData(const uint8_t* data, uint32_t size) : data_(data), size_(size) {}
+
+ const uint32_t* Offsets() const {
+ return reinterpret_cast<const uint32_t*>(data_ + OffsetsOffset());
+ }
+ const uint32_t* Lengths() const {
+ return reinterpret_cast<const uint32_t*>(data_ + LengthsOffset());
+ }
+ const uint8_t* RawData() const { return data_ + RawDataOffset(); }
+
+ const uint8_t* data_;
+ uint32_t size_;
+};
+#endif
+
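+ // Editorial worked example, not part of the patch: for a hypothetical table
+ // of 3 builtins and kCodeAlignment == 32 (the alignment V8_ASM_BALIGN32
+ // assumes), the layout above gives
+ //   OffsetsOffset() == 0,   OffsetsSize() == 3 * kUInt32Size == 12,
+ //   LengthsOffset() == 12,  LengthsSize() == 12,
+ //   RawDataOffset() == RoundUp<32>(24) == 32,
+ // so builtin i's instruction stream starts at data_ + 32 + Offsets()[i].
+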
class Snapshot : public AllStatic {
public:
// ---------------- Deserialization ----------------
@@ -98,6 +154,8 @@ class Snapshot : public AllStatic {
// initialized.
static Code* DeserializeBuiltin(Isolate* isolate, int builtin_id);
static void EnsureAllBuiltinsAreDeserialized(Isolate* isolate);
+ static Code* EnsureBuiltinIsDeserialized(Isolate* isolate,
+ Handle<SharedFunctionInfo> shared);
// Deserializes a single given handler code object. Intended to be called at
// runtime after the isolate has been fully initialized.
diff --git a/deps/v8/src/snapshot/startup-deserializer.cc b/deps/v8/src/snapshot/startup-deserializer.cc
index d0369984b8..16d731493d 100644
--- a/deps/v8/src/snapshot/startup-deserializer.cc
+++ b/deps/v8/src/snapshot/startup-deserializer.cc
@@ -20,7 +20,7 @@ void StartupDeserializer::DeserializeInto(Isolate* isolate) {
if (!DefaultDeserializerAllocator::ReserveSpace(this,
&builtin_deserializer)) {
- V8::FatalProcessOutOfMemory("StartupDeserializer");
+ V8::FatalProcessOutOfMemory(isolate, "StartupDeserializer");
}
// No active threads.
diff --git a/deps/v8/src/snapshot/startup-serializer.cc b/deps/v8/src/snapshot/startup-serializer.cc
index b02d572595..dc85a57e11 100644
--- a/deps/v8/src/snapshot/startup-serializer.cc
+++ b/deps/v8/src/snapshot/startup-serializer.cc
@@ -12,13 +12,8 @@
namespace v8 {
namespace internal {
-StartupSerializer::StartupSerializer(
- Isolate* isolate,
- v8::SnapshotCreator::FunctionCodeHandling function_code_handling)
- : Serializer(isolate),
- clear_function_code_(function_code_handling ==
- v8::SnapshotCreator::FunctionCodeHandling::kClear),
- can_be_rehashed_(true) {
+StartupSerializer::StartupSerializer(Isolate* isolate)
+ : Serializer(isolate), can_be_rehashed_(true) {
InitializeCodeAddressMap();
}
@@ -33,13 +28,7 @@ void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
DCHECK(!ObjectIsBytecodeHandler(obj)); // Only referenced in dispatch table.
DCHECK(!obj->IsJSFunction());
- if (clear_function_code() && obj->IsBytecodeArray()) {
- obj = isolate()->heap()->undefined_value();
- }
-
- BuiltinReferenceSerializationMode mode =
- clear_function_code() ? kCanonicalizeCompileLazy : kDefault;
- if (SerializeBuiltinReference(obj, how_to_code, where_to_point, skip, mode)) {
+ if (SerializeBuiltinReference(obj, how_to_code, where_to_point, skip)) {
return;
}
if (SerializeHotObject(obj, how_to_code, where_to_point, skip)) return;
diff --git a/deps/v8/src/snapshot/startup-serializer.h b/deps/v8/src/snapshot/startup-serializer.h
index ad440965b0..190cc59529 100644
--- a/deps/v8/src/snapshot/startup-serializer.h
+++ b/deps/v8/src/snapshot/startup-serializer.h
@@ -14,9 +14,7 @@ namespace internal {
class StartupSerializer : public Serializer<> {
public:
- StartupSerializer(
- Isolate* isolate,
- v8::SnapshotCreator::FunctionCodeHandling function_code_handling);
+ explicit StartupSerializer(Isolate* isolate);
~StartupSerializer() override;
// Serialize the current state of the heap. The order is:
@@ -30,7 +28,6 @@ class StartupSerializer : public Serializer<> {
int PartialSnapshotCacheIndex(HeapObject* o);
bool can_be_rehashed() const { return can_be_rehashed_; }
- bool clear_function_code() const { return clear_function_code_; }
bool root_has_been_serialized(int root_index) const {
return root_has_been_serialized_.test(root_index);
}
@@ -72,7 +69,6 @@ class StartupSerializer : public Serializer<> {
void CheckRehashability(HeapObject* obj);
- const bool clear_function_code_;
std::bitset<Heap::kStrongRootListLength> root_has_been_serialized_;
PartialCacheIndexMap partial_cache_index_map_;
std::vector<AccessorInfo*> accessor_infos_;
diff --git a/deps/v8/src/source-position-table.cc b/deps/v8/src/source-position-table.cc
index f7306c82ce..1057aba08c 100644
--- a/deps/v8/src/source-position-table.cc
+++ b/deps/v8/src/source-position-table.cc
@@ -75,13 +75,13 @@ void EncodeEntry(std::vector<byte>& bytes, const PositionTableEntry& entry) {
// Helper: Decode an integer.
template <typename T>
-T DecodeInt(ByteArray* bytes, int* index) {
+T DecodeInt(Vector<const byte> bytes, int* index) {
byte current;
int shift = 0;
T decoded = 0;
bool more;
do {
- current = bytes->get((*index)++);
+ current = bytes[(*index)++];
decoded |= static_cast<typename std::make_unsigned<T>::type>(
ValueBits::decode(current))
<< shift;
@@ -93,7 +93,8 @@ T DecodeInt(ByteArray* bytes, int* index) {
return decoded;
}
-void DecodeEntry(ByteArray* bytes, int* index, PositionTableEntry* entry) {
+void DecodeEntry(Vector<const byte> bytes, int* index,
+ PositionTableEntry* entry) {
int tmp = DecodeInt<int>(bytes, index);
if (tmp >= 0) {
entry->is_statement = true;
@@ -105,6 +106,11 @@ void DecodeEntry(ByteArray* bytes, int* index, PositionTableEntry* entry) {
entry->source_position = DecodeInt<int64_t>(bytes, index);
}
+Vector<const byte> VectorFromByteArray(ByteArray* byte_array) {
+ return Vector<const byte>(byte_array->GetDataStartAddress(),
+ byte_array->length());
+}
+
} // namespace
SourcePositionTableBuilder::SourcePositionTableBuilder(
@@ -159,7 +165,7 @@ Handle<ByteArray> SourcePositionTableBuilder::ToSourcePositionTable(
}
SourcePositionTableIterator::SourcePositionTableIterator(ByteArray* byte_array)
- : raw_table_(byte_array) {
+ : raw_table_(VectorFromByteArray(byte_array)) {
Advance();
}
@@ -171,15 +177,24 @@ SourcePositionTableIterator::SourcePositionTableIterator(
no_gc.Release();
}
+SourcePositionTableIterator::SourcePositionTableIterator(
+ Vector<const byte> bytes)
+ : raw_table_(bytes) {
+ Advance();
+ // We can enable allocation because the underlying vector does not move.
+ no_gc.Release();
+}
+
void SourcePositionTableIterator::Advance() {
- ByteArray* table = raw_table_ ? raw_table_ : *table_;
+ Vector<const byte> bytes =
+ table_.is_null() ? raw_table_ : VectorFromByteArray(*table_);
DCHECK(!done());
- DCHECK(index_ >= 0 && index_ <= table->length());
- if (index_ >= table->length()) {
+ DCHECK(index_ >= 0 && index_ <= bytes.length());
+ if (index_ >= bytes.length()) {
index_ = kDone;
} else {
PositionTableEntry tmp;
- DecodeEntry(table, &index_, &tmp);
+ DecodeEntry(bytes, &index_, &tmp);
AddAndSetEntry(current_, tmp);
}
}
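The DecodeInt/DecodeEntry helpers above read a varint-style stream: each byte contributes a payload chunk plus a continuation flag, assembled via the MoreBit/ValueBits bit fields. A minimal standalone sketch of that decoding style, assuming the high bit of each byte is the continuation flag and the low seven bits are payload (the real bit layout is defined by the BitFields in source-position-table.cc and may differ):

#include <cstdint>
#include <vector>

// Decodes one varint-style value from `bytes`, advancing *index past it.
int32_t DecodeVarint(const std::vector<uint8_t>& bytes, int* index) {
  uint32_t decoded = 0;
  int shift = 0;
  bool more;
  do {
    uint8_t current = bytes[static_cast<size_t>((*index)++)];
    decoded |= static_cast<uint32_t>(current & 0x7F) << shift;  // payload bits
    more = (current & 0x80) != 0;                               // continuation
    shift += 7;
  } while (more);
  return static_cast<int32_t>(decoded);
}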
diff --git a/deps/v8/src/source-position-table.h b/deps/v8/src/source-position-table.h
index 9028e50f79..652f5aa34a 100644
--- a/deps/v8/src/source-position-table.h
+++ b/deps/v8/src/source-position-table.h
@@ -64,7 +64,7 @@ class V8_EXPORT_PRIVATE SourcePositionTableIterator {
PositionTableEntry position_;
};
- // We expose two flavours of the iterator, depending on the argument passed
+ // We expose three flavours of the iterator, depending on the argument passed
// to the constructor:
// Handlified iterator allows allocation, but it needs a handle (and thus
@@ -76,6 +76,10 @@ class V8_EXPORT_PRIVATE SourcePositionTableIterator {
// scope around.
explicit SourcePositionTableIterator(ByteArray* byte_array);
+ // Handle-safe iterator based on a vector located outside the garbage
+ // collected heap; it allows allocation during its lifetime.
+ explicit SourcePositionTableIterator(Vector<const byte> bytes);
+
void Advance();
int code_offset() const {
@@ -102,7 +106,7 @@ class V8_EXPORT_PRIVATE SourcePositionTableIterator {
private:
static const int kDone = -1;
- ByteArray* raw_table_ = nullptr;
+ Vector<const byte> raw_table_;
Handle<ByteArray> table_;
int index_ = 0;
PositionTableEntry current_;
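The new Vector-based constructor is the third flavour described in the comment above: once the table bytes have been copied out of the garbage-collected heap, iteration no longer needs to block allocation. A rough usage sketch, assuming `table` is a ByteArray*, the usual Vector(data, length) constructor shape, and a hypothetical UsePosition callback:

// Copy the table off the GC heap, then iterate with allocation allowed.
std::vector<byte> copy(table->GetDataStartAddress(),
                       table->GetDataStartAddress() + table->length());
SourcePositionTableIterator it(Vector<const byte>(copy.data(), copy.size()));
for (; !it.done(); it.Advance()) {
  UsePosition(it.code_offset(), it.source_position());  // may allocate safely
}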
diff --git a/deps/v8/src/source-position.cc b/deps/v8/src/source-position.cc
index b45567629a..ab1f1e77d8 100644
--- a/deps/v8/src/source-position.cc
+++ b/deps/v8/src/source-position.cc
@@ -3,8 +3,8 @@
// found in the LICENSE file.
#include "src/source-position.h"
-#include "src/compilation-info.h"
#include "src/objects-inl.h"
+#include "src/optimized-compilation-info.h"
namespace v8 {
namespace internal {
@@ -50,7 +50,7 @@ std::ostream& operator<<(std::ostream& out, const SourcePosition& pos) {
}
std::vector<SourcePositionInfo> SourcePosition::InliningStack(
- CompilationInfo* cinfo) const {
+ OptimizedCompilationInfo* cinfo) const {
SourcePosition pos = *this;
std::vector<SourcePositionInfo> stack;
while (pos.isInlined()) {
diff --git a/deps/v8/src/source-position.h b/deps/v8/src/source-position.h
index 41e32557ce..de2cbce84e 100644
--- a/deps/v8/src/source-position.h
+++ b/deps/v8/src/source-position.h
@@ -16,7 +16,7 @@ namespace v8 {
namespace internal {
class Code;
-class CompilationInfo;
+class OptimizedCompilationInfo;
class Script;
class SharedFunctionInfo;
struct SourcePositionInfo;
@@ -26,7 +26,7 @@ struct SourcePositionInfo;
// - inlining_id (16 bit non-negative int or kNotInlined).
//
// A defined inlining_id refers to positions in
-// CompilationInfo::inlined_functions or
+// OptimizedCompilationInfo::inlined_functions or
// DeoptimizationData::InliningPositions, depending on the compilation stage.
class SourcePosition final {
public:
@@ -44,7 +44,8 @@ class SourcePosition final {
// Assumes that the code object is optimized
std::vector<SourcePositionInfo> InliningStack(Handle<Code> code) const;
- std::vector<SourcePositionInfo> InliningStack(CompilationInfo* cinfo) const;
+ std::vector<SourcePositionInfo> InliningStack(
+ OptimizedCompilationInfo* cinfo) const;
void Print(std::ostream& out, Code* code) const;
diff --git a/deps/v8/src/startup-data-util.cc b/deps/v8/src/startup-data-util.cc
index 5f5472f0af..e9b6f6ef53 100644
--- a/deps/v8/src/startup-data-util.cc
+++ b/deps/v8/src/startup-data-util.cc
@@ -10,6 +10,7 @@
#include "src/base/file-utils.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
+#include "src/flags.h"
#include "src/utils.h"
@@ -86,9 +87,15 @@ void InitializeExternalStartupData(const char* directory_path) {
#ifdef V8_USE_EXTERNAL_STARTUP_DATA
char* natives;
char* snapshot;
+ const char* snapshot_name = "snapshot_blob.bin";
+#ifdef V8_MULTI_SNAPSHOTS
+ if (!FLAG_untrusted_code_mitigations) {
+ snapshot_name = "snapshot_blob_trusted.bin";
+ }
+#endif
LoadFromFiles(
base::RelativePath(&natives, directory_path, "natives_blob.bin"),
- base::RelativePath(&snapshot, directory_path, "snapshot_blob.bin"));
+ base::RelativePath(&snapshot, directory_path, snapshot_name));
free(natives);
free(snapshot);
#endif // V8_USE_EXTERNAL_STARTUP_DATA
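The hunk above picks between two snapshot blobs when V8_MULTI_SNAPSHOTS is defined: the trusted blob is only used when untrusted-code mitigations are switched off. Factored into a helper for illustration (the helper name is hypothetical; the file names and flag come from the change itself):

#include <cstdio>

const char* SelectSnapshotBlobName(bool untrusted_code_mitigations) {
#ifdef V8_MULTI_SNAPSHOTS
  // Builds shipping both blobs prefer the trusted one when mitigations are off.
  if (!untrusted_code_mitigations) return "snapshot_blob_trusted.bin";
#endif
  return "snapshot_blob.bin";
}

int main() {
  std::printf("%s\n", SelectSnapshotBlobName(true));
  return 0;
}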
diff --git a/deps/v8/src/string-builder.h b/deps/v8/src/string-builder.h
index 53e0462c67..66776dfe67 100644
--- a/deps/v8/src/string-builder.h
+++ b/deps/v8/src/string-builder.h
@@ -6,8 +6,8 @@
#define V8_STRING_BUILDER_H_
#include "src/assert-scope.h"
-#include "src/factory.h"
#include "src/handles.h"
+#include "src/heap/factory.h"
#include "src/isolate.h"
#include "src/objects.h"
#include "src/utils.h"
diff --git a/deps/v8/src/string-stream.cc b/deps/v8/src/string-stream.cc
index 7693a229bf..272b2a354d 100644
--- a/deps/v8/src/string-stream.cc
+++ b/deps/v8/src/string-stream.cc
@@ -481,7 +481,7 @@ void StringStream::PrintFunction(Object* f, Object* receiver, Code** code) {
void StringStream::PrintPrototype(JSFunction* fun, Object* receiver) {
- Object* name = fun->shared()->name();
+ Object* name = fun->shared()->Name();
bool print_name = false;
Isolate* isolate = fun->GetIsolate();
if (receiver->IsNullOrUndefined(isolate) || receiver->IsTheHole(isolate) ||
@@ -516,7 +516,7 @@ void StringStream::PrintPrototype(JSFunction* fun, Object* receiver) {
// which it was looked up.
if (print_name) {
Add("(aka ");
- PrintName(fun->shared()->name());
+ PrintName(fun->shared()->Name());
Put(')');
}
}
diff --git a/deps/v8/src/third_party/vtune/BUILD.gn b/deps/v8/src/third_party/vtune/BUILD.gn
index 33e8443b98..a2163ebaf1 100644
--- a/deps/v8/src/third_party/vtune/BUILD.gn
+++ b/deps/v8/src/third_party/vtune/BUILD.gn
@@ -15,6 +15,6 @@ static_library("v8_vtune") {
"vtune-jit.h",
]
deps = [
- "//:v8",
+ "../../..:v8",
]
}
diff --git a/deps/v8/src/transitions-inl.h b/deps/v8/src/transitions-inl.h
index ac564ad9c2..59aac025ee 100644
--- a/deps/v8/src/transitions-inl.h
+++ b/deps/v8/src/transitions-inl.h
@@ -15,23 +15,18 @@
namespace v8 {
namespace internal {
-template <TransitionsAccessor::Encoding enc>
WeakCell* TransitionsAccessor::GetTargetCell() {
DCHECK(!needs_reload_);
- if (target_cell_ != nullptr) return target_cell_;
- if (enc == kWeakCell) {
- target_cell_ = WeakCell::cast(raw_transitions_);
- } else if (enc == kHandler) {
- target_cell_ = StoreHandler::GetTransitionCell(raw_transitions_);
- } else {
- UNREACHABLE();
+ if (target_cell_ == nullptr) {
+ target_cell_ =
+ StoreHandler::GetTransitionCell(raw_transitions_->ToStrongHeapObject());
}
return target_cell_;
}
TransitionArray* TransitionsAccessor::transitions() {
DCHECK_EQ(kFullTransitionArray, encoding());
- return TransitionArray::cast(raw_transitions_);
+ return TransitionArray::cast(raw_transitions_->ToStrongHeapObject());
}
CAST_ACCESSOR(TransitionArray)
@@ -71,23 +66,25 @@ Name* TransitionArray::GetKey(int transition_number) {
}
Name* TransitionsAccessor::GetKey(int transition_number) {
- WeakCell* cell = nullptr;
+ Map* map = nullptr;
switch (encoding()) {
case kPrototypeInfo:
case kUninitialized:
UNREACHABLE();
return nullptr;
- case kWeakCell:
- cell = GetTargetCell<kWeakCell>();
+ case kWeakRef:
+ map = Map::cast(raw_transitions_->ToWeakHeapObject());
break;
- case kHandler:
- cell = GetTargetCell<kHandler>();
+ case kHandler: {
+ WeakCell* cell = GetTargetCell();
+ DCHECK(!cell->cleared());
+ map = Map::cast(cell->value());
break;
+ }
case kFullTransitionArray:
return transitions()->GetKey(transition_number);
}
- DCHECK(!cell->cleared());
- return GetSimpleTransitionKey(Map::cast(cell->value()));
+ return GetSimpleTransitionKey(map);
}
void TransitionArray::SetKey(int transition_number, Name* key) {
@@ -127,23 +124,22 @@ Map* TransitionArray::GetTarget(int transition_number) {
}
Map* TransitionsAccessor::GetTarget(int transition_number) {
- WeakCell* cell = nullptr;
switch (encoding()) {
case kPrototypeInfo:
case kUninitialized:
UNREACHABLE();
return nullptr;
- case kWeakCell:
- cell = GetTargetCell<kWeakCell>();
- break;
- case kHandler:
- cell = GetTargetCell<kHandler>();
- break;
+ case kWeakRef:
+ return Map::cast(raw_transitions_->ToWeakHeapObject());
+ case kHandler: {
+ WeakCell* cell = GetTargetCell();
+ DCHECK(!cell->cleared());
+ return Map::cast(cell->value());
+ }
case kFullTransitionArray:
return transitions()->GetTarget(transition_number);
}
- DCHECK(!cell->cleared());
- return Map::cast(cell->value());
+ UNREACHABLE();
}
void TransitionArray::SetTarget(int transition_number, Object* value) {
@@ -152,6 +148,15 @@ void TransitionArray::SetTarget(int transition_number, Object* value) {
set(ToTargetIndex(transition_number), value);
}
+bool TransitionArray::GetTargetIfExists(int transition_number, Isolate* isolate,
+ Map** target) {
+ Object* raw = GetRawTarget(transition_number);
+ if (raw->IsUndefined(isolate)) {
+ return false;
+ }
+ *target = TransitionsAccessor::GetTargetFromRaw(raw);
+ return true;
+}
int TransitionArray::SearchName(Name* name, int* out_insertion_index) {
DCHECK(name->IsUniqueName());
@@ -202,7 +207,7 @@ void TransitionArray::Set(int transition_number, Name* key, Object* target) {
int TransitionArray::Capacity() {
if (length() <= kFirstIndex) return 0;
- return (length() - kFirstIndex) / kTransitionSize;
+ return (length() - kFirstIndex) / kEntrySize;
}
void TransitionArray::SetNumberOfTransitions(int number_of_transitions) {
diff --git a/deps/v8/src/transitions.cc b/deps/v8/src/transitions.cc
index 1d6f9a05be..3f312a2306 100644
--- a/deps/v8/src/transitions.cc
+++ b/deps/v8/src/transitions.cc
@@ -13,17 +13,23 @@ namespace internal {
void TransitionsAccessor::Initialize() {
raw_transitions_ = map_->raw_transitions();
- if (raw_transitions_->IsSmi()) {
+ HeapObject* heap_object;
+ if (raw_transitions_->IsSmi() ||
+ raw_transitions_->IsClearedWeakHeapObject()) {
encoding_ = kUninitialized;
- } else if (HeapObject::cast(raw_transitions_)->IsWeakCell()) {
- encoding_ = kWeakCell;
- } else if (HeapObject::cast(raw_transitions_)->IsStoreHandler()) {
- encoding_ = kHandler;
- } else if (HeapObject::cast(raw_transitions_)->IsTransitionArray()) {
- encoding_ = kFullTransitionArray;
+ } else if (raw_transitions_->IsWeakHeapObject()) {
+ encoding_ = kWeakRef;
+ } else if (raw_transitions_->ToStrongHeapObject(&heap_object)) {
+ if (heap_object->IsStoreHandler()) {
+ encoding_ = kHandler;
+ } else if (heap_object->IsTransitionArray()) {
+ encoding_ = kFullTransitionArray;
+ } else {
+ DCHECK(heap_object->IsPrototypeInfo());
+ encoding_ = kPrototypeInfo;
+ }
} else {
- DCHECK(HeapObject::cast(raw_transitions_)->IsPrototypeInfo());
- encoding_ = kPrototypeInfo;
+ UNREACHABLE();
}
target_cell_ = nullptr;
#if DEBUG
@@ -33,48 +39,46 @@ void TransitionsAccessor::Initialize() {
Map* TransitionsAccessor::GetSimpleTransition() {
switch (encoding()) {
- case kWeakCell:
- return Map::cast(GetTargetCell<kWeakCell>()->value());
+ case kWeakRef:
+ return Map::cast(raw_transitions_->ToWeakHeapObject());
case kHandler:
- return Map::cast(GetTargetCell<kHandler>()->value());
+ return Map::cast(GetTargetCell()->value());
default:
return nullptr;
}
}
-bool TransitionsAccessor::HasSimpleTransitionTo(WeakCell* cell) {
- DCHECK(cell->value()->IsMap());
+bool TransitionsAccessor::HasSimpleTransitionTo(Map* map) {
switch (encoding()) {
- case kWeakCell:
- return raw_transitions_ == cell;
+ case kWeakRef:
+ return raw_transitions_->ToWeakHeapObject() == map;
case kHandler:
- return StoreHandler::GetTransitionCell(raw_transitions_) == cell;
+ return StoreHandler::GetTransitionCell(
+ raw_transitions_->ToStrongHeapObject())
+ ->value() == map;
case kPrototypeInfo:
case kUninitialized:
case kFullTransitionArray:
return false;
}
UNREACHABLE();
- return false; // Make GCC happy.
}
void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
SimpleTransitionFlag flag) {
DCHECK(!map_handle_.is_null());
Isolate* isolate = map_->GetIsolate();
- Handle<WeakCell> weak_cell_with_target = Map::WeakCellForMap(target);
- Reload();
target->SetBackPointer(map_);
// If the map doesn't have any transitions at all yet, install the new one.
if (encoding() == kUninitialized) {
if (flag == SIMPLE_PROPERTY_TRANSITION) {
- ReplaceTransitions(*weak_cell_with_target);
+ ReplaceTransitions(HeapObjectReference::Weak(*target));
return;
}
// If the flag requires a full TransitionArray, allocate one.
Handle<TransitionArray> result = TransitionArray::Allocate(isolate, 0, 1);
- ReplaceTransitions(*result);
+ ReplaceTransitions(MaybeObject::FromObject(*result));
Reload();
}
@@ -90,27 +94,39 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
if (flag == SIMPLE_PROPERTY_TRANSITION && key->Equals(*name) &&
old_details.kind() == new_details.kind() &&
old_details.attributes() == new_details.attributes()) {
- ReplaceTransitions(*weak_cell_with_target);
+ ReplaceTransitions(HeapObjectReference::Weak(*target));
return;
}
// Otherwise allocate a full TransitionArray with slack for a new entry.
+ Handle<Map> map(simple_transition);
+ Handle<WeakCell> weak_cell = Map::WeakCellForMap(map);
Handle<TransitionArray> result = TransitionArray::Allocate(isolate, 1, 1);
- // Reload state; the allocation might have caused it to be cleared.
+ // Reload state; allocations might have caused it to be cleared.
Reload();
simple_transition = GetSimpleTransition();
if (simple_transition != nullptr) {
- result->Set(0, GetSimpleTransitionKey(simple_transition),
- raw_transitions_);
+ DCHECK_EQ(*map, simple_transition);
+ if (encoding_ == kWeakRef) {
+ result->Set(0, GetSimpleTransitionKey(simple_transition), *weak_cell);
+ } else if (encoding_ == kHandler) {
+ result->Set(0, GetSimpleTransitionKey(simple_transition),
+ raw_transitions_->ToStrongHeapObject());
+ } else {
+ UNREACHABLE();
+ }
} else {
result->SetNumberOfTransitions(0);
}
- ReplaceTransitions(*result);
+ ReplaceTransitions(MaybeObject::FromObject(*result));
Reload();
}
// At this point, we know that the map has a full TransitionArray.
DCHECK_EQ(kFullTransitionArray, encoding());
+ Handle<WeakCell> weak_cell_with_target = Map::WeakCellForMap(target);
+ Reload();
+
int number_of_transitions = 0;
int new_nof = 0;
int insertion_index = kNotFound;
@@ -202,88 +218,34 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
}
SLOW_DCHECK(result->IsSortedNoDuplicates());
- ReplaceTransitions(*result);
-}
-
-void TransitionsAccessor::UpdateHandler(Name* name, Object* handler) {
- if (map_->is_dictionary_map()) return;
- switch (encoding()) {
- case kPrototypeInfo:
- case kUninitialized:
- UNREACHABLE();
- return;
- case kWeakCell:
- case kHandler:
- DCHECK_EQ(GetSimpleTransition(), GetTargetFromRaw(handler));
- ReplaceTransitions(handler);
- return;
- case kFullTransitionArray: {
- PropertyAttributes attributes = name->IsPrivate() ? DONT_ENUM : NONE;
- int transition = transitions()->Search(kData, name, attributes);
- DCHECK_NE(kNotFound, transition);
- DCHECK_EQ(transitions()->GetTarget(transition),
- GetTargetFromRaw(handler));
- transitions()->SetTarget(transition, handler);
- return;
- }
- }
-}
-
-Object* TransitionsAccessor::SearchHandler(Name* name,
- Handle<Map>* out_transition) {
- switch (encoding()) {
- case kPrototypeInfo:
- case kUninitialized:
- case kWeakCell:
- return nullptr;
- case kHandler: {
- Object* raw_handler = StoreHandler::ValidHandlerOrNull(
- raw_transitions_, name, out_transition);
- if (raw_handler == nullptr) return raw_handler;
- // Check transition key.
- WeakCell* target_cell = StoreHandler::GetTransitionCell(raw_handler);
- if (!IsMatchingMap(target_cell, name, kData, NONE)) return nullptr;
- return raw_handler;
- }
-
- case kFullTransitionArray: {
- int transition = transitions()->Search(kData, name, NONE);
- if (transition == kNotFound) return nullptr;
- Object* raw_handler = transitions()->GetRawTarget(transition);
- if (raw_handler->IsStoreHandler()) {
- return StoreHandler::ValidHandlerOrNull(raw_handler, name,
- out_transition);
- }
- return nullptr;
- }
- }
- UNREACHABLE();
- return nullptr; // Make GCC happy.
+ ReplaceTransitions(MaybeObject::FromObject(*result));
}
Map* TransitionsAccessor::SearchTransition(Name* name, PropertyKind kind,
PropertyAttributes attributes) {
DCHECK(name->IsUniqueName());
- WeakCell* cell = nullptr;
+ Map* map = nullptr;
switch (encoding()) {
case kPrototypeInfo:
case kUninitialized:
return nullptr;
- case kWeakCell:
- cell = GetTargetCell<kWeakCell>();
+ case kWeakRef:
+ map = Map::cast(raw_transitions_->ToWeakHeapObject());
break;
- case kHandler:
- cell = GetTargetCell<kHandler>();
+ case kHandler: {
+ WeakCell* cell = GetTargetCell();
+ DCHECK(!cell->cleared());
+ map = Map::cast(cell->value());
break;
+ }
case kFullTransitionArray: {
int transition = transitions()->Search(kind, name, attributes);
if (transition == kNotFound) return nullptr;
return transitions()->GetTarget(transition);
}
}
- DCHECK(!cell->cleared());
- if (!IsMatchingMap(cell, name, kind, attributes)) return nullptr;
- return Map::cast(cell->value());
+ if (!IsMatchingMap(map, name, kind, attributes)) return nullptr;
+ return map;
}
Map* TransitionsAccessor::SearchSpecial(Symbol* name) {
@@ -303,35 +265,40 @@ bool TransitionsAccessor::IsSpecialTransition(Name* name) {
name == heap->strict_function_transition_symbol();
}
-Handle<Map> TransitionsAccessor::FindTransitionToField(Handle<Name> name) {
+MaybeHandle<Map> TransitionsAccessor::FindTransitionToDataProperty(
+ Handle<Name> name, RequestedLocation requested_location) {
DCHECK(name->IsUniqueName());
DisallowHeapAllocation no_gc;
- Map* target = SearchTransition(*name, kData, NONE);
- if (target == nullptr) return Handle<Map>::null();
+ PropertyAttributes attributes = name->IsPrivate() ? DONT_ENUM : NONE;
+ Map* target = SearchTransition(*name, kData, attributes);
+ if (target == nullptr) return MaybeHandle<Map>();
PropertyDetails details = target->GetLastDescriptorDetails();
- DCHECK_EQ(NONE, details.attributes());
- if (details.location() != kField) return Handle<Map>::null();
+ DCHECK_EQ(attributes, details.attributes());
DCHECK_EQ(kData, details.kind());
+ if (requested_location == kFieldOnly && details.location() != kField) {
+ return MaybeHandle<Map>();
+ }
return Handle<Map>(target);
}
Handle<String> TransitionsAccessor::ExpectedTransitionKey() {
DisallowHeapAllocation no_gc;
- WeakCell* cell = nullptr;
+ Map* target = nullptr;
switch (encoding()) {
case kPrototypeInfo:
case kUninitialized:
case kFullTransitionArray:
return Handle<String>::null();
- case kWeakCell:
- cell = GetTargetCell<kWeakCell>();
+ case kWeakRef:
+ target = Map::cast(raw_transitions_->ToWeakHeapObject());
break;
- case kHandler:
- cell = GetTargetCell<kHandler>();
+ case kHandler: {
+ WeakCell* cell = GetTargetCell();
+ DCHECK(!cell->cleared());
+ target = Map::cast(cell->value());
break;
+ }
}
- DCHECK(!cell->cleared());
- Map* target = Map::cast(cell->value());
PropertyDetails details = GetSimpleTargetDetails(target);
if (details.location() != kField) return Handle<String>::null();
DCHECK_EQ(kData, details.kind());
@@ -355,10 +322,9 @@ bool TransitionsAccessor::CanHaveMoreTransitions() {
}
// static
-bool TransitionsAccessor::IsMatchingMap(WeakCell* target_cell, Name* name,
+bool TransitionsAccessor::IsMatchingMap(Map* target, Name* name,
PropertyKind kind,
PropertyAttributes attributes) {
- Map* target = Map::cast(target_cell->value());
int descriptor = target->LastAdded();
DescriptorArray* descriptors = target->instance_descriptors();
Name* key = descriptors->GetKey(descriptor);
@@ -485,7 +451,7 @@ int TransitionsAccessor::NumberOfTransitions() {
case kPrototypeInfo:
case kUninitialized:
return 0;
- case kWeakCell:
+ case kWeakRef:
case kHandler:
return 1;
case kFullTransitionArray:
@@ -512,12 +478,13 @@ void TransitionArray::Zap() {
SetNumberOfTransitions(0);
}
-void TransitionsAccessor::ReplaceTransitions(Object* new_transitions) {
+void TransitionsAccessor::ReplaceTransitions(MaybeObject* new_transitions) {
if (encoding() == kFullTransitionArray) {
TransitionArray* old_transitions = transitions();
#if DEBUG
- CheckNewTransitionsAreConsistent(old_transitions, new_transitions);
- DCHECK(old_transitions != new_transitions);
+ CheckNewTransitionsAreConsistent(old_transitions,
+ new_transitions->ToStrongHeapObject());
+ DCHECK(old_transitions != new_transitions->ToStrongHeapObject());
#endif
// Transition arrays are not shared. When one is replaced, it should not
// keep referenced objects alive, so we zap it.
@@ -555,7 +522,7 @@ void TransitionsAccessor::EnsureHasFullTransitionArray() {
result->Set(0, key, *weak_cell_with_target);
}
}
- ReplaceTransitions(*result);
+ ReplaceTransitions(MaybeObject::FromObject(*result));
Reload(); // Reload after replacing transitions.
}
@@ -566,11 +533,11 @@ void TransitionsAccessor::TraverseTransitionTreeInternal(
case kPrototypeInfo:
case kUninitialized:
break;
- case kWeakCell:
- simple_target = Map::cast(GetTargetCell<kWeakCell>()->value());
+ case kWeakRef:
+ simple_target = Map::cast(raw_transitions_->ToWeakHeapObject());
break;
case kHandler:
- simple_target = Map::cast(GetTargetCell<kHandler>()->value());
+ simple_target = Map::cast(GetTargetCell()->value());
break;
case kFullTransitionArray: {
if (transitions()->HasPrototypeTransitions()) {
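The rewrites above replace WeakCell indirection with in-place weak references, but the GC contract stays the same: a simple transition must not keep its target map alive, and a cleared reference is treated as having no transitions (kUninitialized). As a loose standalone analogy only, using std::weak_ptr rather than V8's MaybeObject machinery:

#include <cassert>
#include <memory>

struct Map { int id; };

int main() {
  std::weak_ptr<Map> transition;                  // the "in-place weak reference"
  {
    auto target = std::make_shared<Map>(Map{1});  // target map still reachable
    transition = target;
    assert(!transition.expired());                // transition resolves to it
  }
  // Once the target map is otherwise unreachable, the weak reference clears,
  // which Initialize() above maps to the kUninitialized encoding.
  assert(transition.expired());
  return 0;
}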
diff --git a/deps/v8/src/transitions.h b/deps/v8/src/transitions.h
index 62a8bb50d4..562f0ebe92 100644
--- a/deps/v8/src/transitions.h
+++ b/deps/v8/src/transitions.h
@@ -27,15 +27,14 @@ namespace internal {
// operations in a row (provided no GC happens between them), it must be
// discarded and recreated after "Insert" and "UpdateHandler" operations.
//
-// Internal details: a Map's field either holds a WeakCell to a transition
-// target, or a StoreIC handler for a transitioning store (which in turn points
-// to its target map), or a TransitionArray for several target maps and/or
-// handlers as well as prototype and ElementsKind transitions.
-// Property details (and in case of inline target storage, the key) are
-// retrieved from the target map's descriptor array.
-// Stored transitions are weak in the GC sense: both single transitions stored
-// inline and TransitionArray fields are cleared when the map they refer to
-// is not otherwise reachable.
+// Internal details: a Map's field either holds an in-place weak reference to a
+// transition target, or a StoreIC handler for a transitioning store (which in
+// turn points to its target map), or a TransitionArray for several target maps
+// and/or handlers as well as prototype and ElementsKind transitions. Property
+// details (and in case of inline target storage, the key) are retrieved from
+// the target map's descriptor array. Stored transitions are weak in the GC
+// sense: both single transitions stored inline and TransitionArray fields are
+// cleared when the map they refer to is not otherwise reachable.
class TransitionsAccessor {
public:
TransitionsAccessor(Map* map, DisallowHeapAllocation* no_gc) : map_(map) {
@@ -55,19 +54,18 @@ class TransitionsAccessor {
Map* SearchTransition(Name* name, PropertyKind kind,
PropertyAttributes attributes);
- // This TransitionsAccessor instance is unusable after this operation.
- void UpdateHandler(Name* name, Object* handler);
-
- // If a valid handler is found, returns the transition target in
- // |out_transition|.
- Object* SearchHandler(Name* name, Handle<Map>* out_transition);
-
Map* SearchSpecial(Symbol* name);
// Returns true for non-property transitions like elements kind, or
// frozen/sealed transitions.
static bool IsSpecialTransition(Name* name);
- Handle<Map> FindTransitionToField(Handle<Name> name);
+ enum RequestedLocation { kAnyLocation, kFieldOnly };
+ MaybeHandle<Map> FindTransitionToDataProperty(
+ Handle<Name> name, RequestedLocation requested_location = kAnyLocation);
+
+ MaybeHandle<Map> FindTransitionToField(Handle<Name> name) {
+ return FindTransitionToDataProperty(name, kFieldOnly);
+ }
Handle<String> ExpectedTransitionKey();
Handle<Map> ExpectedTransitionTarget();
@@ -82,8 +80,8 @@ class TransitionsAccessor {
inline Map* GetTarget(int transition_number);
static inline PropertyDetails GetTargetDetails(Name* name, Map* target);
- static bool IsMatchingMap(WeakCell* target_cell, Name* name,
- PropertyKind kind, PropertyAttributes attributes);
+ static bool IsMatchingMap(Map* target, Name* name, PropertyKind kind,
+ PropertyAttributes attributes);
// ===== ITERATION =====
typedef void (*TraverseCallback)(Map* map, void* data);
@@ -126,7 +124,9 @@ class TransitionsAccessor {
enum Encoding {
kPrototypeInfo,
kUninitialized,
- kWeakCell,
+ kWeakRef,
+ // TODO(ishell): drop support for kHandler encoding since we use maps
+ // as transition handlers.
kHandler,
kFullTransitionArray,
};
@@ -166,13 +166,14 @@ class TransitionsAccessor {
void Initialize();
inline Map* GetSimpleTransition();
- bool HasSimpleTransitionTo(WeakCell* cell);
+ bool HasSimpleTransitionTo(Map* map);
- void ReplaceTransitions(Object* new_transitions);
+ void ReplaceTransitions(MaybeObject* new_transitions);
- template <Encoding enc>
inline WeakCell* GetTargetCell();
+ inline Map* GetTargetMapFromWeakRef();
+
void EnsureHasFullTransitionArray();
void SetPrototypeTransitions(Handle<FixedArray> proto_transitions);
FixedArray* GetPrototypeTransitions();
@@ -184,7 +185,7 @@ class TransitionsAccessor {
Handle<Map> map_handle_;
Map* map_;
- Object* raw_transitions_;
+ MaybeObject* raw_transitions_;
Encoding encoding_;
WeakCell* target_cell_;
#if DEBUG
@@ -223,6 +224,8 @@ class TransitionArray : public FixedArray {
inline void SetTarget(int transition_number, Object* target);
inline Object* GetRawTarget(int transition_number);
inline Object** GetTargetSlot(int transition_number);
+ inline bool GetTargetIfExists(int transition_number, Isolate* isolate,
+ Map** target);
// Required for templatized Search interface.
static const int kNotFound = -1;
@@ -246,12 +249,34 @@ class TransitionArray : public FixedArray {
DECL_PRINTER(TransitionArray)
DECL_VERIFIER(TransitionArray)
+ // Layout for full transition arrays.
+ static const int kPrototypeTransitionsIndex = 0;
+ static const int kTransitionLengthIndex = 1;
+ static const int kFirstIndex = 2;
+
+ // Layout of map transition entries in full transition arrays.
+ static const int kEntryKeyIndex = 0;
+ static const int kEntryTargetIndex = 1;
+ static const int kEntrySize = 2;
+
+ // Conversion from transition number to array indices.
+ static int ToKeyIndex(int transition_number) {
+ return kFirstIndex + (transition_number * kEntrySize) + kEntryKeyIndex;
+ }
+
+ static int ToTargetIndex(int transition_number) {
+ return kFirstIndex + (transition_number * kEntrySize) + kEntryTargetIndex;
+ }
+
+ inline int SearchNameForTesting(Name* name,
+ int* out_insertion_index = nullptr) {
+ return SearchName(name, out_insertion_index);
+ }
+
private:
friend class MarkCompactCollector;
friend class TransitionsAccessor;
- static const int kTransitionSize = 2;
-
inline void SetNumberOfTransitions(int number_of_transitions);
inline int Capacity();
@@ -273,32 +298,9 @@ class TransitionArray : public FixedArray {
static void SetNumberOfPrototypeTransitions(FixedArray* proto_transitions,
int value);
- // Layout for full transition arrays.
- static const int kPrototypeTransitionsIndex = 0;
- static const int kTransitionLengthIndex = 1;
- static const int kFirstIndex = 2;
-
- // Layout of map transition entries in full transition arrays.
- static const int kTransitionKey = 0;
- static const int kTransitionTarget = 1;
- STATIC_ASSERT(kTransitionSize == 2);
-
static const int kProtoTransitionNumberOfEntriesOffset = 0;
STATIC_ASSERT(kProtoTransitionHeaderSize == 1);
- // Conversion from transition number to array indices.
- static int ToKeyIndex(int transition_number) {
- return kFirstIndex +
- (transition_number * kTransitionSize) +
- kTransitionKey;
- }
-
- static int ToTargetIndex(int transition_number) {
- return kFirstIndex +
- (transition_number * kTransitionSize) +
- kTransitionTarget;
- }
-
// Returns the fixed array length required to hold number_of_transitions
// transitions.
static int LengthFor(int number_of_transitions) {
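With the layout constants now public, the index arithmetic is explicit: entry n's key sits at kFirstIndex + n * kEntrySize + kEntryKeyIndex, and its target in the following slot. A standalone restatement of that arithmetic (constants copied from the header; the checks are illustrative):

#include <cassert>

constexpr int kFirstIndex = 2;        // slots for prototype transitions + length
constexpr int kEntryKeyIndex = 0;
constexpr int kEntryTargetIndex = 1;
constexpr int kEntrySize = 2;

constexpr int ToKeyIndex(int n) {
  return kFirstIndex + n * kEntrySize + kEntryKeyIndex;
}
constexpr int ToTargetIndex(int n) {
  return kFirstIndex + n * kEntrySize + kEntryTargetIndex;
}

int main() {
  static_assert(ToKeyIndex(0) == 2, "first key follows the two header slots");
  static_assert(ToTargetIndex(0) == 3, "target is stored right after its key");
  assert(ToTargetIndex(5) == ToKeyIndex(5) + 1);  // entries are two slots wide
  return 0;
}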
diff --git a/deps/v8/src/trap-handler/handler-outside.cc b/deps/v8/src/trap-handler/handler-outside.cc
index 4dc7057782..0b2301953d 100644
--- a/deps/v8/src/trap-handler/handler-outside.cc
+++ b/deps/v8/src/trap-handler/handler-outside.cc
@@ -45,8 +45,8 @@ namespace v8 {
namespace internal {
namespace trap_handler {
-const size_t kInitialCodeObjectSize = 1024;
-const size_t kCodeObjectGrowthFactor = 2;
+constexpr size_t kInitialCodeObjectSize = 1024;
+constexpr size_t kCodeObjectGrowthFactor = 2;
constexpr size_t HandlerDataSize(size_t num_protected_instructions) {
return offsetof(CodeProtectionInfo, instructions) +
@@ -135,15 +135,6 @@ CodeProtectionInfo* CreateHandlerData(
return data;
}
-void UpdateHandlerDataCodePointer(int index, void* base) {
- MetadataLock lock;
- if (static_cast<size_t>(index) >= gNumCodeObjects) {
- abort();
- }
- CodeProtectionInfo* data = gCodeObjects[index].code_info;
- data->base = base;
-}
-
int RegisterHandlerData(
void* base, size_t size, size_t num_protected_instructions,
const ProtectedInstructionData* protected_instructions) {
@@ -264,6 +255,26 @@ bool RegisterDefaultSignalHandler() {
return false;
}
+// Sanitizers often prevent us from installing our own signal handler. Attempt
+// to detect this and if so, refuse to enable trap handling.
+//
+// TODO(chromium:830894): Remove this once all bots support custom signal
+// handlers.
+#if defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
+ defined(THREAD_SANITIZER) || defined(LEAK_SANITIZER) || \
+ defined(UNDEFINED_SANITIZER)
+ struct sigaction installed_handler;
+ CHECK_EQ(sigaction(SIGSEGV, NULL, &installed_handler), 0);
+ // If the installed handler does not point to HandleSignal, then
+ // allow_user_segv_handler is 0.
+ if (installed_handler.sa_sigaction != HandleSignal) {
+ printf(
+ "WARNING: sanitizers are preventing signal handler installation. "
+ "Trap handlers are disabled.");
+ return false;
+ }
+#endif
+
g_is_default_signal_handler_registered = true;
return true;
#else
@@ -275,6 +286,20 @@ size_t GetRecoveredTrapCount() {
return gRecoveredTrapCount.load(std::memory_order_relaxed);
}
+bool g_is_trap_handler_enabled;
+
+bool EnableTrapHandler(bool use_v8_signal_handler) {
+ if (!V8_TRAP_HANDLER_SUPPORTED) {
+ return false;
+ }
+ if (use_v8_signal_handler) {
+ g_is_trap_handler_enabled = RegisterDefaultSignalHandler();
+ return g_is_trap_handler_enabled;
+ }
+ g_is_trap_handler_enabled = true;
+ return true;
+}
+
} // namespace trap_handler
} // namespace internal
} // namespace v8
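The sanitizer check added above works by asking the kernel which SIGSEGV handler is currently installed and comparing it with the one V8 just tried to register. A standalone sketch of that probe, with HandleSignal as a stand-in for the real handler:

#include <signal.h>

// Stand-in handler so the address comparison below has something concrete.
extern "C" void HandleSignal(int, siginfo_t*, void*) {}

bool OurSegvHandlerIsInstalled() {
  struct sigaction installed;
  if (sigaction(SIGSEGV, nullptr, &installed) != 0) return false;
  // sa_sigaction is only meaningful if the handler was registered with SA_SIGINFO.
  return (installed.sa_flags & SA_SIGINFO) != 0 &&
         installed.sa_sigaction == HandleSignal;
}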
diff --git a/deps/v8/src/trap-handler/trap-handler.h b/deps/v8/src/trap-handler/trap-handler.h
index d410a19322..01c814fbf7 100644
--- a/deps/v8/src/trap-handler/trap-handler.h
+++ b/deps/v8/src/trap-handler/trap-handler.h
@@ -41,20 +41,17 @@ struct ProtectedInstructionData {
const int kInvalidIndex = -1;
-/// Adjusts the base code pointer.
-void UpdateHandlerDataCodePointer(int index, void* base);
-
/// Adds the handler data to the place where the signal handler will find it.
///
/// This returns a number that can be used to identify the handler data to
-/// UpdateHandlerDataCodePointer and ReleaseHandlerData, or -1 on failure.
+/// ReleaseHandlerData, or -1 on failure.
int RegisterHandlerData(void* base, size_t size,
size_t num_protected_instructions,
const ProtectedInstructionData* protected_instructions);
/// Removes the data from the master list and frees any memory, if necessary.
-/// TODO(mtrofin): once FLAG_wasm_jit_to_native is not needed, we can switch
-/// to using size_t for index and not need kInvalidIndex.
+/// TODO(mtrofin): We can switch to using size_t for index and not need
+/// kInvalidIndex.
void ReleaseHandlerData(int index);
#if V8_OS_WIN
@@ -66,8 +63,16 @@ void ReleaseHandlerData(int index);
#define THREAD_LOCAL __thread
#endif
+extern bool g_is_trap_handler_enabled;
+// Enables trap handling for WebAssembly bounds checks.
+//
+// use_v8_signal_handler indicates that V8 should install its own signal handler
+// rather than relying on the embedder to do it.
+bool EnableTrapHandler(bool use_v8_signal_handler);
+
inline bool IsTrapHandlerEnabled() {
- return FLAG_wasm_trap_handler && V8_TRAP_HANDLER_SUPPORTED;
+ DCHECK_IMPLIES(g_is_trap_handler_enabled, V8_TRAP_HANDLER_SUPPORTED);
+ return g_is_trap_handler_enabled;
}
extern THREAD_LOCAL int g_thread_in_wasm_code;
@@ -88,6 +93,12 @@ inline void ClearThreadInWasm() {
}
}
+class ThreadInWasmScope {
+ public:
+ ThreadInWasmScope() { SetThreadInWasm(); }
+ ~ThreadInWasmScope() { ClearThreadInWasm(); }
+};
+
bool RegisterDefaultSignalHandler();
V8_EXPORT_PRIVATE void RestoreOriginalSignalHandler();
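ThreadInWasmScope is a plain RAII guard: the thread-local "in wasm" flag is set in the constructor and cleared in the destructor, so it cannot be left dangling on early returns. The same pattern in a self-contained form (a local flag stands in for the trap handler's real g_thread_in_wasm_code):

#include <cassert>

thread_local bool g_in_scope = false;

class ScopedFlag {
 public:
  ScopedFlag() { g_in_scope = true; }    // mirrors SetThreadInWasm()
  ~ScopedFlag() { g_in_scope = false; }  // mirrors ClearThreadInWasm()
};

void DoWork() {
  ScopedFlag scope;
  assert(g_in_scope);  // flag is set for the whole call
}                      // cleared here, even on early return

int main() {
  DoWork();
  assert(!g_in_scope);
  return 0;
}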
diff --git a/deps/v8/src/unoptimized-compilation-info.cc b/deps/v8/src/unoptimized-compilation-info.cc
new file mode 100644
index 0000000000..3e5d4cb9f9
--- /dev/null
+++ b/deps/v8/src/unoptimized-compilation-info.cc
@@ -0,0 +1,64 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/unoptimized-compilation-info.h"
+
+#include "src/api.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
+#include "src/debug/debug.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+#include "src/parsing/parse-info.h"
+#include "src/source-position.h"
+
+namespace v8 {
+namespace internal {
+
+UnoptimizedCompilationInfo::UnoptimizedCompilationInfo(Zone* zone,
+ ParseInfo* parse_info,
+ FunctionLiteral* literal)
+ : flags_(FLAG_untrusted_code_mitigations ? kUntrustedCodeMitigations : 0),
+ zone_(zone),
+ feedback_vector_spec_(zone) {
+ // NOTE: The parse_info passed here represents the global information gathered
+ // during parsing, but does not represent specific details of the actual
+ // function literal being compiled for this UnoptimizedCompilationInfo. As
+ // such, parse_info->literal() might be different from literal, and only
+ // global details of the script being parsed are relevant to this
+ // UnoptimizedCompilationInfo.
+ DCHECK_NOT_NULL(literal);
+ literal_ = literal;
+ source_range_map_ = parse_info->source_range_map();
+
+ if (parse_info->is_eval()) MarkAsEval();
+ if (parse_info->is_native()) MarkAsNative();
+ if (parse_info->collect_type_profile()) MarkAsCollectTypeProfile();
+}
+
+DeclarationScope* UnoptimizedCompilationInfo::scope() const {
+ DCHECK_NOT_NULL(literal_);
+ return literal_->scope();
+}
+
+int UnoptimizedCompilationInfo::num_parameters() const {
+ return scope()->num_parameters();
+}
+
+int UnoptimizedCompilationInfo::num_parameters_including_this() const {
+ return scope()->num_parameters() + 1;
+}
+
+bool UnoptimizedCompilationInfo::has_simple_parameters() {
+ return scope()->has_simple_parameters();
+}
+
+SourcePositionTableBuilder::RecordingMode
+UnoptimizedCompilationInfo::SourcePositionRecordingMode() const {
+ return is_native() ? SourcePositionTableBuilder::OMIT_SOURCE_POSITIONS
+ : SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/unoptimized-compilation-info.h b/deps/v8/src/unoptimized-compilation-info.h
new file mode 100644
index 0000000000..6df6d78c2c
--- /dev/null
+++ b/deps/v8/src/unoptimized-compilation-info.h
@@ -0,0 +1,138 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_UNOPTIMIZED_COMPILATION_INFO_H_
+#define V8_UNOPTIMIZED_COMPILATION_INFO_H_
+
+#include <memory>
+
+#include "src/feedback-vector.h"
+#include "src/globals.h"
+#include "src/handles.h"
+#include "src/objects.h"
+#include "src/source-position-table.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+class CoverageInfo;
+class DeclarationScope;
+class FunctionLiteral;
+class Isolate;
+class ParseInfo;
+class SourceRangeMap;
+class Zone;
+
+// UnoptimizedCompilationInfo encapsulates the information needed to compile
+// unoptimized code for a given function, and the results of the compilation.
+class V8_EXPORT_PRIVATE UnoptimizedCompilationInfo final {
+ public:
+ UnoptimizedCompilationInfo(Zone* zone, ParseInfo* parse_info,
+ FunctionLiteral* literal);
+
+ Zone* zone() { return zone_; }
+
+ // Compilation flag accessors.
+
+ void MarkAsEval() { SetFlag(kIsEval); }
+ bool is_eval() const { return GetFlag(kIsEval); }
+
+ void MarkAsNative() { SetFlag(kIsNative); }
+ bool is_native() const { return GetFlag(kIsNative); }
+
+ void MarkAsCollectTypeProfile() { SetFlag(kCollectTypeProfile); }
+ bool collect_type_profile() const { return GetFlag(kCollectTypeProfile); }
+
+ // Accessors for the input data of the function being compiled.
+
+ FunctionLiteral* literal() const { return literal_; }
+ void set_literal(FunctionLiteral* literal) {
+ DCHECK_NOT_NULL(literal);
+ literal_ = literal;
+ }
+
+ DeclarationScope* scope() const;
+
+ bool has_simple_parameters();
+
+ int num_parameters() const;
+ int num_parameters_including_this() const;
+
+ // Accessors for optional compilation features.
+
+ SourcePositionTableBuilder::RecordingMode SourcePositionRecordingMode() const;
+
+ bool has_source_range_map() const { return source_range_map_ != nullptr; }
+ SourceRangeMap* source_range_map() const { return source_range_map_; }
+ void set_source_range_map(SourceRangeMap* source_range_map) {
+ source_range_map_ = source_range_map;
+ }
+
+ bool has_coverage_info() const { return !coverage_info_.is_null(); }
+ Handle<CoverageInfo> coverage_info() const { return coverage_info_; }
+ void set_coverage_info(Handle<CoverageInfo> coverage_info) {
+ coverage_info_ = coverage_info;
+ }
+
+ // Accessors for the output of compilation.
+
+ bool has_bytecode_array() const { return !bytecode_array_.is_null(); }
+ Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
+ void SetBytecodeArray(Handle<BytecodeArray> bytecode_array) {
+ bytecode_array_ = bytecode_array;
+ }
+
+ bool has_asm_wasm_data() const { return !asm_wasm_data_.is_null(); }
+ Handle<FixedArray> asm_wasm_data() const { return asm_wasm_data_; }
+ void SetAsmWasmData(Handle<FixedArray> asm_wasm_data) {
+ asm_wasm_data_ = asm_wasm_data;
+ }
+
+ FeedbackVectorSpec* feedback_vector_spec() { return &feedback_vector_spec_; }
+
+ private:
+ // Various configuration flags for a compilation, as well as some properties
+ // of the compiled code produced by a compilation.
+ enum Flag {
+ kIsEval = 1 << 0,
+ kIsNative = 1 << 1,
+ kCollectTypeProfile = 1 << 2,
+ kUntrustedCodeMitigations = 1 << 3,
+ };
+
+ void SetFlag(Flag flag) { flags_ |= flag; }
+ bool GetFlag(Flag flag) const { return (flags_ & flag) != 0; }
+
+ // Compilation flags.
+ unsigned flags_;
+
+ // The zone from which the compilation pipeline working on this
+ // UnoptimizedCompilationInfo allocates.
+ Zone* zone_;
+
+ // The root AST node of the function literal being compiled.
+ FunctionLiteral* literal_;
+
+ // Used when block coverage is enabled.
+ SourceRangeMap* source_range_map_;
+
+ // Encapsulates coverage information gathered by the bytecode generator.
+ // Needs to be stored on the shared function info once compilation completes.
+ Handle<CoverageInfo> coverage_info_;
+
+ // Holds the bytecode array generated by the interpreter.
+ Handle<BytecodeArray> bytecode_array_;
+
+ // Holds the asm_wasm array generated by the asmjs compiler.
+ Handle<FixedArray> asm_wasm_data_;
+
+ // Holds the feedback vector spec generated during compilation.
+ FeedbackVectorSpec feedback_vector_spec_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_UNOPTIMIZED_COMPILATION_INFO_H_
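UnoptimizedCompilationInfo keeps its boolean properties as single bits in an unsigned word, toggled through SetFlag/GetFlag. The scheme in isolation (flag names copied from the header; the wrapper struct is illustrative):

#include <cassert>

enum Flag : unsigned {
  kIsEval = 1u << 0,
  kIsNative = 1u << 1,
  kCollectTypeProfile = 1u << 2,
  kUntrustedCodeMitigations = 1u << 3,
};

struct Flags {
  unsigned bits = 0;
  void Set(Flag f) { bits |= f; }                      // SetFlag
  bool Get(Flag f) const { return (bits & f) != 0; }   // GetFlag
};

int main() {
  Flags flags;
  flags.Set(kIsEval);
  assert(flags.Get(kIsEval) && !flags.Get(kIsNative));
  return 0;
}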
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index f3e2718fe9..35a9e7b15e 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -1579,6 +1579,7 @@ inline uintptr_t GetCurrentStackPosition() {
template <typename V>
static inline V ReadUnalignedValue(const void* p) {
+ ASSERT_TRIVIALLY_COPYABLE(V);
#if !(V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM)
return *reinterpret_cast<const V*>(p);
#else // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM
@@ -1590,6 +1591,7 @@ static inline V ReadUnalignedValue(const void* p) {
template <typename V>
static inline void WriteUnalignedValue(void* p, V value) {
+ ASSERT_TRIVIALLY_COPYABLE(V);
#if !(V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM)
*(reinterpret_cast<V*>(p)) = value;
#else // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM
@@ -1653,6 +1655,49 @@ static inline void WriteLittleEndianValue(void* p, V value) {
#endif // V8_TARGET_LITTLE_ENDIAN
}
+template <typename V>
+static inline V ByteReverse(V value) {
+ size_t size_of_v = sizeof(value);
+ switch (size_of_v) {
+ case 2:
+#if V8_HAS_BUILTIN_BSWAP16
+ return __builtin_bswap16(value);
+#else
+ return value << 8 | (value >> 8 & 0x00FF);
+#endif
+ case 4:
+#if V8_HAS_BUILTIN_BSWAP32
+ return __builtin_bswap32(value);
+#else
+ {
+ size_t bits_of_v = size_of_v * kBitsPerByte;
+ return value << (bits_of_v - 8) |
+ ((value << (bits_of_v - 24)) & 0x00FF0000) |
+ ((value >> (bits_of_v - 24)) & 0x0000FF00) |
+ ((value >> (bits_of_v - 8)) & 0x00000FF);
+ }
+#endif
+ case 8:
+#if V8_HAS_BUILTIN_BSWAP64
+ return __builtin_bswap64(value);
+#else
+ {
+ size_t bits_of_v = size_of_v * kBitsPerByte;
+ return value << (bits_of_v - 8) |
+ ((value << (bits_of_v - 24)) & 0x00FF000000000000) |
+ ((value << (bits_of_v - 40)) & 0x0000FF0000000000) |
+ ((value << (bits_of_v - 56)) & 0x000000FF00000000) |
+ ((value >> (bits_of_v - 56)) & 0x00000000FF000000) |
+ ((value >> (bits_of_v - 40)) & 0x0000000000FF0000) |
+ ((value >> (bits_of_v - 24)) & 0x000000000000FF00) |
+ ((value >> (bits_of_v - 8)) & 0x00000000000000FF);
+ }
+#endif
+ default:
+ UNREACHABLE();
+ }
+}
+
// Represents a linked list that threads through the nodes in the linked list.
// Entries in the list are pointers to nodes. The nodes need to have a T**
// next() method that returns the location where the next value is stored.
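ByteReverse falls back to shift-and-mask swaps when no __builtin_bswapNN is available. The 32-bit branch, restated as a standalone function with a quick self-check:

#include <cassert>
#include <cstdint>

uint32_t ByteReverse32(uint32_t v) {
  return (v << 24) |                 // byte 0 -> byte 3
         ((v << 8) & 0x00FF0000u) |  // byte 1 -> byte 2
         ((v >> 8) & 0x0000FF00u) |  // byte 2 -> byte 1
         (v >> 24);                  // byte 3 -> byte 0
}

int main() {
  assert(ByteReverse32(0x11223344u) == 0x44332211u);
  return 0;
}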
diff --git a/deps/v8/src/v8.h b/deps/v8/src/v8.h
index bf4240fa70..e5a40cb51f 100644
--- a/deps/v8/src/v8.h
+++ b/deps/v8/src/v8.h
@@ -14,6 +14,8 @@ class StartupData;
namespace internal {
+class Isolate;
+
class V8 : public AllStatic {
public:
// Global actions.
@@ -23,8 +25,9 @@ class V8 : public AllStatic {
// Report process out of memory. Implementation found in api.cc.
// This function will not return, but will terminate the execution.
- static void FatalProcessOutOfMemory(const char* location,
- bool is_heap_oom = false);
+ [[noreturn]] static void FatalProcessOutOfMemory(Isolate* isolate,
+ const char* location,
+ bool is_heap_oom = false);
static void InitializePlatform(v8::Platform* platform);
static void ShutdownPlatform();
diff --git a/deps/v8/src/v8memory.h b/deps/v8/src/v8memory.h
index 528de5836c..8672c477f8 100644
--- a/deps/v8/src/v8memory.h
+++ b/deps/v8/src/v8memory.h
@@ -49,6 +49,10 @@ class Memory {
return *reinterpret_cast<uintptr_t*>(addr);
}
+ static float& float_at(Address addr) {
+ return *reinterpret_cast<float*>(addr);
+ }
+
static double& double_at(Address addr) {
return *reinterpret_cast<double*>(addr);
}
diff --git a/deps/v8/src/value-serializer.cc b/deps/v8/src/value-serializer.cc
index 30f6a7a729..030501e308 100644
--- a/deps/v8/src/value-serializer.cc
+++ b/deps/v8/src/value-serializer.cc
@@ -10,12 +10,11 @@
#include "src/api.h"
#include "src/base/logging.h"
#include "src/conversions.h"
-#include "src/factory.h"
#include "src/flags.h"
#include "src/handles-inl.h"
+#include "src/heap/factory.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
-#include "src/objects.h"
#include "src/snapshot/code-serializer.h"
#include "src/transitions.h"
#include "src/wasm/wasm-engine.h"
@@ -58,6 +57,9 @@ static size_t BytesNeededForVarint(T value) {
return result;
}
+// Note that some additional tag values are defined in Blink's
+// Source/bindings/core/v8/serialization/SerializationTag.h, which must
+// not clash with values defined here.
enum class SerializationTag : uint8_t {
// version:uint32_t (if at beginning of data, sets version > 0)
kVersion = 0xFF,
@@ -80,6 +82,8 @@ enum class SerializationTag : uint8_t {
// Number represented as a 64-bit double.
// Host byte order is used (N.B. this makes the format non-portable).
kDouble = 'N',
+ // BigInt. Bitfield:uint32_t, then raw digits storage.
+ kBigInt = 'Z',
// byteLength:uint32_t, then raw data
kUtf8String = 'S',
kOneByteString = '"',
@@ -107,6 +111,8 @@ enum class SerializationTag : uint8_t {
kFalseObject = 'x',
// Number object. value:double
kNumberObject = 'n',
+ // BigInt object. Bitfield:uint32_t, then raw digits storage.
+ kBigIntObject = 'z',
// String object, UTF-8 encoding. byteLength:uint32_t, then raw data.
kStringObject = 's',
// Regular expression, UTF-8 encoding. byteLength:uint32_t, raw data,
@@ -253,6 +259,16 @@ void ValueSerializer::WriteTwoByteString(Vector<const uc16> chars) {
WriteRawBytes(chars.begin(), chars.length() * sizeof(uc16));
}
+void ValueSerializer::WriteBigIntContents(BigInt* bigint) {
+ uint32_t bitfield = bigint->GetBitfieldForSerialization();
+ int bytelength = BigInt::DigitsByteLengthForBitfield(bitfield);
+ WriteVarint<uint32_t>(bitfield);
+ uint8_t* dest;
+ if (ReserveRawBytes(bytelength).To(&dest)) {
+ bigint->SerializeDigits(dest);
+ }
+}
+
void ValueSerializer::WriteRawBytes(const void* source, size_t length) {
uint8_t* dest;
if (ReserveRawBytes(length).To(&dest)) {
@@ -340,6 +356,9 @@ Maybe<bool> ValueSerializer::WriteObject(Handle<Object> object) {
case MUTABLE_HEAP_NUMBER_TYPE:
WriteHeapNumber(HeapNumber::cast(*object));
return ThrowIfOutOfMemory();
+ case BIGINT_TYPE:
+ WriteBigInt(BigInt::cast(*object));
+ return ThrowIfOutOfMemory();
case JS_TYPED_ARRAY_TYPE:
case JS_DATA_VIEW_TYPE: {
// Despite being JSReceivers, these have their wrapped buffer serialized
@@ -403,6 +422,11 @@ void ValueSerializer::WriteHeapNumber(HeapNumber* number) {
WriteDouble(number->value());
}
+void ValueSerializer::WriteBigInt(BigInt* bigint) {
+ WriteTag(SerializationTag::kBigInt);
+ WriteBigIntContents(bigint);
+}
+
void ValueSerializer::WriteString(Handle<String> string) {
string = String::Flatten(string);
DisallowHeapAllocation no_gc;
@@ -687,6 +711,9 @@ Maybe<bool> ValueSerializer::WriteJSValue(Handle<JSValue> value) {
} else if (inner_value->IsNumber()) {
WriteTag(SerializationTag::kNumberObject);
WriteDouble(inner_value->Number());
+ } else if (inner_value->IsBigInt()) {
+ WriteTag(SerializationTag::kBigIntObject);
+ WriteBigIntContents(BigInt::cast(inner_value));
} else if (inner_value->IsString()) {
WriteTag(SerializationTag::kStringObject);
WriteString(handle(String::cast(inner_value), isolate_));
@@ -1154,6 +1181,8 @@ MaybeHandle<Object> ValueDeserializer::ReadObjectInternal() {
if (number.IsNothing()) return MaybeHandle<Object>();
return isolate_->factory()->NewNumber(number.FromJust(), pretenure_);
}
+ case SerializationTag::kBigInt:
+ return ReadBigInt();
case SerializationTag::kUtf8String:
return ReadUtf8String();
case SerializationTag::kOneByteString:
@@ -1176,6 +1205,7 @@ MaybeHandle<Object> ValueDeserializer::ReadObjectInternal() {
case SerializationTag::kTrueObject:
case SerializationTag::kFalseObject:
case SerializationTag::kNumberObject:
+ case SerializationTag::kBigIntObject:
case SerializationTag::kStringObject:
return ReadJSValue(tag);
case SerializationTag::kRegExp:
@@ -1223,6 +1253,19 @@ MaybeHandle<String> ValueDeserializer::ReadString() {
return Handle<String>::cast(object);
}
+MaybeHandle<BigInt> ValueDeserializer::ReadBigInt() {
+ if (!FLAG_harmony_bigint) return MaybeHandle<BigInt>();
+ uint32_t bitfield;
+ if (!ReadVarint<uint32_t>().To(&bitfield)) return MaybeHandle<BigInt>();
+ int bytelength = BigInt::DigitsByteLengthForBitfield(bitfield);
+ Vector<const uint8_t> digits_storage;
+ if (!ReadRawBytes(bytelength).To(&digits_storage)) {
+ return MaybeHandle<BigInt>();
+ }
+ return BigInt::FromSerializedDigits(isolate_, bitfield, digits_storage,
+ pretenure_);
+}
+
MaybeHandle<String> ValueDeserializer::ReadUtf8String() {
uint32_t utf8_length;
Vector<const uint8_t> utf8_bytes;
@@ -1464,6 +1507,14 @@ MaybeHandle<JSValue> ValueDeserializer::ReadJSValue(SerializationTag tag) {
value->set_value(*number_object);
break;
}
+ case SerializationTag::kBigIntObject: {
+ Handle<BigInt> bigint;
+ if (!ReadBigInt().ToHandle(&bigint)) return MaybeHandle<JSValue>();
+ value = Handle<JSValue>::cast(isolate_->factory()->NewJSObject(
+ isolate_->bigint_function(), pretenure_));
+ value->set_value(*bigint);
+ break;
+ }
case SerializationTag::kStringObject: {
Handle<String> string;
if (!ReadString().ToHandle(&string)) return MaybeHandle<JSValue>();
@@ -1745,7 +1796,6 @@ MaybeHandle<JSObject> ValueDeserializer::ReadWasmModule() {
result = isolate_->wasm_engine()->SyncCompile(
isolate_, &thrower, wasm::ModuleWireBytes(wire_bytes));
}
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate_, JSObject);
uint32_t id = next_id_++;
if (!result.is_null()) {
AddObjectWithID(id, result.ToHandleChecked());
@@ -1861,9 +1911,9 @@ Maybe<uint32_t> ValueDeserializer::ReadJSObjectProperties(
key =
isolate_->factory()->InternalizeString(Handle<String>::cast(key));
// Don't reuse |transitions| because it could be stale.
- target = TransitionsAccessor(map).FindTransitionToField(
- Handle<String>::cast(key));
- transitioning = !target.is_null();
+ transitioning = TransitionsAccessor(map)
+ .FindTransitionToField(Handle<String>::cast(key))
+ .ToHandle(&target);
} else {
transitioning = false;
}
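The new kBigInt/kBigIntObject entries follow the serializer's usual shape: a tag byte, a varint-encoded bitfield, then the raw digit bytes. The varint layout used throughout the format is LEB128-style, sketched standalone below (the real WriteVarint appends to the serializer's internal buffer; the exact internals are assumed here, not quoted):

#include <cassert>
#include <cstdint>
#include <vector>

// Writes `value` as 7-bit groups, low group first, high bit set while more follow.
void WriteVarint(uint32_t value, std::vector<uint8_t>* out) {
  do {
    uint8_t b = value & 0x7F;
    value >>= 7;
    if (value != 0) b |= 0x80;  // continuation bit
    out->push_back(b);
  } while (value != 0);
}

int main() {
  std::vector<uint8_t> bytes;
  WriteVarint(300, &bytes);  // 300 = 0b1_0010_1100
  assert((bytes == std::vector<uint8_t>{0xAC, 0x02}));
  return 0;
}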
diff --git a/deps/v8/src/value-serializer.h b/deps/v8/src/value-serializer.h
index f719eb8206..e162ce22d7 100644
--- a/deps/v8/src/value-serializer.h
+++ b/deps/v8/src/value-serializer.h
@@ -19,6 +19,7 @@
namespace v8 {
namespace internal {
+class BigInt;
class HeapNumber;
class Isolate;
class JSArrayBuffer;
@@ -55,7 +56,7 @@ class ValueSerializer {
/*
* Serializes a V8 object into the buffer.
*/
- Maybe<bool> WriteObject(Handle<Object> object) WARN_UNUSED_RESULT;
+ Maybe<bool> WriteObject(Handle<Object> object) V8_WARN_UNUSED_RESULT;
/*
* Returns the stored data. This serializer should not be used once the buffer
@@ -107,30 +108,33 @@ class ValueSerializer {
void WriteZigZag(T value);
void WriteOneByteString(Vector<const uint8_t> chars);
void WriteTwoByteString(Vector<const uc16> chars);
+ void WriteBigIntContents(BigInt* bigint);
Maybe<uint8_t*> ReserveRawBytes(size_t bytes);
// Writing V8 objects of various kinds.
void WriteOddball(Oddball* oddball);
void WriteSmi(Smi* smi);
void WriteHeapNumber(HeapNumber* number);
+ void WriteBigInt(BigInt* bigint);
void WriteString(Handle<String> string);
- Maybe<bool> WriteJSReceiver(Handle<JSReceiver> receiver) WARN_UNUSED_RESULT;
- Maybe<bool> WriteJSObject(Handle<JSObject> object) WARN_UNUSED_RESULT;
- Maybe<bool> WriteJSObjectSlow(Handle<JSObject> object) WARN_UNUSED_RESULT;
- Maybe<bool> WriteJSArray(Handle<JSArray> array) WARN_UNUSED_RESULT;
+ Maybe<bool> WriteJSReceiver(Handle<JSReceiver> receiver)
+ V8_WARN_UNUSED_RESULT;
+ Maybe<bool> WriteJSObject(Handle<JSObject> object) V8_WARN_UNUSED_RESULT;
+ Maybe<bool> WriteJSObjectSlow(Handle<JSObject> object) V8_WARN_UNUSED_RESULT;
+ Maybe<bool> WriteJSArray(Handle<JSArray> array) V8_WARN_UNUSED_RESULT;
void WriteJSDate(JSDate* date);
- Maybe<bool> WriteJSValue(Handle<JSValue> value) WARN_UNUSED_RESULT;
+ Maybe<bool> WriteJSValue(Handle<JSValue> value) V8_WARN_UNUSED_RESULT;
void WriteJSRegExp(JSRegExp* regexp);
- Maybe<bool> WriteJSMap(Handle<JSMap> map) WARN_UNUSED_RESULT;
- Maybe<bool> WriteJSSet(Handle<JSSet> map) WARN_UNUSED_RESULT;
+ Maybe<bool> WriteJSMap(Handle<JSMap> map) V8_WARN_UNUSED_RESULT;
+ Maybe<bool> WriteJSSet(Handle<JSSet> map) V8_WARN_UNUSED_RESULT;
Maybe<bool> WriteJSArrayBuffer(Handle<JSArrayBuffer> array_buffer)
- WARN_UNUSED_RESULT;
+ V8_WARN_UNUSED_RESULT;
Maybe<bool> WriteJSArrayBufferView(JSArrayBufferView* array_buffer);
Maybe<bool> WriteWasmModule(Handle<WasmModuleObject> object)
- WARN_UNUSED_RESULT;
+ V8_WARN_UNUSED_RESULT;
Maybe<bool> WriteWasmMemory(Handle<WasmMemoryObject> object)
- WARN_UNUSED_RESULT;
- Maybe<bool> WriteHostObject(Handle<JSObject> object) WARN_UNUSED_RESULT;
+ V8_WARN_UNUSED_RESULT;
+ Maybe<bool> WriteHostObject(Handle<JSObject> object) V8_WARN_UNUSED_RESULT;
/*
* Reads the specified keys from the object and writes key-value pairs to the
@@ -138,7 +142,7 @@ class ValueSerializer {
* if some keys are not own properties when accessed.
*/
Maybe<uint32_t> WriteJSObjectPropertiesSlow(
- Handle<JSObject> object, Handle<FixedArray> keys) WARN_UNUSED_RESULT;
+ Handle<JSObject> object, Handle<FixedArray> keys) V8_WARN_UNUSED_RESULT;
/*
* Asks the delegate to handle an error that occurred during data cloning, by
@@ -184,7 +188,7 @@ class ValueDeserializer {
/*
* Runs version detection logic, which may fail if the format is invalid.
*/
- Maybe<bool> ReadHeader() WARN_UNUSED_RESULT;
+ Maybe<bool> ReadHeader() V8_WARN_UNUSED_RESULT;
/*
* Reads the underlying wire format version. Likely mostly to be useful to
@@ -196,7 +200,7 @@ class ValueDeserializer {
/*
* Deserializes a V8 object from the buffer.
*/
- MaybeHandle<Object> ReadObject() WARN_UNUSED_RESULT;
+ MaybeHandle<Object> ReadObject() V8_WARN_UNUSED_RESULT;
/*
* Reads an object, consuming the entire buffer.
@@ -206,7 +210,7 @@ class ValueDeserializer {
* deserializing, with the contents of objects and arrays provided first.
*/
MaybeHandle<Object> ReadObjectUsingEntireBufferForLegacyFormat()
- WARN_UNUSED_RESULT;
+ V8_WARN_UNUSED_RESULT;
/*
* Accepts the array buffer corresponding to the one passed previously to
@@ -219,63 +223,64 @@ class ValueDeserializer {
* Publicly exposed wire format writing methods.
* These are intended for use within the delegate's WriteHostObject method.
*/
- bool ReadUint32(uint32_t* value) WARN_UNUSED_RESULT;
- bool ReadUint64(uint64_t* value) WARN_UNUSED_RESULT;
- bool ReadDouble(double* value) WARN_UNUSED_RESULT;
- bool ReadRawBytes(size_t length, const void** data) WARN_UNUSED_RESULT;
+ bool ReadUint32(uint32_t* value) V8_WARN_UNUSED_RESULT;
+ bool ReadUint64(uint64_t* value) V8_WARN_UNUSED_RESULT;
+ bool ReadDouble(double* value) V8_WARN_UNUSED_RESULT;
+ bool ReadRawBytes(size_t length, const void** data) V8_WARN_UNUSED_RESULT;
void set_expect_inline_wasm(bool expect_inline_wasm) {
expect_inline_wasm_ = expect_inline_wasm;
}
private:
// Reading the wire format.
- Maybe<SerializationTag> PeekTag() const WARN_UNUSED_RESULT;
+ Maybe<SerializationTag> PeekTag() const V8_WARN_UNUSED_RESULT;
void ConsumeTag(SerializationTag peeked_tag);
- Maybe<SerializationTag> ReadTag() WARN_UNUSED_RESULT;
+ Maybe<SerializationTag> ReadTag() V8_WARN_UNUSED_RESULT;
template <typename T>
- Maybe<T> ReadVarint() WARN_UNUSED_RESULT;
+ Maybe<T> ReadVarint() V8_WARN_UNUSED_RESULT;
template <typename T>
- Maybe<T> ReadZigZag() WARN_UNUSED_RESULT;
- Maybe<double> ReadDouble() WARN_UNUSED_RESULT;
- Maybe<Vector<const uint8_t>> ReadRawBytes(int size) WARN_UNUSED_RESULT;
+ Maybe<T> ReadZigZag() V8_WARN_UNUSED_RESULT;
+ Maybe<double> ReadDouble() V8_WARN_UNUSED_RESULT;
+ Maybe<Vector<const uint8_t>> ReadRawBytes(int size) V8_WARN_UNUSED_RESULT;
bool expect_inline_wasm() const { return expect_inline_wasm_; }
// Reads a string if it matches the one provided.
// Returns true if this was the case. Otherwise, nothing is consumed.
- bool ReadExpectedString(Handle<String> expected) WARN_UNUSED_RESULT;
+ bool ReadExpectedString(Handle<String> expected) V8_WARN_UNUSED_RESULT;
// Like ReadObject, but skips logic for special cases in simulating the
// "stack machine".
- MaybeHandle<Object> ReadObjectInternal() WARN_UNUSED_RESULT;
+ MaybeHandle<Object> ReadObjectInternal() V8_WARN_UNUSED_RESULT;
// Reads a string intended to be part of a more complicated object.
// Before v12, these are UTF-8 strings. After, they can be any encoding
// permissible for a string (with the relevant tag).
- MaybeHandle<String> ReadString() WARN_UNUSED_RESULT;
+ MaybeHandle<String> ReadString() V8_WARN_UNUSED_RESULT;
// Reading V8 objects of specific kinds.
// The tag is assumed to have already been read.
- MaybeHandle<String> ReadUtf8String() WARN_UNUSED_RESULT;
- MaybeHandle<String> ReadOneByteString() WARN_UNUSED_RESULT;
- MaybeHandle<String> ReadTwoByteString() WARN_UNUSED_RESULT;
- MaybeHandle<JSObject> ReadJSObject() WARN_UNUSED_RESULT;
- MaybeHandle<JSArray> ReadSparseJSArray() WARN_UNUSED_RESULT;
- MaybeHandle<JSArray> ReadDenseJSArray() WARN_UNUSED_RESULT;
- MaybeHandle<JSDate> ReadJSDate() WARN_UNUSED_RESULT;
- MaybeHandle<JSValue> ReadJSValue(SerializationTag tag) WARN_UNUSED_RESULT;
- MaybeHandle<JSRegExp> ReadJSRegExp() WARN_UNUSED_RESULT;
- MaybeHandle<JSMap> ReadJSMap() WARN_UNUSED_RESULT;
- MaybeHandle<JSSet> ReadJSSet() WARN_UNUSED_RESULT;
+ MaybeHandle<BigInt> ReadBigInt() V8_WARN_UNUSED_RESULT;
+ MaybeHandle<String> ReadUtf8String() V8_WARN_UNUSED_RESULT;
+ MaybeHandle<String> ReadOneByteString() V8_WARN_UNUSED_RESULT;
+ MaybeHandle<String> ReadTwoByteString() V8_WARN_UNUSED_RESULT;
+ MaybeHandle<JSObject> ReadJSObject() V8_WARN_UNUSED_RESULT;
+ MaybeHandle<JSArray> ReadSparseJSArray() V8_WARN_UNUSED_RESULT;
+ MaybeHandle<JSArray> ReadDenseJSArray() V8_WARN_UNUSED_RESULT;
+ MaybeHandle<JSDate> ReadJSDate() V8_WARN_UNUSED_RESULT;
+ MaybeHandle<JSValue> ReadJSValue(SerializationTag tag) V8_WARN_UNUSED_RESULT;
+ MaybeHandle<JSRegExp> ReadJSRegExp() V8_WARN_UNUSED_RESULT;
+ MaybeHandle<JSMap> ReadJSMap() V8_WARN_UNUSED_RESULT;
+ MaybeHandle<JSSet> ReadJSSet() V8_WARN_UNUSED_RESULT;
MaybeHandle<JSArrayBuffer> ReadJSArrayBuffer(bool is_shared)
- WARN_UNUSED_RESULT;
+ V8_WARN_UNUSED_RESULT;
MaybeHandle<JSArrayBuffer> ReadTransferredJSArrayBuffer()
- WARN_UNUSED_RESULT;
+ V8_WARN_UNUSED_RESULT;
MaybeHandle<JSArrayBufferView> ReadJSArrayBufferView(
- Handle<JSArrayBuffer> buffer) WARN_UNUSED_RESULT;
- MaybeHandle<JSObject> ReadWasmModule() WARN_UNUSED_RESULT;
- MaybeHandle<JSObject> ReadWasmModuleTransfer() WARN_UNUSED_RESULT;
- MaybeHandle<WasmMemoryObject> ReadWasmMemory() WARN_UNUSED_RESULT;
- MaybeHandle<JSObject> ReadHostObject() WARN_UNUSED_RESULT;
+ Handle<JSArrayBuffer> buffer) V8_WARN_UNUSED_RESULT;
+ MaybeHandle<JSObject> ReadWasmModule() V8_WARN_UNUSED_RESULT;
+ MaybeHandle<JSObject> ReadWasmModuleTransfer() V8_WARN_UNUSED_RESULT;
+ MaybeHandle<WasmMemoryObject> ReadWasmMemory() V8_WARN_UNUSED_RESULT;
+ MaybeHandle<JSObject> ReadHostObject() V8_WARN_UNUSED_RESULT;
/*
* Reads key-value pairs into the object until the specified end tag is
diff --git a/deps/v8/src/visitors.h b/deps/v8/src/visitors.h
index 31ee0795d4..f242f3c044 100644
--- a/deps/v8/src/visitors.h
+++ b/deps/v8/src/visitors.h
@@ -11,6 +11,7 @@ namespace v8 {
namespace internal {
class CodeDataContainer;
+class MaybeObject;
class Object;
#define ROOT_ID_LIST(V) \
@@ -87,11 +88,16 @@ class ObjectVisitor BASE_EMBEDDED {
// [start, end). Any or all of the values may be modified on return.
virtual void VisitPointers(HeapObject* host, Object** start,
Object** end) = 0;
+ virtual void VisitPointers(HeapObject* host, MaybeObject** start,
+ MaybeObject** end) = 0;
// Handy shorthand for visiting a single pointer.
virtual void VisitPointer(HeapObject* host, Object** p) {
VisitPointers(host, p, p + 1);
}
+ virtual void VisitPointer(HeapObject* host, MaybeObject** p) {
+ VisitPointers(host, p, p + 1);
+ }
// To allow lazy clearing of inline caches the visitor has
// a rich interface for iterating over Code objects ...
@@ -113,6 +119,9 @@ class ObjectVisitor BASE_EMBEDDED {
// Visits an (encoded) internal reference.
virtual void VisitInternalReference(Code* host, RelocInfo* rinfo) {}
+
+ // Visits an off-heap target in the instruction stream.
+ virtual void VisitOffHeapTarget(Code* host, RelocInfo* rinfo) {}
};
} // namespace internal
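
For context (an illustrative sketch, not part of this patch): the new VisitPointers overload for MaybeObject** is pure virtual, so every concrete ObjectVisitor now has to handle slots that may hold weak or cleared references in addition to strong Object slots. Assuming the remaining ObjectVisitor hooks keep their default bodies, a minimal hypothetical subclass could look like this:

class SlotCountingVisitor : public ObjectVisitor {
 public:
  void VisitPointers(HeapObject* host, Object** start, Object** end) override {
    strong_slots_ += static_cast<int>(end - start);
  }
  // Required by the new pure-virtual overload introduced in this change.
  void VisitPointers(HeapObject* host, MaybeObject** start,
                     MaybeObject** end) override {
    maybe_slots_ += static_cast<int>(end - start);
  }

 private:
  int strong_slots_ = 0;
  int maybe_slots_ = 0;
};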
diff --git a/deps/v8/src/wasm/OWNERS b/deps/v8/src/wasm/OWNERS
index 2b6cc5c057..5e51225ab0 100644
--- a/deps/v8/src/wasm/OWNERS
+++ b/deps/v8/src/wasm/OWNERS
@@ -2,10 +2,13 @@ set noparent
ahaas@chromium.org
bbudge@chromium.org
+binji@chromium.org
bradnelson@chromium.org
clemensh@chromium.org
-gdeepti@chromium.org
eholk@chromium.org
+gdeepti@chromium.org
+kschimpf@chromium.org
+mstarzinger@chromium.org
titzer@chromium.org
# COMPONENT: Blink>JavaScript>WebAssembly
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index ef8893f005..91df3f0528 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -28,17 +28,17 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
BAILOUT("LoadConstant");
}
-void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
- int size) {
- BAILOUT("LoadFromContext");
+void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
+ int size) {
+ BAILOUT("LoadFromInstance");
}
-void LiftoffAssembler::SpillContext(Register context) {
- BAILOUT("SpillContext");
+void LiftoffAssembler::SpillInstance(Register instance) {
+ BAILOUT("SpillInstance");
}
-void LiftoffAssembler::FillContextInto(Register dst) {
- BAILOUT("FillContextInto");
+void LiftoffAssembler::FillInstanceInto(Register dst) {
+ BAILOUT("FillInstanceInto");
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
@@ -101,26 +101,36 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
#define UNIMPLEMENTED_GP_BINOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
Register rhs) { \
- BAILOUT("gp binop"); \
+ BAILOUT("gp binop: " #name); \
+ }
+#define UNIMPLEMENTED_I64_BINOP(name) \
+ void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
+ LiftoffRegister rhs) { \
+ BAILOUT("i64 binop: " #name); \
}
#define UNIMPLEMENTED_GP_UNOP(name) \
bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
- BAILOUT("gp unop"); \
+ BAILOUT("gp unop: " #name); \
return true; \
}
#define UNIMPLEMENTED_FP_BINOP(name) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
DoubleRegister rhs) { \
- BAILOUT("fp binop"); \
+ BAILOUT("fp binop: " #name); \
}
#define UNIMPLEMENTED_FP_UNOP(name) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
- BAILOUT("fp unop"); \
+ BAILOUT("fp unop: " #name); \
+ }
+#define UNIMPLEMENTED_I32_SHIFTOP(name) \
+ void LiftoffAssembler::emit_##name(Register dst, Register src, \
+ Register amount, LiftoffRegList pinned) { \
+ BAILOUT("i32 shiftop: " #name); \
}
-#define UNIMPLEMENTED_SHIFTOP(name) \
- void LiftoffAssembler::emit_##name(Register dst, Register lhs, Register rhs, \
- LiftoffRegList pinned) { \
- BAILOUT("shiftop"); \
+#define UNIMPLEMENTED_I64_SHIFTOP(name) \
+ void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister src, \
+ Register amount, LiftoffRegList pinned) { \
+ BAILOUT("i64 shiftop: " #name); \
}
UNIMPLEMENTED_GP_BINOP(i32_add)
@@ -129,47 +139,96 @@ UNIMPLEMENTED_GP_BINOP(i32_mul)
UNIMPLEMENTED_GP_BINOP(i32_and)
UNIMPLEMENTED_GP_BINOP(i32_or)
UNIMPLEMENTED_GP_BINOP(i32_xor)
-UNIMPLEMENTED_SHIFTOP(i32_shl)
-UNIMPLEMENTED_SHIFTOP(i32_sar)
-UNIMPLEMENTED_SHIFTOP(i32_shr)
+UNIMPLEMENTED_I32_SHIFTOP(i32_shl)
+UNIMPLEMENTED_I32_SHIFTOP(i32_sar)
+UNIMPLEMENTED_I32_SHIFTOP(i32_shr)
+UNIMPLEMENTED_I64_BINOP(i64_add)
+UNIMPLEMENTED_I64_BINOP(i64_sub)
+UNIMPLEMENTED_I64_SHIFTOP(i64_shl)
+UNIMPLEMENTED_I64_SHIFTOP(i64_sar)
+UNIMPLEMENTED_I64_SHIFTOP(i64_shr)
UNIMPLEMENTED_GP_UNOP(i32_clz)
UNIMPLEMENTED_GP_UNOP(i32_ctz)
UNIMPLEMENTED_GP_UNOP(i32_popcnt)
-UNIMPLEMENTED_GP_BINOP(ptrsize_add)
UNIMPLEMENTED_FP_BINOP(f32_add)
UNIMPLEMENTED_FP_BINOP(f32_sub)
UNIMPLEMENTED_FP_BINOP(f32_mul)
+UNIMPLEMENTED_FP_BINOP(f32_div)
+UNIMPLEMENTED_FP_UNOP(f32_abs)
UNIMPLEMENTED_FP_UNOP(f32_neg)
+UNIMPLEMENTED_FP_UNOP(f32_ceil)
+UNIMPLEMENTED_FP_UNOP(f32_floor)
+UNIMPLEMENTED_FP_UNOP(f32_trunc)
+UNIMPLEMENTED_FP_UNOP(f32_nearest_int)
+UNIMPLEMENTED_FP_UNOP(f32_sqrt)
UNIMPLEMENTED_FP_BINOP(f64_add)
UNIMPLEMENTED_FP_BINOP(f64_sub)
UNIMPLEMENTED_FP_BINOP(f64_mul)
+UNIMPLEMENTED_FP_BINOP(f64_div)
+UNIMPLEMENTED_FP_UNOP(f64_abs)
UNIMPLEMENTED_FP_UNOP(f64_neg)
+UNIMPLEMENTED_FP_UNOP(f64_ceil)
+UNIMPLEMENTED_FP_UNOP(f64_floor)
+UNIMPLEMENTED_FP_UNOP(f64_trunc)
+UNIMPLEMENTED_FP_UNOP(f64_nearest_int)
+UNIMPLEMENTED_FP_UNOP(f64_sqrt)
#undef UNIMPLEMENTED_GP_BINOP
+#undef UNIMPLEMENTED_I64_BINOP
#undef UNIMPLEMENTED_GP_UNOP
#undef UNIMPLEMENTED_FP_BINOP
#undef UNIMPLEMENTED_FP_UNOP
-#undef UNIMPLEMENTED_SHIFTOP
+#undef UNIMPLEMENTED_I32_SHIFTOP
+#undef UNIMPLEMENTED_I64_SHIFTOP
+
+bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
+ LiftoffRegister dst,
+ LiftoffRegister src) {
+ BAILOUT("emit_type_conversion");
+ return true;
+}
void LiftoffAssembler::emit_jump(Label* label) { BAILOUT("emit_jump"); }
+void LiftoffAssembler::emit_jump(Register target) { BAILOUT("emit_jump"); }
+
void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
ValueType type, Register lhs,
Register rhs) {
BAILOUT("emit_cond_jump");
}
+void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
+ BAILOUT("emit_i32_eqz");
+}
+
void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
Register lhs, Register rhs) {
BAILOUT("emit_i32_set_cond");
}
+void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
+ BAILOUT("emit_i64_eqz");
+}
+
+void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ BAILOUT("emit_i64_set_cond");
+}
+
void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
DoubleRegister lhs,
DoubleRegister rhs) {
BAILOUT("emit_f32_set_cond");
}
+void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
+ DoubleRegister rhs) {
+ BAILOUT("emit_f64_set_cond");
+}
+
void LiftoffAssembler::StackCheck(Label* ool_code) { BAILOUT("StackCheck"); }
void LiftoffAssembler::CallTrapCallbackForTesting() {
@@ -203,25 +262,34 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
BAILOUT("DropStackSlotsAndRet");
}
-void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
+void LiftoffAssembler::PrepareCCall(wasm::FunctionSig* sig,
+ const LiftoffRegister* args,
+ ValueType out_argument_type) {
BAILOUT("PrepareCCall");
}
-void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
- uint32_t num_params) {
+void LiftoffAssembler::SetCCallRegParamAddr(Register dst, int param_byte_offset,
+ ValueType type) {
BAILOUT("SetCCallRegParamAddr");
}
-void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
- uint32_t param_idx,
- uint32_t num_params) {
+void LiftoffAssembler::SetCCallStackParamAddr(int stack_param_idx,
+ int param_byte_offset,
+ ValueType type) {
BAILOUT("SetCCallStackParamAddr");
}
+void LiftoffAssembler::LoadCCallOutArgument(LiftoffRegister dst, ValueType type,
+ int param_byte_offset) {
+ BAILOUT("LoadCCallOutArgument");
+}
+
void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
BAILOUT("CallC");
}
+void LiftoffAssembler::FinishCCall() { BAILOUT("FinishCCall"); }
+
void LiftoffAssembler::CallNativeWasmCode(Address addr) {
BAILOUT("CallNativeWasmCode");
}
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index 09bce6d450..c997e9706d 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -28,17 +28,17 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
BAILOUT("LoadConstant");
}
-void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
- int size) {
- BAILOUT("LoadFromContext");
+void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
+ int size) {
+ BAILOUT("LoadFromInstance");
}
-void LiftoffAssembler::SpillContext(Register context) {
- BAILOUT("SpillContext");
+void LiftoffAssembler::SpillInstance(Register instance) {
+ BAILOUT("SpillInstance");
}
-void LiftoffAssembler::FillContextInto(Register dst) {
- BAILOUT("FillContextInto");
+void LiftoffAssembler::FillInstanceInto(Register dst) {
+ BAILOUT("FillInstanceInto");
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
@@ -101,26 +101,36 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
#define UNIMPLEMENTED_GP_BINOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
Register rhs) { \
- BAILOUT("gp binop"); \
+ BAILOUT("gp binop: " #name); \
+ }
+#define UNIMPLEMENTED_I64_BINOP(name) \
+ void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
+ LiftoffRegister rhs) { \
+ BAILOUT("i64 binop: " #name); \
}
#define UNIMPLEMENTED_GP_UNOP(name) \
bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
- BAILOUT("gp unop"); \
+ BAILOUT("gp unop: " #name); \
return true; \
}
#define UNIMPLEMENTED_FP_BINOP(name) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
DoubleRegister rhs) { \
- BAILOUT("fp binop"); \
+ BAILOUT("fp binop: " #name); \
}
#define UNIMPLEMENTED_FP_UNOP(name) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
- BAILOUT("fp unop"); \
+ BAILOUT("fp unop: " #name); \
+ }
+#define UNIMPLEMENTED_I32_SHIFTOP(name) \
+ void LiftoffAssembler::emit_##name(Register dst, Register src, \
+ Register amount, LiftoffRegList pinned) { \
+ BAILOUT("i32 shiftop: " #name); \
}
-#define UNIMPLEMENTED_SHIFTOP(name) \
- void LiftoffAssembler::emit_##name(Register dst, Register lhs, Register rhs, \
- LiftoffRegList pinned) { \
- BAILOUT("shiftop"); \
+#define UNIMPLEMENTED_I64_SHIFTOP(name) \
+ void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister src, \
+ Register amount, LiftoffRegList pinned) { \
+ BAILOUT("i64 shiftop: " #name); \
}
UNIMPLEMENTED_GP_BINOP(i32_add)
@@ -129,47 +139,99 @@ UNIMPLEMENTED_GP_BINOP(i32_mul)
UNIMPLEMENTED_GP_BINOP(i32_and)
UNIMPLEMENTED_GP_BINOP(i32_or)
UNIMPLEMENTED_GP_BINOP(i32_xor)
-UNIMPLEMENTED_SHIFTOP(i32_shl)
-UNIMPLEMENTED_SHIFTOP(i32_sar)
-UNIMPLEMENTED_SHIFTOP(i32_shr)
+UNIMPLEMENTED_I32_SHIFTOP(i32_shl)
+UNIMPLEMENTED_I32_SHIFTOP(i32_sar)
+UNIMPLEMENTED_I32_SHIFTOP(i32_shr)
+UNIMPLEMENTED_I64_BINOP(i64_add)
+UNIMPLEMENTED_I64_BINOP(i64_sub)
+UNIMPLEMENTED_I64_BINOP(i64_and)
+UNIMPLEMENTED_I64_BINOP(i64_or)
+UNIMPLEMENTED_I64_BINOP(i64_xor)
+UNIMPLEMENTED_I64_SHIFTOP(i64_shl)
+UNIMPLEMENTED_I64_SHIFTOP(i64_sar)
+UNIMPLEMENTED_I64_SHIFTOP(i64_shr)
UNIMPLEMENTED_GP_UNOP(i32_clz)
UNIMPLEMENTED_GP_UNOP(i32_ctz)
UNIMPLEMENTED_GP_UNOP(i32_popcnt)
-UNIMPLEMENTED_GP_BINOP(ptrsize_add)
UNIMPLEMENTED_FP_BINOP(f32_add)
UNIMPLEMENTED_FP_BINOP(f32_sub)
UNIMPLEMENTED_FP_BINOP(f32_mul)
+UNIMPLEMENTED_FP_BINOP(f32_div)
+UNIMPLEMENTED_FP_UNOP(f32_abs)
UNIMPLEMENTED_FP_UNOP(f32_neg)
+UNIMPLEMENTED_FP_UNOP(f32_ceil)
+UNIMPLEMENTED_FP_UNOP(f32_floor)
+UNIMPLEMENTED_FP_UNOP(f32_trunc)
+UNIMPLEMENTED_FP_UNOP(f32_nearest_int)
+UNIMPLEMENTED_FP_UNOP(f32_sqrt)
UNIMPLEMENTED_FP_BINOP(f64_add)
UNIMPLEMENTED_FP_BINOP(f64_sub)
UNIMPLEMENTED_FP_BINOP(f64_mul)
+UNIMPLEMENTED_FP_BINOP(f64_div)
+UNIMPLEMENTED_FP_UNOP(f64_abs)
UNIMPLEMENTED_FP_UNOP(f64_neg)
+UNIMPLEMENTED_FP_UNOP(f64_ceil)
+UNIMPLEMENTED_FP_UNOP(f64_floor)
+UNIMPLEMENTED_FP_UNOP(f64_trunc)
+UNIMPLEMENTED_FP_UNOP(f64_nearest_int)
+UNIMPLEMENTED_FP_UNOP(f64_sqrt)
#undef UNIMPLEMENTED_GP_BINOP
+#undef UNIMPLEMENTED_I64_BINOP
#undef UNIMPLEMENTED_GP_UNOP
#undef UNIMPLEMENTED_FP_BINOP
#undef UNIMPLEMENTED_FP_UNOP
-#undef UNIMPLEMENTED_SHIFTOP
+#undef UNIMPLEMENTED_I32_SHIFTOP
+#undef UNIMPLEMENTED_I64_SHIFTOP
+
+bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
+ LiftoffRegister dst,
+ LiftoffRegister src) {
+ BAILOUT("emit_type_conversion");
+ return true;
+}
void LiftoffAssembler::emit_jump(Label* label) { BAILOUT("emit_jump"); }
+void LiftoffAssembler::emit_jump(Register target) { BAILOUT("emit_jump"); }
+
void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
ValueType type, Register lhs,
Register rhs) {
BAILOUT("emit_cond_jump");
}
+void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
+ BAILOUT("emit_i32_eqz");
+}
+
void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
Register lhs, Register rhs) {
BAILOUT("emit_i32_set_cond");
}
+void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
+ BAILOUT("emit_i64_eqz");
+}
+
+void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ BAILOUT("emit_i64_set_cond");
+}
+
void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
DoubleRegister lhs,
DoubleRegister rhs) {
BAILOUT("emit_f32_set_cond");
}
+void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
+ DoubleRegister rhs) {
+ BAILOUT("emit_f64_set_cond");
+}
+
void LiftoffAssembler::StackCheck(Label* ool_code) { BAILOUT("StackCheck"); }
void LiftoffAssembler::CallTrapCallbackForTesting() {
@@ -203,25 +265,34 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
BAILOUT("DropStackSlotsAndRet");
}
-void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
+void LiftoffAssembler::PrepareCCall(wasm::FunctionSig* sig,
+ const LiftoffRegister* args,
+ ValueType out_argument_type) {
BAILOUT("PrepareCCall");
}
-void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
- uint32_t num_params) {
+void LiftoffAssembler::SetCCallRegParamAddr(Register dst, int param_byte_offset,
+ ValueType type) {
BAILOUT("SetCCallRegParamAddr");
}
-void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
- uint32_t param_idx,
- uint32_t num_params) {
+void LiftoffAssembler::SetCCallStackParamAddr(int stack_param_idx,
+ int param_byte_offset,
+ ValueType type) {
BAILOUT("SetCCallStackParamAddr");
}
+void LiftoffAssembler::LoadCCallOutArgument(LiftoffRegister dst, ValueType type,
+ int param_byte_offset) {
+ BAILOUT("LoadCCallOutArgument");
+}
+
void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
BAILOUT("CallC");
}
+void LiftoffAssembler::FinishCCall() { BAILOUT("FinishCCall"); }
+
void LiftoffAssembler::CallNativeWasmCode(Address addr) {
BAILOUT("CallNativeWasmCode");
}
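
For illustration (assumed macro expansion, not text from this patch): appending #name to each bailout message means the stubs now report which operation triggered the bailout. An instantiation such as UNIMPLEMENTED_I64_BINOP(i64_add) expands to roughly:

void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
                                    LiftoffRegister rhs) {
  BAILOUT("i64 binop: i64_add");  // previously just "i64 binop"
}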
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index 35943554cc..82b8e7e816 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -14,11 +14,15 @@ namespace v8 {
namespace internal {
namespace wasm {
+#define REQUIRE_CPU_FEATURE(name) \
+ if (!CpuFeatures::IsSupported(name)) return bailout("no " #name); \
+ CpuFeatureScope feature(this, name);
+
namespace liftoff {
-// ebp-8 holds the stack marker, ebp-16 is the wasm context, first stack slot
-// is located at ebp-24.
-constexpr int32_t kConstantStackSpace = 16;
+// ebp-4 holds the stack marker, ebp-8 is the instance parameter, first stack
+// slot is located at ebp-16.
+constexpr int32_t kConstantStackSpace = 8;
constexpr int32_t kFirstStackSlotOffset =
kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
@@ -33,7 +37,7 @@ inline Operand GetHalfStackSlot(uint32_t half_index) {
}
// TODO(clemensh): Make this a constexpr variable once Operand is constexpr.
-inline Operand GetContextOperand() { return Operand(ebp, -16); }
+inline Operand GetInstanceOperand() { return Operand(ebp, -8); }
static constexpr LiftoffRegList kByteRegs =
LiftoffRegList::FromBits<Register::ListOf<eax, ecx, edx, ebx>()>();
@@ -42,8 +46,48 @@ static_assert((kByteRegs & kGpCacheRegList) == kByteRegs,
"kByteRegs only contains gp cache registers");
// Use this register to store the address of the last argument pushed on the
-// stack for a call to C.
-static constexpr Register kCCallLastArgAddrReg = eax;
+// stack for a call to C. This register must be callee-saved according to the
+// C calling convention.
+static constexpr Register kCCallLastArgAddrReg = ebx;
+
+inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Operand src,
+ ValueType type) {
+ switch (type) {
+ case kWasmI32:
+ assm->mov(dst.gp(), src);
+ break;
+ case kWasmF32:
+ assm->movss(dst.fp(), src);
+ break;
+ case kWasmF64:
+ assm->movsd(dst.fp(), src);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
+ switch (type) {
+ case kWasmI32:
+ assm->push(reg.gp());
+ break;
+ case kWasmI64:
+ assm->push(reg.high_gp());
+ assm->push(reg.low_gp());
+ break;
+ case kWasmF32:
+ assm->sub(esp, Immediate(sizeof(float)));
+ assm->movss(Operand(esp, 0), reg.fp());
+ break;
+ case kWasmF64:
+ assm->sub(esp, Immediate(sizeof(double)));
+ assm->movsd(Operand(esp, 0), reg.fp());
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
} // namespace liftoff
@@ -93,20 +137,20 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
}
-void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
- int size) {
+void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
+ int size) {
DCHECK_LE(offset, kMaxInt);
- mov(dst, liftoff::GetContextOperand());
+ mov(dst, liftoff::GetInstanceOperand());
DCHECK_EQ(4, size);
mov(dst, Operand(dst, offset));
}
-void LiftoffAssembler::SpillContext(Register context) {
- mov(liftoff::GetContextOperand(), context);
+void LiftoffAssembler::SpillInstance(Register instance) {
+ mov(liftoff::GetInstanceOperand(), instance);
}
-void LiftoffAssembler::FillContextInto(Register dst) {
- mov(dst, liftoff::GetContextOperand());
+void LiftoffAssembler::FillInstanceInto(Register dst) {
+ mov(dst, liftoff::GetInstanceOperand());
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
@@ -258,19 +302,7 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
Operand src(ebp, kPointerSize * (caller_slot_idx + 1));
- switch (type) {
- case kWasmI32:
- mov(dst.gp(), src);
- break;
- case kWasmF32:
- movss(dst.fp(), src);
- break;
- case kWasmF64:
- movsd(dst.fp(), src);
- break;
- default:
- UNREACHABLE();
- }
+ liftoff::Load(this, dst, src, type);
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
@@ -290,10 +322,9 @@ void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg,
ValueType type) {
// TODO(wasm): Extract the destination register from the CallDescriptor.
// TODO(wasm): Add multi-return support.
- LiftoffRegister dst =
- reg.is_pair()
- ? LiftoffRegister::ForPair(LiftoffRegister(eax), LiftoffRegister(edx))
- : reg.is_gp() ? LiftoffRegister(eax) : LiftoffRegister(xmm1);
+ LiftoffRegister dst = reg.is_pair() ? LiftoffRegister::ForPair(eax, edx)
+ : reg.is_gp() ? LiftoffRegister(eax)
+ : LiftoffRegister(xmm1);
if (reg != dst) Move(dst, reg, type);
}
@@ -324,7 +355,7 @@ void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
break;
case kWasmI64:
mov(dst, reg.low_gp());
- mov(liftoff::GetHalfStackSlot(2 * index + 1), reg.high_gp());
+ mov(liftoff::GetHalfStackSlot(2 * index - 1), reg.high_gp());
break;
case kWasmF32:
movss(dst, reg.fp());
@@ -348,13 +379,11 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
int32_t low_word = value.to_i64();
int32_t high_word = value.to_i64() >> 32;
mov(dst, Immediate(low_word));
- mov(liftoff::GetHalfStackSlot(2 * index + 1), Immediate(high_word));
+ mov(liftoff::GetHalfStackSlot(2 * index - 1), Immediate(high_word));
break;
}
- case kWasmF32:
- mov(dst, Immediate(value.to_f32_boxed().get_bits()));
- break;
default:
+ // We do not track f32 and f64 constants, hence they are unreachable.
UNREACHABLE();
}
}
@@ -368,7 +397,7 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
break;
case kWasmI64:
mov(reg.low_gp(), src);
- mov(reg.high_gp(), liftoff::GetHalfStackSlot(2 * index + 1));
+ mov(reg.high_gp(), liftoff::GetHalfStackSlot(2 * index - 1));
break;
case kWasmF32:
movss(reg.fp(), src);
@@ -425,37 +454,37 @@ COMMUTATIVE_I32_BINOP(xor, xor_)
namespace liftoff {
inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
- Register lhs, Register rhs,
+ Register src, Register amount,
void (Assembler::*emit_shift)(Register),
LiftoffRegList pinned) {
pinned.set(dst);
- pinned.set(lhs);
- pinned.set(rhs);
+ pinned.set(src);
+ pinned.set(amount);
// If dst is ecx, compute into a tmp register first, then move to ecx.
if (dst == ecx) {
Register tmp = assm->GetUnusedRegister(kGpReg, pinned).gp();
- assm->mov(tmp, lhs);
- if (rhs != ecx) assm->mov(ecx, rhs);
+ assm->mov(tmp, src);
+ if (amount != ecx) assm->mov(ecx, amount);
(assm->*emit_shift)(tmp);
assm->mov(ecx, tmp);
return;
}
- // Move rhs into ecx. If ecx is in use, move its content to a tmp register
- // first. If lhs is ecx, lhs is now the tmp register.
+ // Move amount into ecx. If ecx is in use, move its content to a tmp register
+ // first. If src is ecx, src is now the tmp register.
Register tmp_reg = no_reg;
- if (rhs != ecx) {
+ if (amount != ecx) {
if (assm->cache_state()->is_used(LiftoffRegister(ecx)) ||
pinned.has(LiftoffRegister(ecx))) {
tmp_reg = assm->GetUnusedRegister(kGpReg, pinned).gp();
assm->mov(tmp_reg, ecx);
- if (lhs == ecx) lhs = tmp_reg;
+ if (src == ecx) src = tmp_reg;
}
- assm->mov(ecx, rhs);
+ assm->mov(ecx, amount);
}
// Do the actual shift.
- if (dst != lhs) assm->mov(dst, lhs);
+ if (dst != src) assm->mov(dst, src);
(assm->*emit_shift)(dst);
// Restore ecx if needed.
@@ -463,19 +492,22 @@ inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
}
} // namespace liftoff
-void LiftoffAssembler::emit_i32_shl(Register dst, Register lhs, Register rhs,
+void LiftoffAssembler::emit_i32_shl(Register dst, Register src, Register amount,
LiftoffRegList pinned) {
- liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shl_cl, pinned);
+ liftoff::EmitShiftOperation(this, dst, src, amount, &Assembler::shl_cl,
+ pinned);
}
-void LiftoffAssembler::emit_i32_sar(Register dst, Register lhs, Register rhs,
+void LiftoffAssembler::emit_i32_sar(Register dst, Register src, Register amount,
LiftoffRegList pinned) {
- liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::sar_cl, pinned);
+ liftoff::EmitShiftOperation(this, dst, src, amount, &Assembler::sar_cl,
+ pinned);
}
-void LiftoffAssembler::emit_i32_shr(Register dst, Register lhs, Register rhs,
+void LiftoffAssembler::emit_i32_shr(Register dst, Register src, Register amount,
LiftoffRegList pinned) {
- liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shr_cl, pinned);
+ liftoff::EmitShiftOperation(this, dst, src, amount, &Assembler::shr_cl,
+ pinned);
}
bool LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
@@ -519,9 +551,113 @@ bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
return true;
}
-void LiftoffAssembler::emit_ptrsize_add(Register dst, Register lhs,
- Register rhs) {
- emit_i32_add(dst, lhs, rhs);
+namespace liftoff {
+template <void (Assembler::*op)(Register, Register),
+ void (Assembler::*op_with_carry)(Register, Register)>
+inline void OpWithCarry(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister lhs, LiftoffRegister rhs) {
+ // First, compute the low half of the result, potentially into a temporary dst
+ // register if {dst.low_gp()} equals {rhs.low_gp()} or any register we need to
+ // keep alive for computing the upper half.
+ LiftoffRegList keep_alive = LiftoffRegList::ForRegs(lhs.high_gp(), rhs);
+ Register dst_low = keep_alive.has(dst.low_gp())
+ ? assm->GetUnusedRegister(kGpReg, keep_alive).gp()
+ : dst.low_gp();
+
+ if (dst_low != lhs.low_gp()) assm->mov(dst_low, lhs.low_gp());
+ (assm->*op)(dst_low, rhs.low_gp());
+
+ // Now compute the upper half, while keeping alive the previous result.
+ keep_alive = LiftoffRegList::ForRegs(dst_low, rhs.high_gp());
+ Register dst_high = keep_alive.has(dst.high_gp())
+ ? assm->GetUnusedRegister(kGpReg, keep_alive).gp()
+ : dst.high_gp();
+
+ if (dst_high != lhs.high_gp()) assm->mov(dst_high, lhs.high_gp());
+ (assm->*op_with_carry)(dst_high, rhs.high_gp());
+
+ // If necessary, move result into the right registers.
+ LiftoffRegister tmp_result = LiftoffRegister::ForPair(dst_low, dst_high);
+ if (tmp_result != dst) assm->Move(dst, tmp_result, kWasmI64);
+}
+} // namespace liftoff
+
+void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::OpWithCarry<&Assembler::add, &Assembler::adc>(this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::OpWithCarry<&Assembler::sub, &Assembler::sbb>(this, dst, lhs, rhs);
+}
+
+namespace liftoff {
+inline bool PairContains(LiftoffRegister pair, Register reg) {
+ return pair.low_gp() == reg || pair.high_gp() == reg;
+}
+
+inline LiftoffRegister ReplaceInPair(LiftoffRegister pair, Register old_reg,
+ Register new_reg) {
+ if (pair.low_gp() == old_reg) {
+ return LiftoffRegister::ForPair(new_reg, pair.high_gp());
+ }
+ if (pair.high_gp() == old_reg) {
+ return LiftoffRegister::ForPair(pair.low_gp(), new_reg);
+ }
+ return pair;
+}
+
+inline void Emit64BitShiftOperation(
+ LiftoffAssembler* assm, LiftoffRegister dst, LiftoffRegister src,
+ Register amount, void (TurboAssembler::*emit_shift)(Register, Register),
+ LiftoffRegList pinned) {
+ pinned.set(dst);
+ pinned.set(src);
+ pinned.set(amount);
+ // If {dst} contains {ecx}, replace it by an unused register, which is then
+ // moved to {ecx} in the end.
+ Register ecx_replace = no_reg;
+ if (PairContains(dst, ecx)) {
+ ecx_replace = pinned.set(assm->GetUnusedRegister(kGpReg, pinned)).gp();
+ dst = ReplaceInPair(dst, ecx, ecx_replace);
+  // If {amount} needs to be moved to {ecx}, but {ecx} is in use (and not part
+  // of {dst}, in which case it would be overwritten anyway), move {ecx} to a
+  // tmp register and restore it at the end.
+ } else if (amount != ecx &&
+ assm->cache_state()->is_used(LiftoffRegister(ecx))) {
+ ecx_replace = assm->GetUnusedRegister(kGpReg, pinned).gp();
+ assm->mov(ecx_replace, ecx);
+ }
+
+ assm->ParallelRegisterMove(
+ {{dst, src, kWasmI64},
+ {LiftoffRegister{ecx}, LiftoffRegister{amount}, kWasmI32}});
+
+ // Do the actual shift.
+ (assm->*emit_shift)(dst.high_gp(), dst.low_gp());
+
+ // Restore {ecx} if needed.
+ if (ecx_replace != no_reg) assm->mov(ecx, ecx_replace);
+}
+} // namespace liftoff
+
+void LiftoffAssembler::emit_i64_shl(LiftoffRegister dst, LiftoffRegister src,
+ Register amount, LiftoffRegList pinned) {
+ liftoff::Emit64BitShiftOperation(this, dst, src, amount,
+ &TurboAssembler::ShlPair_cl, pinned);
+}
+
+void LiftoffAssembler::emit_i64_sar(LiftoffRegister dst, LiftoffRegister src,
+ Register amount, LiftoffRegList pinned) {
+ liftoff::Emit64BitShiftOperation(this, dst, src, amount,
+ &TurboAssembler::SarPair_cl, pinned);
+}
+
+void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
+ Register amount, LiftoffRegList pinned) {
+ liftoff::Emit64BitShiftOperation(this, dst, src, amount,
+ &TurboAssembler::ShrPair_cl, pinned);
}
void LiftoffAssembler::emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
@@ -565,6 +701,32 @@ void LiftoffAssembler::emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
}
}
+void LiftoffAssembler::emit_f32_div(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vdivss(dst, lhs, rhs);
+ } else if (dst == rhs) {
+ movss(kScratchDoubleReg, rhs);
+ movss(dst, lhs);
+ divss(dst, kScratchDoubleReg);
+ } else {
+ if (dst != lhs) movss(dst, lhs);
+ divss(dst, rhs);
+ }
+}
+
+void LiftoffAssembler::emit_f32_abs(DoubleRegister dst, DoubleRegister src) {
+ static constexpr uint32_t kSignBit = uint32_t{1} << 31;
+ if (dst == src) {
+ TurboAssembler::Move(kScratchDoubleReg, kSignBit - 1);
+ Andps(dst, kScratchDoubleReg);
+ } else {
+ TurboAssembler::Move(dst, kSignBit - 1);
+ Andps(dst, src);
+ }
+}
+
void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
static constexpr uint32_t kSignBit = uint32_t{1} << 31;
if (dst == src) {
@@ -576,6 +738,31 @@ void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
}
}
+void LiftoffAssembler::emit_f32_ceil(DoubleRegister dst, DoubleRegister src) {
+ REQUIRE_CPU_FEATURE(SSE4_1);
+ roundss(dst, src, kRoundUp);
+}
+
+void LiftoffAssembler::emit_f32_floor(DoubleRegister dst, DoubleRegister src) {
+ REQUIRE_CPU_FEATURE(SSE4_1);
+ roundss(dst, src, kRoundDown);
+}
+
+void LiftoffAssembler::emit_f32_trunc(DoubleRegister dst, DoubleRegister src) {
+ REQUIRE_CPU_FEATURE(SSE4_1);
+ roundss(dst, src, kRoundToZero);
+}
+
+void LiftoffAssembler::emit_f32_nearest_int(DoubleRegister dst,
+ DoubleRegister src) {
+ REQUIRE_CPU_FEATURE(SSE4_1);
+ roundss(dst, src, kRoundToNearest);
+}
+
+void LiftoffAssembler::emit_f32_sqrt(DoubleRegister dst, DoubleRegister src) {
+ Sqrtss(dst, src);
+}
+
void LiftoffAssembler::emit_f64_add(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
if (CpuFeatures::IsSupported(AVX)) {
@@ -617,6 +804,32 @@ void LiftoffAssembler::emit_f64_mul(DoubleRegister dst, DoubleRegister lhs,
}
}
+void LiftoffAssembler::emit_f64_div(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vdivsd(dst, lhs, rhs);
+ } else if (dst == rhs) {
+ movsd(kScratchDoubleReg, rhs);
+ movsd(dst, lhs);
+ divsd(dst, kScratchDoubleReg);
+ } else {
+ if (dst != lhs) movsd(dst, lhs);
+ divsd(dst, rhs);
+ }
+}
+
+void LiftoffAssembler::emit_f64_abs(DoubleRegister dst, DoubleRegister src) {
+ static constexpr uint64_t kSignBit = uint64_t{1} << 63;
+ if (dst == src) {
+ TurboAssembler::Move(kScratchDoubleReg, kSignBit - 1);
+ Andpd(dst, kScratchDoubleReg);
+ } else {
+ TurboAssembler::Move(dst, kSignBit - 1);
+ Andpd(dst, src);
+ }
+}
+
void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) {
static constexpr uint64_t kSignBit = uint64_t{1} << 63;
if (dst == src) {
@@ -628,8 +841,99 @@ void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) {
}
}
+void LiftoffAssembler::emit_f64_ceil(DoubleRegister dst, DoubleRegister src) {
+ REQUIRE_CPU_FEATURE(SSE4_1);
+ roundsd(dst, src, kRoundUp);
+}
+
+void LiftoffAssembler::emit_f64_floor(DoubleRegister dst, DoubleRegister src) {
+ REQUIRE_CPU_FEATURE(SSE4_1);
+ roundsd(dst, src, kRoundDown);
+}
+
+void LiftoffAssembler::emit_f64_trunc(DoubleRegister dst, DoubleRegister src) {
+ REQUIRE_CPU_FEATURE(SSE4_1);
+ roundsd(dst, src, kRoundToZero);
+}
+
+void LiftoffAssembler::emit_f64_nearest_int(DoubleRegister dst,
+ DoubleRegister src) {
+ REQUIRE_CPU_FEATURE(SSE4_1);
+ roundsd(dst, src, kRoundToNearest);
+}
+
+void LiftoffAssembler::emit_f64_sqrt(DoubleRegister dst, DoubleRegister src) {
+ Sqrtsd(dst, src);
+}
+
+bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
+ LiftoffRegister dst,
+ LiftoffRegister src) {
+ switch (opcode) {
+ case kExprI32ConvertI64:
+ if (dst.gp() != src.low_gp()) mov(dst.gp(), src.low_gp());
+ return true;
+ case kExprI32ReinterpretF32:
+ Movd(dst.gp(), src.fp());
+ return true;
+ case kExprI64SConvertI32:
+ if (dst.low_gp() != src.gp()) mov(dst.low_gp(), src.gp());
+ mov(dst.high_gp(), src.gp());
+ sar(dst.high_gp(), 31);
+ return true;
+ case kExprI64UConvertI32:
+ if (dst.low_gp() != src.gp()) mov(dst.low_gp(), src.gp());
+ xor_(dst.high_gp(), dst.high_gp());
+ return true;
+ case kExprI64ReinterpretF64:
+ // Push src to the stack.
+ sub(esp, Immediate(8));
+ movsd(Operand(esp, 0), src.fp());
+ // Pop to dst.
+ pop(dst.low_gp());
+ pop(dst.high_gp());
+ return true;
+ case kExprF32SConvertI32:
+ cvtsi2ss(dst.fp(), src.gp());
+ return true;
+ case kExprF32UConvertI32: {
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(dst, src);
+ Register scratch = GetUnusedRegister(kGpReg, pinned).gp();
+ Cvtui2ss(dst.fp(), src.gp(), scratch);
+ return true;
+ }
+ case kExprF32ConvertF64:
+ cvtsd2ss(dst.fp(), src.fp());
+ return true;
+ case kExprF32ReinterpretI32:
+ Movd(dst.fp(), src.gp());
+ return true;
+ case kExprF64SConvertI32:
+ Cvtsi2sd(dst.fp(), src.gp());
+ return true;
+ case kExprF64UConvertI32:
+ LoadUint32(dst.fp(), src.gp());
+ return true;
+ case kExprF64ConvertF32:
+ cvtss2sd(dst.fp(), src.fp());
+ return true;
+ case kExprF64ReinterpretI64:
+ // Push src to the stack.
+ push(src.high_gp());
+ push(src.low_gp());
+ // Pop to dst.
+ movsd(dst.fp(), Operand(esp, 0));
+ add(esp, Immediate(8));
+ return true;
+ default:
+ return false;
+ }
+}
+
void LiftoffAssembler::emit_jump(Label* label) { jmp(label); }
+void LiftoffAssembler::emit_jump(Register target) { jmp(target); }
+
void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
ValueType type, Register lhs,
Register rhs) {
@@ -650,51 +954,140 @@ void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
}
namespace liftoff {
-inline void setcc_32(LiftoffAssembler* assm, Condition cond, Register dst) {
- Register tmp_byte_reg = dst;
- // Only the lower 4 registers can be addressed as 8-bit registers.
- if (!dst.is_byte_register()) {
- LiftoffRegList pinned = LiftoffRegList::ForRegs(dst);
- // {GetUnusedRegister()} may insert move instructions to spill registers to
- // the stack. This is OK because {mov} does not change the status flags.
- tmp_byte_reg = assm->GetUnusedRegister(liftoff::kByteRegs, pinned).gp();
- }
+// Get a temporary byte register, using {candidate} if possible.
+// Might spill, but always keeps status flags intact.
+inline Register GetTmpByteRegister(LiftoffAssembler* assm, Register candidate) {
+ if (candidate.is_byte_register()) return candidate;
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(candidate);
+ // {GetUnusedRegister()} may insert move instructions to spill registers to
+ // the stack. This is OK because {mov} does not change the status flags.
+ return assm->GetUnusedRegister(liftoff::kByteRegs, pinned).gp();
+}
+
+// Setcc into dst register, given a scratch byte register (might be the same as
+// dst). Never spills.
+inline void setcc_32_no_spill(LiftoffAssembler* assm, Condition cond,
+ Register dst, Register tmp_byte_reg) {
assm->setcc(cond, tmp_byte_reg);
assm->movzx_b(dst, tmp_byte_reg);
}
+
+// Setcc into dst register (no constraints). Might spill.
+inline void setcc_32(LiftoffAssembler* assm, Condition cond, Register dst) {
+ Register tmp_byte_reg = GetTmpByteRegister(assm, dst);
+ setcc_32_no_spill(assm, cond, dst, tmp_byte_reg);
+}
+
} // namespace liftoff
+void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
+ test(src, src);
+ liftoff::setcc_32(this, equal, dst);
+}
+
void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
Register lhs, Register rhs) {
- if (rhs != no_reg) {
- cmp(lhs, rhs);
+ cmp(lhs, rhs);
+ liftoff::setcc_32(this, cond, dst);
+}
+
+void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
+ // Compute the OR of both registers in the src pair, using dst as scratch
+ // register. Then check whether the result is equal to zero.
+ if (src.low_gp() == dst) {
+ or_(dst, src.high_gp());
} else {
- test(lhs, lhs);
+ if (src.high_gp() != dst) mov(dst, src.high_gp());
+ or_(dst, src.low_gp());
}
- liftoff::setcc_32(this, cond, dst);
+ liftoff::setcc_32(this, equal, dst);
}
-void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
- DoubleRegister lhs,
- DoubleRegister rhs) {
+namespace liftoff {
+inline Condition cond_make_unsigned(Condition cond) {
+ switch (cond) {
+ case kSignedLessThan:
+ return kUnsignedLessThan;
+ case kSignedLessEqual:
+ return kUnsignedLessEqual;
+ case kSignedGreaterThan:
+ return kUnsignedGreaterThan;
+ case kSignedGreaterEqual:
+ return kUnsignedGreaterEqual;
+ default:
+ return cond;
+ }
+}
+} // namespace liftoff
+
+void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ // Get the tmp byte register out here, such that we don't conditionally spill
+ // (this cannot be reflected in the cache state).
+ Register tmp_byte_reg = liftoff::GetTmpByteRegister(this, dst);
+
+ // For signed i64 comparisons, we still need to use unsigned comparison for
+ // the low word (the only bit carrying signedness information is the MSB in
+ // the high word).
+ Condition unsigned_cond = liftoff::cond_make_unsigned(cond);
+ Label setcc;
+ Label cont;
+  // Compare high word first. If it differs, use it for the setcc. If it's
+ // equal, compare the low word and use that for setcc.
+ cmp(lhs.high_gp(), rhs.high_gp());
+ j(not_equal, &setcc, Label::kNear);
+ cmp(lhs.low_gp(), rhs.low_gp());
+ if (unsigned_cond != cond) {
+    // If the condition predicate for the low word differs from that for the
+    // high word, emit a separate setcc sequence for the low word.
+ liftoff::setcc_32_no_spill(this, unsigned_cond, dst, tmp_byte_reg);
+ jmp(&cont);
+ }
+ bind(&setcc);
+ liftoff::setcc_32_no_spill(this, cond, dst, tmp_byte_reg);
+ bind(&cont);
+}
+
+namespace liftoff {
+template <void (Assembler::*cmp_op)(DoubleRegister, DoubleRegister)>
+void EmitFloatSetCond(LiftoffAssembler* assm, Condition cond, Register dst,
+ DoubleRegister lhs, DoubleRegister rhs) {
Label cont;
Label not_nan;
- ucomiss(lhs, rhs);
- // IF PF is one, one of the operands was Nan. This needs special handling.
- j(parity_odd, &not_nan, Label::kNear);
+ // Get the tmp byte register out here, such that we don't conditionally spill
+ // (this cannot be reflected in the cache state).
+ Register tmp_byte_reg = GetTmpByteRegister(assm, dst);
+
+ (assm->*cmp_op)(lhs, rhs);
+  // If PF is one, one of the operands was NaN. This needs special handling.
+ assm->j(parity_odd, &not_nan, Label::kNear);
// Return 1 for f32.ne, 0 for all other cases.
if (cond == not_equal) {
- mov(dst, Immediate(1));
+ assm->mov(dst, Immediate(1));
} else {
- xor_(dst, dst);
+ assm->xor_(dst, dst);
}
- jmp(&cont, Label::kNear);
- bind(&not_nan);
+ assm->jmp(&cont, Label::kNear);
+ assm->bind(&not_nan);
- liftoff::setcc_32(this, cond, dst);
- bind(&cont);
+ setcc_32_no_spill(assm, cond, dst, tmp_byte_reg);
+ assm->bind(&cont);
+}
+} // namespace liftoff
+
+void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
+ DoubleRegister rhs) {
+ liftoff::EmitFloatSetCond<&Assembler::ucomiss>(this, cond, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
+ DoubleRegister rhs) {
+ liftoff::EmitFloatSetCond<&Assembler::ucomisd>(this, cond, dst, lhs, rhs);
}
void LiftoffAssembler::StackCheck(Label* ool_code) {
@@ -722,7 +1115,7 @@ void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
DCHECK_EQ(kLowWord, half);
push(liftoff::GetHalfStackSlot(2 * src_index - 1));
}
- push(liftoff::GetHalfStackSlot(2 * src_index +
+ push(liftoff::GetHalfStackSlot(2 * src_index -
(half == kLowWord ? 0 : 1)));
break;
case VarState::kRegister:
@@ -743,22 +1136,7 @@ void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg,
ValueType type) {
- switch (type) {
- case kWasmI32:
- push(reg.gp());
- break;
- case kWasmF32:
- sub(esp, Immediate(sizeof(float)));
- movss(Operand(esp, 0), reg.fp());
- break;
- case kWasmF64:
- sub(esp, Immediate(sizeof(double)));
- movsd(Operand(esp, 0), reg.fp());
- break;
- default:
- // Also kWasmI64 is unreachable, as it will always be pushed as two halfs.
- UNREACHABLE();
- }
+ liftoff::push(this, reg, type);
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
@@ -806,42 +1184,67 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
ret(static_cast<int>(num_stack_slots * kPointerSize));
}
-void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
- for (size_t param = 0; param < num_params; ++param) {
- push(args[param]);
+void LiftoffAssembler::PrepareCCall(wasm::FunctionSig* sig,
+ const LiftoffRegister* args,
+ ValueType out_argument_type) {
+ int pushed_bytes = 0;
+ for (ValueType param_type : sig->parameters()) {
+ pushed_bytes += RoundUp<kPointerSize>(WasmOpcodes::MemSize(param_type));
+ liftoff::push(this, *args++, param_type);
+ }
+ if (out_argument_type != kWasmStmt) {
+ int size = RoundUp<kPointerSize>(WasmOpcodes::MemSize(out_argument_type));
+ sub(esp, Immediate(size));
+ pushed_bytes += size;
}
- mov(liftoff::kCCallLastArgAddrReg, esp);
- constexpr Register kScratch = ebx;
+ // Save the original sp (before the first push), such that we can later
+ // compute pointers to the pushed values. Do this only *after* pushing the
+ // values, because {kCCallLastArgAddrReg} might collide with an arg register.
+ lea(liftoff::kCCallLastArgAddrReg, Operand(esp, pushed_bytes));
+ constexpr Register kScratch = ecx;
static_assert(kScratch != liftoff::kCCallLastArgAddrReg, "collision");
- PrepareCallCFunction(num_params, kScratch);
+ int num_c_call_arguments = static_cast<int>(sig->parameter_count()) +
+ (out_argument_type != kWasmStmt);
+ PrepareCallCFunction(num_c_call_arguments, kScratch);
+}
+
+void LiftoffAssembler::SetCCallRegParamAddr(Register dst, int param_byte_offset,
+ ValueType type) {
+ // Check that we don't accidentally override kCCallLastArgAddrReg.
+ DCHECK_NE(liftoff::kCCallLastArgAddrReg, dst);
+ lea(dst, Operand(liftoff::kCCallLastArgAddrReg, -param_byte_offset));
}
-void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
- uint32_t num_params) {
- int offset = kPointerSize * static_cast<int>(num_params - 1 - param_idx);
- lea(dst, Operand(liftoff::kCCallLastArgAddrReg, offset));
+void LiftoffAssembler::SetCCallStackParamAddr(int stack_param_idx,
+ int param_byte_offset,
+ ValueType type) {
+ static constexpr Register kScratch = ecx;
+ SetCCallRegParamAddr(kScratch, param_byte_offset, type);
+ mov(Operand(esp, stack_param_idx * kPointerSize), kScratch);
}
-void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
- uint32_t param_idx,
- uint32_t num_params) {
- constexpr Register kScratch = ebx;
- static_assert(kScratch != liftoff::kCCallLastArgAddrReg, "collision");
- int offset = kPointerSize * static_cast<int>(num_params - 1 - param_idx);
- lea(kScratch, Operand(liftoff::kCCallLastArgAddrReg, offset));
- mov(Operand(esp, param_idx * kPointerSize), kScratch);
+void LiftoffAssembler::LoadCCallOutArgument(LiftoffRegister dst, ValueType type,
+ int param_byte_offset) {
+ // Check that we don't accidentally override kCCallLastArgAddrReg.
+ DCHECK_NE(LiftoffRegister(liftoff::kCCallLastArgAddrReg), dst);
+ Operand src(liftoff::kCCallLastArgAddrReg, -param_byte_offset);
+ liftoff::Load(this, dst, src, type);
}
void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
CallCFunction(ext_ref, static_cast<int>(num_params));
}
+void LiftoffAssembler::FinishCCall() {
+ mov(esp, liftoff::kCCallLastArgAddrReg);
+}
+
void LiftoffAssembler::CallNativeWasmCode(Address addr) {
wasm_call(addr, RelocInfo::WASM_CALL);
}
void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
- // Set context to zero.
+ // Set instance to zero.
xor_(esi, esi);
CallRuntimeDelayed(zone, fid);
}
@@ -866,6 +1269,8 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
add(esp, Immediate(size));
}
+#undef REQUIRE_CPU_FEATURE
+
} // namespace wasm
} // namespace internal
} // namespace v8
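
To make the OpWithCarry helper above concrete (a portable sketch, not code from this patch; the function name is made up for illustration): on ia32 an i64 value lives in a pair of 32-bit registers, so a 64-bit add is a low-half add followed by a high-half add that consumes the carry, i.e. the classic add/adc pairing (sub/sbb for subtraction):

#include <cstdint>

// Models the add/adc pairing used for i64.add on a 32-bit target: the low
// halves are added first, and the carry out of that addition is folded into
// the sum of the high halves.
void I64AddViaHalves(uint32_t lhs_lo, uint32_t lhs_hi, uint32_t rhs_lo,
                     uint32_t rhs_hi, uint32_t* dst_lo, uint32_t* dst_hi) {
  *dst_lo = lhs_lo + rhs_lo;                    // add  (low halves)
  uint32_t carry = (*dst_lo < lhs_lo) ? 1 : 0;  // carry out of the low add
  *dst_hi = lhs_hi + rhs_hi + carry;            // adc  (high halves + carry)
}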
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.cc b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
index 09b8229dc1..9f910d59fc 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
@@ -205,7 +205,7 @@ class StackTransferRecipe {
DCHECK_EQ(kWasmI64, src.type());
switch (src.loc()) {
case VarState::kStack:
- LoadI64HalfStackSlot(dst, 2 * index + (half == kLowWord ? 0 : 1));
+ LoadI64HalfStackSlot(dst, 2 * index - (half == kLowWord ? 0 : 1));
break;
case VarState::kRegister: {
LiftoffRegister src_half =
@@ -338,7 +338,7 @@ void LiftoffAssembler::CacheState::Split(const CacheState& source) {
// TODO(clemensh): Provide a reasonably sized buffer, based on wasm function
// size.
LiftoffAssembler::LiftoffAssembler(Isolate* isolate)
- : TurboAssembler(isolate, nullptr, 0, CodeObjectRequired::kYes) {}
+ : TurboAssembler(isolate, nullptr, 0, CodeObjectRequired::kNo) {}
LiftoffAssembler::~LiftoffAssembler() {
if (num_locals_ > kInlineLocalTypes) {
@@ -346,47 +346,23 @@ LiftoffAssembler::~LiftoffAssembler() {
}
}
-LiftoffRegister LiftoffAssembler::GetBinaryOpTargetRegister(
- RegClass rc, LiftoffRegList pinned) {
- auto& slot_lhs = *(cache_state_.stack_state.end() - 2);
- if (slot_lhs.is_reg() && GetNumUses(slot_lhs.reg()) == 1) {
- DCHECK_EQ(rc, slot_lhs.reg().reg_class());
- return slot_lhs.reg();
- }
- auto& slot_rhs = *(cache_state_.stack_state.end() - 1);
- if (slot_rhs.is_reg() && GetNumUses(slot_rhs.reg()) == 1) {
- DCHECK_EQ(rc, slot_rhs.reg().reg_class());
- return slot_rhs.reg();
- }
- return GetUnusedRegister(rc, pinned);
-}
-
-LiftoffRegister LiftoffAssembler::GetUnaryOpTargetRegister(
- RegClass rc, LiftoffRegList pinned) {
- auto& slot_src = cache_state_.stack_state.back();
- if (slot_src.is_reg() && GetNumUses(slot_src.reg()) == 1) {
- DCHECK_EQ(rc, slot_src.reg().reg_class());
- return slot_src.reg();
- }
- return GetUnusedRegister(rc, pinned);
-}
-
-LiftoffRegister LiftoffAssembler::PopToRegister(RegClass rc,
- LiftoffRegList pinned) {
+LiftoffRegister LiftoffAssembler::PopToRegister(LiftoffRegList pinned) {
DCHECK(!cache_state_.stack_state.empty());
VarState slot = cache_state_.stack_state.back();
cache_state_.stack_state.pop_back();
switch (slot.loc()) {
case VarState::kStack: {
- LiftoffRegister reg = GetUnusedRegister(rc, pinned);
+ LiftoffRegister reg =
+ GetUnusedRegister(reg_class_for(slot.type()), pinned);
Fill(reg, cache_state_.stack_height(), slot.type());
return reg;
}
case VarState::kRegister:
- DCHECK_EQ(rc, slot.reg_class());
cache_state_.dec_used(slot.reg());
return slot.reg();
case VarState::KIntConst: {
+ RegClass rc =
+ kNeedI64RegPair && slot.type() == kWasmI64 ? kGpRegPair : kGpReg;
LiftoffRegister reg = GetUnusedRegister(rc, pinned);
LoadConstant(reg, slot.constant());
return reg;
@@ -461,7 +437,7 @@ void LiftoffAssembler::SpillAllRegisters() {
void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
Register* target,
- LiftoffRegister* explicit_context) {
+ LiftoffRegister* target_instance) {
uint32_t num_params = static_cast<uint32_t>(sig->parameter_count());
// Input 0 is the call target.
constexpr size_t kInputShift = 1;
@@ -479,14 +455,14 @@ void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
StackTransferRecipe stack_transfers(this);
LiftoffRegList param_regs;
- // Move the explicit context (if any) into the correct context register.
- compiler::LinkageLocation context_loc =
+ // Move the target instance (if supplied) into the correct instance register.
+ compiler::LinkageLocation instance_loc =
call_descriptor->GetInputLocation(kInputShift);
- DCHECK(context_loc.IsRegister() && !context_loc.IsAnyRegister());
- LiftoffRegister context_reg(Register::from_code(context_loc.AsRegister()));
- param_regs.set(context_reg);
- if (explicit_context && *explicit_context != context_reg) {
- stack_transfers.MoveRegister(context_reg, *explicit_context, kWasmIntPtr);
+ DCHECK(instance_loc.IsRegister() && !instance_loc.IsAnyRegister());
+ LiftoffRegister instance_reg(Register::from_code(instance_loc.AsRegister()));
+ param_regs.set(instance_reg);
+ if (target_instance && *target_instance != instance_reg) {
+ stack_transfers.MoveRegister(instance_reg, *target_instance, kWasmIntPtr);
}
// Now move all parameter values into the right slot for the call.
@@ -528,7 +504,7 @@ void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
}
}
}
- // {call_desc_input_idx} should point after the context parameter now.
+ // {call_desc_input_idx} should point after the instance parameter now.
DCHECK_EQ(call_desc_input_idx, kInputShift + 1);
// If the target register overlaps with a parameter register, then move the
@@ -547,7 +523,7 @@ void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
}
}
- // Execute the stack transfers before filling the context register.
+ // Execute the stack transfers before filling the instance register.
stack_transfers.Execute();
// Pop parameters from the value stack.
@@ -557,9 +533,9 @@ void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
// Reset register use counters.
cache_state_.reset_used_registers();
- // Reload the context from the stack.
- if (!explicit_context) {
- FillContextInto(context_reg.gp());
+ // Reload the instance from the stack.
+ if (!target_instance) {
+ FillInstanceInto(instance_reg.gp());
}
}
@@ -579,7 +555,7 @@ void LiftoffAssembler::FinishCall(wasm::FunctionSig* sig,
LiftoffRegister high_reg = LiftoffRegister::from_code(
rc, call_descriptor->GetReturnLocation(1).AsRegister());
DCHECK(GetCacheRegList(rc).has(high_reg));
- return_reg = LiftoffRegister::ForPair(return_reg, high_reg);
+ return_reg = LiftoffRegister::ForPair(return_reg.gp(), high_reg.gp());
}
DCHECK(!cache_state_.is_used(return_reg));
PushRegister(return_type, return_reg);
@@ -589,6 +565,7 @@ void LiftoffAssembler::FinishCall(wasm::FunctionSig* sig,
void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src,
ValueType type) {
DCHECK_EQ(dst.reg_class(), src.reg_class());
+ DCHECK_NE(dst, src);
if (kNeedI64RegPair && dst.is_pair()) {
// Use the {StackTransferRecipe} to move pairs, as the registers in the
// pairs might overlap.
@@ -600,6 +577,15 @@ void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src,
}
}
+void LiftoffAssembler::ParallelRegisterMove(
+ std::initializer_list<ParallelRegisterMoveTuple> tuples) {
+ StackTransferRecipe stack_transfers(this);
+ for (auto tuple : tuples) {
+ if (tuple.dst == tuple.src) continue;
+ stack_transfers.MoveRegister(tuple.dst, tuple.src, tuple.type);
+ }
+}
+
LiftoffRegister LiftoffAssembler::SpillOneRegister(LiftoffRegList candidates,
LiftoffRegList pinned) {
// Spill one cached value to free a register.
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index 99d9814dea..fafce663b7 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -104,8 +104,7 @@ class LiftoffAssembler : public TurboAssembler {
};
};
- static_assert(IS_TRIVIALLY_COPYABLE(VarState),
- "VarState should be trivially copyable");
+ ASSERT_TRIVIALLY_COPYABLE(VarState);
struct CacheState {
// Allow default construction, move construction, and move assignment.
@@ -141,8 +140,8 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister unused_register(RegClass rc,
LiftoffRegList pinned = {}) const {
if (kNeedI64RegPair && rc == kGpRegPair) {
- LiftoffRegister low = pinned.set(unused_register(kGpReg, pinned));
- LiftoffRegister high = unused_register(kGpReg, pinned);
+ Register low = pinned.set(unused_register(kGpReg, pinned)).gp();
+ Register high = unused_register(kGpReg, pinned).gp();
return LiftoffRegister::ForPair(low, high);
}
DCHECK(rc == kGpReg || rc == kFpReg);
@@ -188,6 +187,11 @@ class LiftoffAssembler : public TurboAssembler {
}
uint32_t get_use_count(LiftoffRegister reg) const {
+ if (reg.is_pair()) {
+ DCHECK_EQ(register_use_count[reg.low().liftoff_code()],
+ register_use_count[reg.high().liftoff_code()]);
+ reg = reg.low();
+ }
DCHECK_GT(arraysize(register_use_count), reg.liftoff_code());
return register_use_count[reg.liftoff_code()];
}
@@ -242,12 +246,7 @@ class LiftoffAssembler : public TurboAssembler {
explicit LiftoffAssembler(Isolate* isolate);
~LiftoffAssembler();
- LiftoffRegister GetBinaryOpTargetRegister(RegClass,
- LiftoffRegList pinned = {});
- LiftoffRegister GetUnaryOpTargetRegister(RegClass,
- LiftoffRegList pinned = {});
-
- LiftoffRegister PopToRegister(RegClass, LiftoffRegList pinned = {});
+ LiftoffRegister PopToRegister(LiftoffRegList pinned = {});
void PushRegister(ValueType type, LiftoffRegister reg) {
DCHECK_EQ(reg_class_for(type), reg.reg_class());
@@ -261,12 +260,24 @@ class LiftoffAssembler : public TurboAssembler {
return cache_state_.get_use_count(reg);
}
+ // Get an unused register for class {rc}, reusing one of {try_first} if
+ // possible.
+ LiftoffRegister GetUnusedRegister(
+ RegClass rc, std::initializer_list<LiftoffRegister> try_first,
+ LiftoffRegList pinned = {}) {
+ for (LiftoffRegister reg : try_first) {
+ DCHECK_EQ(reg.reg_class(), rc);
+ if (cache_state_.is_free(reg)) return reg;
+ }
+ return GetUnusedRegister(rc, pinned);
+ }
+
// Get an unused register for class {rc}, potentially spilling to free one.
LiftoffRegister GetUnusedRegister(RegClass rc, LiftoffRegList pinned = {}) {
if (kNeedI64RegPair && rc == kGpRegPair) {
LiftoffRegList candidates = kGpCacheRegList;
- LiftoffRegister low = pinned.set(GetUnusedRegister(candidates, pinned));
- LiftoffRegister high = GetUnusedRegister(candidates, pinned);
+ Register low = pinned.set(GetUnusedRegister(candidates, pinned)).gp();
+ Register high = GetUnusedRegister(candidates, pinned).gp();
return LiftoffRegister::ForPair(low, high);
}
DCHECK(rc == kGpReg || rc == kFpReg);
@@ -311,12 +322,23 @@ class LiftoffAssembler : public TurboAssembler {
// register, or {no_reg} if target was spilled to the stack.
void PrepareCall(wasm::FunctionSig*, compiler::CallDescriptor*,
Register* target = nullptr,
- LiftoffRegister* explicit_context = nullptr);
+ LiftoffRegister* target_instance = nullptr);
// Process return values of the call.
void FinishCall(wasm::FunctionSig*, compiler::CallDescriptor*);
+ // Move {src} into {dst}. {src} and {dst} must be different.
void Move(LiftoffRegister dst, LiftoffRegister src, ValueType);
+ // Parallel register move: For a list of tuples <dst, src, type>, move the
+ // {src} register of type {type} into {dst}. If {src} equals {dst}, ignore
+ // that tuple.
+ struct ParallelRegisterMoveTuple {
+ LiftoffRegister dst;
+ LiftoffRegister src;
+ ValueType type;
+ };
+ void ParallelRegisterMove(std::initializer_list<ParallelRegisterMoveTuple>);
+
////////////////////////////////////
// Platform-specific part. //
////////////////////////////////////
@@ -330,9 +352,9 @@ class LiftoffAssembler : public TurboAssembler {
inline void LoadConstant(LiftoffRegister, WasmValue,
RelocInfo::Mode rmode = RelocInfo::NONE);
- inline void LoadFromContext(Register dst, uint32_t offset, int size);
- inline void SpillContext(Register context);
- inline void FillContextInto(Register dst);
+ inline void LoadFromInstance(Register dst, uint32_t offset, int size);
+ inline void SpillInstance(Register instance);
+ inline void FillInstanceInto(Register dst);
inline void Load(LiftoffRegister dst, Register src_addr, Register offset_reg,
uint32_t offset_imm, LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc = nullptr);
@@ -350,6 +372,9 @@ class LiftoffAssembler : public TurboAssembler {
inline void Spill(uint32_t index, LiftoffRegister, ValueType);
inline void Spill(uint32_t index, WasmValue);
inline void Fill(LiftoffRegister, uint32_t index, ValueType);
+ // Only used on 32-bit systems: Fill a register from a "half stack slot", i.e.
+ // 4 bytes on the stack holding half of a 64-bit value. The two half_indexes
+ // corresponding to slot {index} are {2*index} and {2*index-1}.
inline void FillI64Half(Register, uint32_t half_index);
// i32 binops.
@@ -359,11 +384,11 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_i32_and(Register dst, Register lhs, Register rhs);
inline void emit_i32_or(Register dst, Register lhs, Register rhs);
inline void emit_i32_xor(Register dst, Register lhs, Register rhs);
- inline void emit_i32_shl(Register dst, Register lhs, Register rhs,
+ inline void emit_i32_shl(Register dst, Register src, Register amount,
LiftoffRegList pinned = {});
- inline void emit_i32_sar(Register dst, Register lhs, Register rhs,
+ inline void emit_i32_sar(Register dst, Register src, Register amount,
LiftoffRegList pinned = {});
- inline void emit_i32_shr(Register dst, Register lhs, Register rhs,
+ inline void emit_i32_shr(Register dst, Register src, Register amount,
LiftoffRegList pinned = {});
// i32 unops.
@@ -371,7 +396,32 @@ class LiftoffAssembler : public TurboAssembler {
inline bool emit_i32_ctz(Register dst, Register src);
inline bool emit_i32_popcnt(Register dst, Register src);
- inline void emit_ptrsize_add(Register dst, Register lhs, Register rhs);
+ // i64 binops.
+ inline void emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i64_or(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i64_xor(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i64_shl(LiftoffRegister dst, LiftoffRegister src,
+ Register amount, LiftoffRegList pinned = {});
+ inline void emit_i64_sar(LiftoffRegister dst, LiftoffRegister src,
+ Register amount, LiftoffRegList pinned = {});
+ inline void emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
+ Register amount, LiftoffRegList pinned = {});
+
+ inline void emit_ptrsize_add(Register dst, Register lhs, Register rhs) {
+ if (kPointerSize == 8) {
+ emit_i64_add(LiftoffRegister(dst), LiftoffRegister(lhs),
+ LiftoffRegister(rhs));
+ } else {
+ emit_i32_add(dst, lhs, rhs);
+ }
+ }
// f32 binops.
inline void emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
@@ -380,8 +430,16 @@ class LiftoffAssembler : public TurboAssembler {
DoubleRegister rhs);
inline void emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs);
+ inline void emit_f32_div(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs);
// f32 unops.
+ inline void emit_f32_abs(DoubleRegister dst, DoubleRegister src);
inline void emit_f32_neg(DoubleRegister dst, DoubleRegister src);
+ inline void emit_f32_ceil(DoubleRegister dst, DoubleRegister src);
+ inline void emit_f32_floor(DoubleRegister dst, DoubleRegister src);
+ inline void emit_f32_trunc(DoubleRegister dst, DoubleRegister src);
+ inline void emit_f32_nearest_int(DoubleRegister dst, DoubleRegister src);
+ inline void emit_f32_sqrt(DoubleRegister dst, DoubleRegister src);
// f64 binops.
inline void emit_f64_add(DoubleRegister dst, DoubleRegister lhs,
@@ -390,18 +448,38 @@ class LiftoffAssembler : public TurboAssembler {
DoubleRegister rhs);
inline void emit_f64_mul(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs);
+ inline void emit_f64_div(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs);
// f64 unops.
+ inline void emit_f64_abs(DoubleRegister dst, DoubleRegister src);
inline void emit_f64_neg(DoubleRegister dst, DoubleRegister src);
+ inline void emit_f64_ceil(DoubleRegister dst, DoubleRegister src);
+ inline void emit_f64_floor(DoubleRegister dst, DoubleRegister src);
+ inline void emit_f64_trunc(DoubleRegister dst, DoubleRegister src);
+ inline void emit_f64_nearest_int(DoubleRegister dst, DoubleRegister src);
+ inline void emit_f64_sqrt(DoubleRegister dst, DoubleRegister src);
+
+ // type conversions.
+ inline bool emit_type_conversion(WasmOpcode opcode, LiftoffRegister dst,
+ LiftoffRegister src);
inline void emit_jump(Label*);
+ inline void emit_jump(Register);
+
inline void emit_cond_jump(Condition, Label*, ValueType value, Register lhs,
Register rhs = no_reg);
// Set {dst} to 1 if condition holds, 0 otherwise.
+ inline void emit_i32_eqz(Register dst, Register src);
inline void emit_i32_set_cond(Condition, Register dst, Register lhs,
- Register rhs = no_reg);
- inline void emit_f32_set_cond(Condition, Register dst, DoubleRegister lhs,
- DoubleRegister rhs);
+ Register rhs);
+ inline void emit_i64_eqz(Register dst, LiftoffRegister src);
+ inline void emit_i64_set_cond(Condition condition, Register dst,
+ LiftoffRegister lhs, LiftoffRegister rhs);
+ inline void emit_f32_set_cond(Condition condition, Register dst,
+ DoubleRegister lhs, DoubleRegister rhs);
+ inline void emit_f64_set_cond(Condition condition, Register dst,
+ DoubleRegister lhs, DoubleRegister rhs);
inline void StackCheck(Label* ool_code);
@@ -418,15 +496,25 @@ class LiftoffAssembler : public TurboAssembler {
inline void DropStackSlotsAndRet(uint32_t num_stack_slots);
- // Push arguments on the stack (in the caller frame), then align the stack.
- // The address of the last argument will be stored to {arg_addr_dst}. Previous
- // arguments will be located at pointer sized buckets above that address.
- inline void PrepareCCall(uint32_t num_params, const Register* args);
- inline void SetCCallRegParamAddr(Register dst, uint32_t param_idx,
- uint32_t num_params);
- inline void SetCCallStackParamAddr(uint32_t stack_param_idx,
- uint32_t param_idx, uint32_t num_params);
+ // {PrepareCCall} pushes the arguments on the stack (in the caller frame),
+ // then aligns the stack to do a c call. Pointers to the pushed arguments are
+ // later loaded to registers or stack slots via {SetCCall*ParamAddr}. After
+ // the c call, the output parameter (if it exists) can be loaded via
+ // {LoadCCallOutArgument}. {FinishCCall} resets the stack pointer to the state
+ // before {PrepareCCall}.
+ // The {FunctionSig} passed to {PrepareCCall} describes the types of
+ // parameters which are then passed to the C function via pointers, excluding
+ // the out argument.
+ inline void PrepareCCall(wasm::FunctionSig* sig, const LiftoffRegister* args,
+ ValueType out_argument_type);
+ inline void SetCCallRegParamAddr(Register dst, int param_byte_offset,
+ ValueType type);
+ inline void SetCCallStackParamAddr(int stack_param_idx, int param_byte_offset,
+ ValueType type);
+ inline void LoadCCallOutArgument(LiftoffRegister dst, ValueType type,
+ int param_byte_offset);
inline void CallC(ExternalReference ext_ref, uint32_t num_params);
+ inline void FinishCCall();
inline void CallNativeWasmCode(Address addr);
inline void CallRuntime(Zone* zone, Runtime::FunctionId fid);
@@ -491,6 +579,51 @@ class LiftoffAssembler : public TurboAssembler {
std::ostream& operator<<(std::ostream& os, LiftoffAssembler::VarState);
+// =======================================================================
+// Partially platform-independent implementations of the platform-dependent
+// part.
+
+#ifdef V8_TARGET_ARCH_32_BIT
+
+namespace liftoff {
+template <void (LiftoffAssembler::*op)(Register, Register, Register)>
+void EmitI64IndependentHalfOperation(LiftoffAssembler* assm,
+ LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ // Register pairs are either the same, or they don't overlap at all, so the
+ // low and high registers must be disjoint. Just handle them separately.
+ DCHECK_EQ(LiftoffRegList{},
+ LiftoffRegList::ForRegs(dst.low(), lhs.low(), rhs.low()) &
+ LiftoffRegList::ForRegs(dst.high(), lhs.high(), rhs.high()));
+ (assm->*op)(dst.low_gp(), lhs.low_gp(), rhs.low_gp());
+ (assm->*op)(dst.high_gp(), lhs.high_gp(), rhs.high_gp());
+}
+} // namespace liftoff
+
+void LiftoffAssembler::emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitI64IndependentHalfOperation<&LiftoffAssembler::emit_i32_and>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i64_or(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitI64IndependentHalfOperation<&LiftoffAssembler::emit_i32_or>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i64_xor(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitI64IndependentHalfOperation<&LiftoffAssembler::emit_i32_xor>(
+ this, dst, lhs, rhs);
+}
+
+#endif // V8_TARGET_ARCH_32_BIT
+
+// End of the partially platform-independent implementations of the
+// platform-dependent part.
+// =======================================================================
+
} // namespace wasm
} // namespace internal
} // namespace v8
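The {EmitI64IndependentHalfOperation} helper added for 32-bit targets relies on &, | and ^ never mixing the two halves of an i64. A minimal standalone sketch of that decomposition (illustrative only, not the V8 implementation):

    #include <cstdint>
    #include <cstdio>

    struct PairReg { uint32_t low, high; };  // stand-in for an i64 register pair

    static void And32(uint32_t* dst, uint32_t lhs, uint32_t rhs) { *dst = lhs & rhs; }

    static void EmitI64And(PairReg* dst, PairReg lhs, PairReg rhs) {
      // The halves never interact for bitwise ops, so handle them separately,
      // exactly as the templated helper does for each 32-bit emit function.
      And32(&dst->low, lhs.low, rhs.low);
      And32(&dst->high, lhs.high, rhs.high);
    }

    int main() {
      PairReg a{0xF0F0F0F0u, 0x12345678u};
      PairReg b{0x0FF00FF0u, 0xFFFF0000u};
      PairReg d{0, 0};
      EmitI64And(&d, a, b);
      std::printf("%08x %08x\n", d.high, d.low);  // prints "12340000 00f000f0"
    }
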
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index c6adb90f82..914fc2aafa 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -19,6 +19,8 @@ namespace v8 {
namespace internal {
namespace wasm {
+using WasmCompilationData = compiler::WasmCompilationData;
+
constexpr auto kRegister = LiftoffAssembler::VarState::kRegister;
constexpr auto KIntConst = LiftoffAssembler::VarState::KIntConst;
constexpr auto kStack = LiftoffAssembler::VarState::kStack;
@@ -32,6 +34,16 @@ namespace {
if (FLAG_trace_liftoff) PrintF("[liftoff] " __VA_ARGS__); \
} while (false)
+#define WASM_INSTANCE_OBJECT_OFFSET(name) \
+ (WasmInstanceObject::k##name##Offset - kHeapObjectTag)
+
+#define LOAD_INSTANCE_FIELD(dst, name, type) \
+ __ LoadFromInstance(dst.gp(), WASM_INSTANCE_OBJECT_OFFSET(name), \
+ LoadType(type).size());
+
+constexpr LoadType::LoadTypeValue kPointerLoadType =
+ kPointerSize == 8 ? LoadType::kI64Load : LoadType::kI32Load;
+
#if V8_TARGET_ARCH_ARM64
// On ARM64, the Assembler keeps track of pointers to Labels to resolve
// branches to distant targets. Moving labels would confuse the Assembler,
@@ -64,16 +76,6 @@ class MovableLabel {
};
#endif
-wasm::WasmValue WasmPtrValue(uintptr_t ptr) {
- using int_t = std::conditional<kPointerSize == 8, uint64_t, uint32_t>::type;
- static_assert(sizeof(int_t) == sizeof(uintptr_t), "weird uintptr_t");
- return wasm::WasmValue(static_cast<int_t>(ptr));
-}
-
-wasm::WasmValue WasmPtrValue(void* ptr) {
- return WasmPtrValue(reinterpret_cast<uintptr_t>(ptr));
-}
-
compiler::CallDescriptor* GetLoweredCallDescriptor(
Zone* zone, compiler::CallDescriptor* call_desc) {
return kPointerSize == 4 ? compiler::GetI32WasmCallDescriptor(zone, call_desc)
@@ -131,11 +133,10 @@ class LiftoffCompiler {
LiftoffCompiler(LiftoffAssembler* liftoff_asm,
compiler::CallDescriptor* call_descriptor,
compiler::ModuleEnv* env,
- compiler::RuntimeExceptionSupport runtime_exception_support,
SourcePositionTableBuilder* source_position_table_builder,
- std::vector<trap_handler::ProtectedInstructionData>*
- protected_instructions,
- Zone* compilation_zone, std::unique_ptr<Zone>* codegen_zone)
+ WasmCompilationData* wasm_compilation_data,
+ Zone* compilation_zone, std::unique_ptr<Zone>* codegen_zone,
+ WasmCode* const* code_table_entry)
: asm_(liftoff_asm),
descriptor_(
GetLoweredCallDescriptor(compilation_zone, call_descriptor)),
@@ -145,12 +146,12 @@ class LiftoffCompiler {
? env_->module->maximum_pages
: wasm::kV8MaxWasmMemoryPages} *
wasm::kWasmPageSize),
- runtime_exception_support_(runtime_exception_support),
source_position_table_builder_(source_position_table_builder),
- protected_instructions_(protected_instructions),
+ wasm_compilation_data_(wasm_compilation_data),
compilation_zone_(compilation_zone),
codegen_zone_(codegen_zone),
- safepoint_table_builder_(compilation_zone_) {}
+ safepoint_table_builder_(compilation_zone_),
+ code_table_entry_(code_table_entry) {}
~LiftoffCompiler() { BindUnboundLabels(nullptr); }
@@ -217,7 +218,7 @@ class LiftoffCompiler {
// Returns the number of inputs processed (1 or 2).
uint32_t ProcessParameter(ValueType type, uint32_t input_idx) {
- const int num_lowered_params = 1 + (kNeedI64RegPair && type == kWasmI64);
+ const int num_lowered_params = 1 + needs_reg_pair(type);
// Initialize to anything, will be set in the loop and used afterwards.
LiftoffRegister reg = LiftoffRegister::from_code(kGpReg, 0);
RegClass rc = num_lowered_params == 1 ? reg_class_for(type) : kGpReg;
@@ -252,7 +253,8 @@ class LiftoffCompiler {
__ LoadCallerFrameSlot(in_reg, -param_loc.AsCallerFrameSlot(),
lowered_type);
}
- reg = pair_idx == 0 ? in_reg : LiftoffRegister::ForPair(reg, in_reg);
+ reg = pair_idx == 0 ? in_reg
+ : LiftoffRegister::ForPair(reg.gp(), in_reg.gp());
pinned.set(reg);
}
__ PushRegister(type, reg);
@@ -260,7 +262,10 @@ class LiftoffCompiler {
}
void StackCheck(wasm::WasmCodePosition position) {
- if (FLAG_wasm_no_stack_checks || !runtime_exception_support_) return;
+ if (FLAG_wasm_no_stack_checks ||
+ !wasm_compilation_data_->runtime_exception_support()) {
+ return;
+ }
out_of_line_code_.push_back(
OutOfLineCode::StackCheck(position, __ cache_state()->used_registers));
OutOfLineCode& ool = out_of_line_code_.back();
@@ -268,6 +273,49 @@ class LiftoffCompiler {
if (ool.continuation) __ bind(ool.continuation.get());
}
+ // Inserts a check whether the optimized version of this code already exists.
+ // If so, it redirects execution to the optimized code.
+ void JumpToOptimizedCodeIfExisting() {
+ // Check whether we have an optimized function before
+ // continuing to execute the Liftoff-compiled code.
+ // TODO(clemensh): Reduce number of temporary registers.
+ LiftoffRegList pinned;
+ LiftoffRegister wasm_code_addr =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ LiftoffRegister target_code_addr =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ LiftoffRegister code_start_address =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+
+ // Get the current code's target address ({instructions_.start()}).
+ __ ComputeCodeStartAddress(code_start_address.gp());
+
+ static LoadType kPointerLoadType =
+ LoadType::ForValueType(LiftoffAssembler::kWasmIntPtr);
+ using int_t = std::conditional<kPointerSize == 8, uint64_t, uint32_t>::type;
+ static_assert(sizeof(int_t) == sizeof(uintptr_t), "weird uintptr_t");
+ // Get the address of the WasmCode* currently stored in the code table.
+ __ LoadConstant(target_code_addr,
+ WasmValue(reinterpret_cast<int_t>(code_table_entry_)),
+ RelocInfo::WASM_CODE_TABLE_ENTRY);
+ // Load the corresponding WasmCode*.
+ __ Load(wasm_code_addr, target_code_addr.gp(), Register::no_reg(), 0,
+ kPointerLoadType, pinned);
+ // Load its target address ({instructions_.start()}).
+ __ Load(target_code_addr, wasm_code_addr.gp(), Register::no_reg(),
+ WasmCode::kInstructionStartOffset, kPointerLoadType, pinned);
+
+ // If the current code's target address is the same as the
+ // target address of the stored WasmCode, then continue executing, otherwise
+ // jump to the updated WasmCode.
+ Label cont;
+ __ emit_cond_jump(kEqual, &cont, LiftoffAssembler::kWasmIntPtr,
+ target_code_addr.gp(), code_start_address.gp());
+ __ LeaveFrame(StackFrame::WASM_COMPILED);
+ __ emit_jump(target_code_addr.gp());
+ __ bind(&cont);
+ }
+
void StartFunctionBody(Decoder* decoder, Control* block) {
__ EnterFrame(StackFrame::WASM_COMPILED);
__ set_has_frame(true);
@@ -279,24 +327,24 @@ class LiftoffCompiler {
// finish compilation without errors even if we hit unimplemented
// LiftoffAssembler methods.
if (DidAssemblerBailout(decoder)) return;
- // Parameter 0 is the wasm context.
+ // Parameter 0 is the instance parameter.
uint32_t num_params =
static_cast<uint32_t>(decoder->sig_->parameter_count());
for (uint32_t i = 0; i < __ num_locals(); ++i) {
if (!CheckSupportedType(decoder, kTypes_ilfd, __ local_type(i), "param"))
return;
}
- // Input 0 is the call target, the context is at 1.
- constexpr int kContextParameterIndex = 1;
- // Store the context parameter to a special stack slot.
- compiler::LinkageLocation context_loc =
- descriptor_->GetInputLocation(kContextParameterIndex);
- DCHECK(context_loc.IsRegister());
- DCHECK(!context_loc.IsAnyRegister());
- Register context_reg = Register::from_code(context_loc.AsRegister());
- __ SpillContext(context_reg);
- // Input 0 is the code target, 1 is the context. First parameter at 2.
- uint32_t input_idx = kContextParameterIndex + 1;
+ // Input 0 is the call target, the instance is at 1.
+ constexpr int kInstanceParameterIndex = 1;
+ // Store the instance parameter to a special stack slot.
+ compiler::LinkageLocation instance_loc =
+ descriptor_->GetInputLocation(kInstanceParameterIndex);
+ DCHECK(instance_loc.IsRegister());
+ DCHECK(!instance_loc.IsAnyRegister());
+ Register instance_reg = Register::from_code(instance_loc.AsRegister());
+ __ SpillInstance(instance_reg);
+ // Input 0 is the code target, 1 is the instance. First parameter at 2.
+ uint32_t input_idx = kInstanceParameterIndex + 1;
for (uint32_t param_idx = 0; param_idx < num_params; ++param_idx) {
input_idx += ProcessParameter(__ local_type(param_idx), input_idx);
}
@@ -336,12 +384,29 @@ class LiftoffCompiler {
StackCheck(0);
DCHECK_EQ(__ num_locals(), __ cache_state()->stack_height());
+
+ // TODO(kimanh): if possible, we want to move this check further up,
+ // in order to avoid unnecessary overhead each time we enter
+ // a Liftoff-compiled function that will jump to a Turbofan-compiled
+ // function.
+ if (FLAG_wasm_tier_up) {
+ JumpToOptimizedCodeIfExisting();
+ }
}
void GenerateOutOfLineCode(OutOfLineCode& ool) {
__ bind(ool.label.get());
const bool is_stack_check = ool.builtin == Builtins::kWasmStackGuard;
- if (!runtime_exception_support_) {
+ const bool is_mem_out_of_bounds =
+ ool.builtin == Builtins::kThrowWasmTrapMemOutOfBounds;
+
+ if (is_mem_out_of_bounds && env_->use_trap_handler) {
+ uint32_t pc = static_cast<uint32_t>(__ pc_offset());
+ DCHECK_EQ(pc, __ pc_offset());
+ wasm_compilation_data_->AddProtectedInstruction(ool.pc, pc);
+ }
+
+ if (!wasm_compilation_data_->runtime_exception_support()) {
// We cannot test calls to the runtime in cctest/test-run-wasm.
// Therefore we emit a call to C here instead of a call to the runtime.
// In this mode, we never generate stack checks.
@@ -352,13 +417,6 @@ class LiftoffCompiler {
return;
}
- if (!is_stack_check && env_->use_trap_handler) {
- uint32_t pc = static_cast<uint32_t>(__ pc_offset());
- DCHECK_EQ(pc, __ pc_offset());
- protected_instructions_->emplace_back(
- trap_handler::ProtectedInstructionData{ool.pc, pc});
- }
-
if (!ool.regs_to_save.is_empty()) __ PushRegisters(ool.regs_to_save);
source_position_table_builder_->AddPosition(
@@ -432,7 +490,7 @@ class LiftoffCompiler {
if_block->else_state = base::make_unique<ElseState>();
// Test the condition, jump to else if zero.
- Register value = __ PopToRegister(kGpReg).gp();
+ Register value = __ PopToRegister().gp();
__ emit_cond_jump(kEqual, if_block->else_state->label.get(), kWasmI32,
value);
@@ -465,68 +523,109 @@ class LiftoffCompiler {
void EndControl(Decoder* decoder, Control* c) {}
- void GenerateCCall(Register res_reg, uint32_t num_args,
- const Register* arg_regs, ExternalReference ext_ref) {
- static constexpr int kNumReturns = 1;
+ enum CCallReturn : bool { kHasReturn = true, kNoReturn = false };
+
+ void GenerateCCall(const LiftoffRegister* result_regs, FunctionSig* sig,
+ ValueType out_argument_type,
+ const LiftoffRegister* arg_regs,
+ ExternalReference ext_ref) {
+ static constexpr int kMaxReturns = 1;
static constexpr int kMaxArgs = 2;
static constexpr MachineType kReps[]{
MachineType::Uint32(), MachineType::Pointer(), MachineType::Pointer()};
- static_assert(arraysize(kReps) == kNumReturns + kMaxArgs, "mismatch");
+ static_assert(arraysize(kReps) == kMaxReturns + kMaxArgs, "mismatch");
+
+ const bool has_out_argument = out_argument_type != kWasmStmt;
+ const uint32_t num_returns = static_cast<uint32_t>(sig->return_count());
+ // {total_num_args} is {num_args + 1} if the return value is stored in an
+ // out parameter, or {num_args} otherwise.
+ const uint32_t num_args = static_cast<uint32_t>(sig->parameter_count());
+ const uint32_t total_num_args = num_args + has_out_argument;
DCHECK_LE(num_args, kMaxArgs);
+ DCHECK_LE(num_returns, kMaxReturns);
- MachineSignature sig(kNumReturns, num_args, kReps);
- auto call_descriptor =
- compiler::Linkage::GetSimplifiedCDescriptor(compilation_zone_, &sig);
+ MachineSignature machine_sig(num_returns, total_num_args,
+ kReps + (kMaxReturns - num_returns));
+ auto* call_descriptor = compiler::Linkage::GetSimplifiedCDescriptor(
+ compilation_zone_, &machine_sig);
// Before making a call, spill all cache registers.
__ SpillAllRegisters();
// Store arguments on our stack, then align the stack for calling to C.
- uint32_t num_params =
- static_cast<uint32_t>(call_descriptor->ParameterCount());
- __ PrepareCCall(num_params, arg_regs);
-
- // Set parameters (in sp[0], sp[8], ...).
- uint32_t num_stack_params = 0;
- for (uint32_t param = 0; param < num_params; ++param) {
- constexpr size_t kInputShift = 1; // Input 0 is the call target.
-
+ __ PrepareCCall(sig, arg_regs, out_argument_type);
+
+ // The arguments to the c function are pointers to the stack slots we just
+ // pushed.
+ int num_stack_params = 0; // Number of stack parameters.
+ int input_idx = 1; // Input 0 is the call target.
+ int param_byte_offset = 0; // Byte offset into the pushed arguments.
+ auto add_argument = [&](ValueType arg_type) {
compiler::LinkageLocation loc =
- call_descriptor->GetInputLocation(param + kInputShift);
+ call_descriptor->GetInputLocation(input_idx);
+ param_byte_offset +=
+ RoundUp<kPointerSize>(WasmOpcodes::MemSize(arg_type));
+ ++input_idx;
if (loc.IsRegister()) {
Register reg = Register::from_code(loc.AsRegister());
// Load address of that parameter to the register.
- __ SetCCallRegParamAddr(reg, param, num_params);
+ __ SetCCallRegParamAddr(reg, param_byte_offset, arg_type);
} else {
DCHECK(loc.IsCallerFrameSlot());
- __ SetCCallStackParamAddr(num_stack_params, param, num_params);
+ __ SetCCallStackParamAddr(num_stack_params, param_byte_offset,
+ arg_type);
++num_stack_params;
}
+ };
+ for (ValueType arg_type : sig->parameters()) {
+ add_argument(arg_type);
}
+ if (has_out_argument) {
+ add_argument(out_argument_type);
+ }
+ DCHECK_EQ(input_idx, call_descriptor->InputCount());
// Now execute the call.
- __ CallC(ext_ref, num_params);
+ uint32_t c_call_arg_count =
+ static_cast<uint32_t>(sig->parameter_count()) + has_out_argument;
+ __ CallC(ext_ref, c_call_arg_count);
+
+ // Reset the stack pointer.
+ __ FinishCCall();
// Load return value.
- compiler::LinkageLocation return_loc =
- call_descriptor->GetReturnLocation(0);
- DCHECK(return_loc.IsRegister());
- Register return_reg = Register::from_code(return_loc.AsRegister());
- if (return_reg != res_reg) {
- DCHECK_EQ(MachineRepresentation::kWord32,
- sig.GetReturn(0).representation());
- __ Move(LiftoffRegister(res_reg), LiftoffRegister(return_reg), kWasmI32);
+ const LiftoffRegister* next_result_reg = result_regs;
+ if (sig->return_count() > 0) {
+ DCHECK_EQ(1, sig->return_count());
+ compiler::LinkageLocation return_loc =
+ call_descriptor->GetReturnLocation(0);
+ DCHECK(return_loc.IsRegister());
+ Register return_reg = Register::from_code(return_loc.AsRegister());
+ if (return_reg != next_result_reg->gp()) {
+ __ Move(*next_result_reg, LiftoffRegister(return_reg),
+ sig->GetReturn(0));
+ }
+ ++next_result_reg;
+ }
+
+ // Load potential return value from output argument.
+ if (has_out_argument) {
+ __ LoadCCallOutArgument(*next_result_reg, out_argument_type,
+ param_byte_offset);
}
}
- template <ValueType type, class EmitFn>
+ template <ValueType src_type, ValueType result_type, class EmitFn>
void EmitUnOp(EmitFn fn) {
- static RegClass rc = reg_class_for(type);
+ static RegClass src_rc = reg_class_for(src_type);
+ static RegClass result_rc = reg_class_for(result_type);
LiftoffRegList pinned;
- LiftoffRegister dst = pinned.set(__ GetUnaryOpTargetRegister(rc));
- LiftoffRegister src = __ PopToRegister(rc, pinned);
+ LiftoffRegister src = pinned.set(__ PopToRegister(pinned));
+ LiftoffRegister dst = src_rc == result_rc
+ ? __ GetUnusedRegister(result_rc, {src}, pinned)
+ : __ GetUnusedRegister(result_rc, pinned);
fn(dst, src);
- __ PushRegister(type, dst);
+ __ PushRegister(result_type, dst);
}
void EmitI32UnOpWithCFallback(bool (LiftoffAssembler::*emit_fn)(Register,
@@ -535,64 +634,122 @@ class LiftoffCompiler {
auto emit_with_c_fallback = [=](LiftoffRegister dst, LiftoffRegister src) {
if (emit_fn && (asm_->*emit_fn)(dst.gp(), src.gp())) return;
ExternalReference ext_ref = fallback_fn(asm_->isolate());
- Register args[] = {src.gp()};
- GenerateCCall(dst.gp(), arraysize(args), args, ext_ref);
+ ValueType sig_i_i_reps[] = {kWasmI32, kWasmI32};
+ FunctionSig sig_i_i(1, 1, sig_i_i_reps);
+ GenerateCCall(&dst, &sig_i_i, kWasmStmt, &src, ext_ref);
};
- EmitUnOp<kWasmI32>(emit_with_c_fallback);
+ EmitUnOp<kWasmI32, kWasmI32>(emit_with_c_fallback);
+ }
+
+ void EmitTypeConversion(WasmOpcode opcode, ValueType dst_type,
+ ValueType src_type,
+ ExternalReference (*fallback_fn)(Isolate*)) {
+ RegClass src_rc = reg_class_for(src_type);
+ RegClass dst_rc = reg_class_for(dst_type);
+ LiftoffRegList pinned;
+ LiftoffRegister src = pinned.set(__ PopToRegister());
+ LiftoffRegister dst = src_rc == dst_rc
+ ? __ GetUnusedRegister(dst_rc, {src}, pinned)
+ : __ GetUnusedRegister(dst_rc, pinned);
+ if (!__ emit_type_conversion(opcode, dst, src)) {
+ DCHECK_NOT_NULL(fallback_fn);
+ ExternalReference ext_ref = fallback_fn(asm_->isolate());
+ ValueType sig_reps[] = {src_type};
+ FunctionSig sig(0, 1, sig_reps);
+ GenerateCCall(&dst, &sig, dst_type, &src, ext_ref);
+ }
+ __ PushRegister(dst_type, dst);
}
void UnOp(Decoder* decoder, WasmOpcode opcode, FunctionSig*,
const Value& value, Value* result) {
-#define CASE_I32_UNOP(opcode, fn) \
- case WasmOpcode::kExpr##opcode: \
- EmitUnOp<kWasmI32>([=](LiftoffRegister dst, LiftoffRegister src) { \
- __ emit_##fn(dst.gp(), src.gp()); \
- }); \
+#define CASE_I32_UNOP(opcode, fn) \
+ case WasmOpcode::kExpr##opcode: \
+ EmitUnOp<kWasmI32, kWasmI32>( \
+ [=](LiftoffRegister dst, LiftoffRegister src) { \
+ __ emit_##fn(dst.gp(), src.gp()); \
+ }); \
break;
-#define CASE_FLOAT_UNOP(opcode, type, fn) \
- case WasmOpcode::kExpr##opcode: \
- EmitUnOp<kWasm##type>([=](LiftoffRegister dst, LiftoffRegister src) { \
- __ emit_##fn(dst.fp(), src.fp()); \
- }); \
+#define CASE_FLOAT_UNOP(opcode, type, fn) \
+ case WasmOpcode::kExpr##opcode: \
+ EmitUnOp<kWasm##type, kWasm##type>( \
+ [=](LiftoffRegister dst, LiftoffRegister src) { \
+ __ emit_##fn(dst.fp(), src.fp()); \
+ }); \
+ break;
+#define CASE_TYPE_CONVERSION(opcode, dst_type, src_type, ext_ref) \
+ case WasmOpcode::kExpr##opcode: \
+ EmitTypeConversion(kExpr##opcode, kWasm##dst_type, kWasm##src_type, \
+ ext_ref); \
break;
switch (opcode) {
+ CASE_I32_UNOP(I32Eqz, i32_eqz)
CASE_I32_UNOP(I32Clz, i32_clz)
CASE_I32_UNOP(I32Ctz, i32_ctz)
+ CASE_FLOAT_UNOP(F32Abs, F32, f32_abs)
+ CASE_FLOAT_UNOP(F32Neg, F32, f32_neg)
+ CASE_FLOAT_UNOP(F32Ceil, F32, f32_ceil)
+ CASE_FLOAT_UNOP(F32Floor, F32, f32_floor)
+ CASE_FLOAT_UNOP(F32Trunc, F32, f32_trunc)
+ CASE_FLOAT_UNOP(F32NearestInt, F32, f32_nearest_int)
+ CASE_FLOAT_UNOP(F32Sqrt, F32, f32_sqrt)
+ CASE_FLOAT_UNOP(F64Abs, F64, f64_abs)
+ CASE_FLOAT_UNOP(F64Neg, F64, f64_neg)
+ CASE_FLOAT_UNOP(F64Ceil, F64, f64_ceil)
+ CASE_FLOAT_UNOP(F64Floor, F64, f64_floor)
+ CASE_FLOAT_UNOP(F64Trunc, F64, f64_trunc)
+ CASE_FLOAT_UNOP(F64NearestInt, F64, f64_nearest_int)
+ CASE_FLOAT_UNOP(F64Sqrt, F64, f64_sqrt)
+ CASE_TYPE_CONVERSION(I32ConvertI64, I32, I64, nullptr)
+ CASE_TYPE_CONVERSION(I32ReinterpretF32, I32, F32, nullptr)
+ CASE_TYPE_CONVERSION(I64SConvertI32, I64, I32, nullptr)
+ CASE_TYPE_CONVERSION(I64UConvertI32, I64, I32, nullptr)
+ CASE_TYPE_CONVERSION(I64ReinterpretF64, I64, F64, nullptr)
+ CASE_TYPE_CONVERSION(F32SConvertI32, F32, I32, nullptr)
+ CASE_TYPE_CONVERSION(F32UConvertI32, F32, I32, nullptr)
+ CASE_TYPE_CONVERSION(F32SConvertI64, F32, I64,
+ &ExternalReference::wasm_int64_to_float32)
+ CASE_TYPE_CONVERSION(F32UConvertI64, F32, I64,
+ &ExternalReference::wasm_uint64_to_float32)
+ CASE_TYPE_CONVERSION(F32ConvertF64, F32, F64, nullptr)
+ CASE_TYPE_CONVERSION(F32ReinterpretI32, F32, I32, nullptr)
+ CASE_TYPE_CONVERSION(F64SConvertI32, F64, I32, nullptr)
+ CASE_TYPE_CONVERSION(F64UConvertI32, F64, I32, nullptr)
+ CASE_TYPE_CONVERSION(F64SConvertI64, F64, I64,
+ &ExternalReference::wasm_int64_to_float64)
+ CASE_TYPE_CONVERSION(F64UConvertI64, F64, I64,
+ &ExternalReference::wasm_uint64_to_float64)
+ CASE_TYPE_CONVERSION(F64ConvertF32, F64, F32, nullptr)
+ CASE_TYPE_CONVERSION(F64ReinterpretI64, F64, I64, nullptr)
case kExprI32Popcnt:
EmitI32UnOpWithCFallback(&LiftoffAssembler::emit_i32_popcnt,
&ExternalReference::wasm_word32_popcnt);
break;
- case kExprI32Eqz:
- EmitUnOp<kWasmI32>([=](LiftoffRegister dst, LiftoffRegister src) {
- __ emit_i32_set_cond(kEqual, dst.gp(), src.gp());
- });
+ case WasmOpcode::kExprI64Eqz:
+ EmitUnOp<kWasmI64, kWasmI32>(
+ [=](LiftoffRegister dst, LiftoffRegister src) {
+ __ emit_i64_eqz(dst.gp(), src);
+ });
break;
- CASE_FLOAT_UNOP(F32Neg, F32, f32_neg)
- CASE_FLOAT_UNOP(F64Neg, F64, f64_neg)
default:
return unsupported(decoder, WasmOpcodes::OpcodeName(opcode));
}
#undef CASE_I32_UNOP
#undef CASE_FLOAT_UNOP
+#undef CASE_TYPE_CONVERSION
}
- template <ValueType type, typename EmitFn>
- void EmitMonomorphicBinOp(EmitFn fn) {
- static constexpr RegClass rc = reg_class_for(type);
- LiftoffRegList pinned;
- LiftoffRegister dst = pinned.set(__ GetBinaryOpTargetRegister(rc));
- LiftoffRegister rhs = pinned.set(__ PopToRegister(rc, pinned));
- LiftoffRegister lhs = __ PopToRegister(rc, pinned);
- fn(dst, lhs, rhs);
- __ PushRegister(type, dst);
- }
-
- template <ValueType result_type, RegClass src_rc, typename EmitFn>
- void EmitBinOpWithDifferentResultType(EmitFn fn) {
+ template <ValueType src_type, ValueType result_type, typename EmitFn>
+ void EmitBinOp(EmitFn fn) {
+ static constexpr RegClass src_rc = reg_class_for(src_type);
+ static constexpr RegClass result_rc = reg_class_for(result_type);
LiftoffRegList pinned;
- LiftoffRegister rhs = pinned.set(__ PopToRegister(src_rc, pinned));
- LiftoffRegister lhs = pinned.set(__ PopToRegister(src_rc, pinned));
- LiftoffRegister dst = __ GetUnusedRegister(reg_class_for(result_type));
+ LiftoffRegister rhs = pinned.set(__ PopToRegister(pinned));
+ LiftoffRegister lhs = pinned.set(__ PopToRegister(pinned));
+ LiftoffRegister dst =
+ src_rc == result_rc
+ ? __ GetUnusedRegister(result_rc, {lhs, rhs}, pinned)
+ : __ GetUnusedRegister(result_rc);
fn(dst, lhs, rhs);
__ PushRegister(result_type, dst);
}
@@ -601,41 +758,69 @@ class LiftoffCompiler {
const Value& lhs, const Value& rhs, Value* result) {
#define CASE_I32_BINOP(opcode, fn) \
case WasmOpcode::kExpr##opcode: \
- return EmitMonomorphicBinOp<kWasmI32>( \
+ return EmitBinOp<kWasmI32, kWasmI32>( \
[=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
__ emit_##fn(dst.gp(), lhs.gp(), rhs.gp()); \
});
+#define CASE_I64_BINOP(opcode, fn) \
+ case WasmOpcode::kExpr##opcode: \
+ return EmitBinOp<kWasmI64, kWasmI64>( \
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
+ __ emit_##fn(dst, lhs, rhs); \
+ });
#define CASE_FLOAT_BINOP(opcode, type, fn) \
case WasmOpcode::kExpr##opcode: \
- return EmitMonomorphicBinOp<kWasm##type>( \
+ return EmitBinOp<kWasm##type, kWasm##type>( \
[=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
__ emit_##fn(dst.fp(), lhs.fp(), rhs.fp()); \
});
#define CASE_I32_CMPOP(opcode, cond) \
case WasmOpcode::kExpr##opcode: \
- return EmitMonomorphicBinOp<kWasmI32>( \
+ return EmitBinOp<kWasmI32, kWasmI32>( \
[=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
__ emit_i32_set_cond(cond, dst.gp(), lhs.gp(), rhs.gp()); \
});
+#define CASE_I64_CMPOP(opcode, cond) \
+ case WasmOpcode::kExpr##opcode: \
+ return EmitBinOp<kWasmI64, kWasmI32>( \
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
+ __ emit_i64_set_cond(cond, dst.gp(), lhs, rhs); \
+ });
#define CASE_F32_CMPOP(opcode, cond) \
case WasmOpcode::kExpr##opcode: \
- return EmitBinOpWithDifferentResultType<kWasmI32, kFpReg>( \
+ return EmitBinOp<kWasmF32, kWasmI32>( \
[=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
__ emit_f32_set_cond(cond, dst.gp(), lhs.fp(), rhs.fp()); \
});
-#define CASE_SHIFTOP(opcode, fn) \
+#define CASE_F64_CMPOP(opcode, cond) \
case WasmOpcode::kExpr##opcode: \
- return EmitMonomorphicBinOp<kWasmI32>( \
+ return EmitBinOp<kWasmF64, kWasmI32>( \
+ [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
+ __ emit_f64_set_cond(cond, dst.gp(), lhs.fp(), rhs.fp()); \
+ });
+#define CASE_I32_SHIFTOP(opcode, fn) \
+ case WasmOpcode::kExpr##opcode: \
+ return EmitBinOp<kWasmI32, kWasmI32>( \
[=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
__ emit_##fn(dst.gp(), lhs.gp(), rhs.gp(), {}); \
});
+#define CASE_I64_SHIFTOP(opcode, fn) \
+ case WasmOpcode::kExpr##opcode: \
+ return EmitBinOp<kWasmI64, kWasmI64>([=](LiftoffRegister dst, \
+ LiftoffRegister src, \
+ LiftoffRegister amount) { \
+ __ emit_##fn(dst, src, amount.is_pair() ? amount.low_gp() : amount.gp(), \
+ {}); \
+ });
#define CASE_CCALL_BINOP(opcode, type, ext_ref_fn) \
case WasmOpcode::kExpr##opcode: \
- return EmitMonomorphicBinOp<kWasmI32>( \
+ return EmitBinOp<kWasmI32, kWasmI32>( \
[=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
- Register args[] = {lhs.gp(), rhs.gp()}; \
+ LiftoffRegister args[] = {lhs, rhs}; \
auto ext_ref = ExternalReference::ext_ref_fn(__ isolate()); \
- GenerateCCall(dst.gp(), arraysize(args), args, ext_ref); \
+ ValueType sig_i_ii_reps[] = {kWasmI32, kWasmI32, kWasmI32}; \
+ FunctionSig sig_i_ii(1, 2, sig_i_ii_reps); \
+ GenerateCCall(&dst, &sig_i_ii, kWasmStmt, args, ext_ref); \
});
switch (opcode) {
CASE_I32_BINOP(I32Add, i32_add)
@@ -644,6 +829,9 @@ class LiftoffCompiler {
CASE_I32_BINOP(I32And, i32_and)
CASE_I32_BINOP(I32Ior, i32_or)
CASE_I32_BINOP(I32Xor, i32_xor)
+ CASE_I64_BINOP(I64And, i64_and)
+ CASE_I64_BINOP(I64Ior, i64_or)
+ CASE_I64_BINOP(I64Xor, i64_xor)
CASE_I32_CMPOP(I32Eq, kEqual)
CASE_I32_CMPOP(I32Ne, kUnequal)
CASE_I32_CMPOP(I32LtS, kSignedLessThan)
@@ -654,31 +842,58 @@ class LiftoffCompiler {
CASE_I32_CMPOP(I32LeU, kUnsignedLessEqual)
CASE_I32_CMPOP(I32GeS, kSignedGreaterEqual)
CASE_I32_CMPOP(I32GeU, kUnsignedGreaterEqual)
+ CASE_I64_BINOP(I64Add, i64_add)
+ CASE_I64_BINOP(I64Sub, i64_sub)
+ CASE_I64_CMPOP(I64Eq, kEqual)
+ CASE_I64_CMPOP(I64Ne, kUnequal)
+ CASE_I64_CMPOP(I64LtS, kSignedLessThan)
+ CASE_I64_CMPOP(I64LtU, kUnsignedLessThan)
+ CASE_I64_CMPOP(I64GtS, kSignedGreaterThan)
+ CASE_I64_CMPOP(I64GtU, kUnsignedGreaterThan)
+ CASE_I64_CMPOP(I64LeS, kSignedLessEqual)
+ CASE_I64_CMPOP(I64LeU, kUnsignedLessEqual)
+ CASE_I64_CMPOP(I64GeS, kSignedGreaterEqual)
+ CASE_I64_CMPOP(I64GeU, kUnsignedGreaterEqual)
CASE_F32_CMPOP(F32Eq, kEqual)
CASE_F32_CMPOP(F32Ne, kUnequal)
CASE_F32_CMPOP(F32Lt, kUnsignedLessThan)
CASE_F32_CMPOP(F32Gt, kUnsignedGreaterThan)
CASE_F32_CMPOP(F32Le, kUnsignedLessEqual)
CASE_F32_CMPOP(F32Ge, kUnsignedGreaterEqual)
- CASE_SHIFTOP(I32Shl, i32_shl)
- CASE_SHIFTOP(I32ShrS, i32_sar)
- CASE_SHIFTOP(I32ShrU, i32_shr)
+ CASE_F64_CMPOP(F64Eq, kEqual)
+ CASE_F64_CMPOP(F64Ne, kUnequal)
+ CASE_F64_CMPOP(F64Lt, kUnsignedLessThan)
+ CASE_F64_CMPOP(F64Gt, kUnsignedGreaterThan)
+ CASE_F64_CMPOP(F64Le, kUnsignedLessEqual)
+ CASE_F64_CMPOP(F64Ge, kUnsignedGreaterEqual)
+ CASE_I32_SHIFTOP(I32Shl, i32_shl)
+ CASE_I32_SHIFTOP(I32ShrS, i32_sar)
+ CASE_I32_SHIFTOP(I32ShrU, i32_shr)
+ CASE_I64_SHIFTOP(I64Shl, i64_shl)
+ CASE_I64_SHIFTOP(I64ShrS, i64_sar)
+ CASE_I64_SHIFTOP(I64ShrU, i64_shr)
CASE_CCALL_BINOP(I32Rol, I32, wasm_word32_rol)
CASE_CCALL_BINOP(I32Ror, I32, wasm_word32_ror)
CASE_FLOAT_BINOP(F32Add, F32, f32_add)
CASE_FLOAT_BINOP(F32Sub, F32, f32_sub)
CASE_FLOAT_BINOP(F32Mul, F32, f32_mul)
+ CASE_FLOAT_BINOP(F32Div, F32, f32_div)
CASE_FLOAT_BINOP(F64Add, F64, f64_add)
CASE_FLOAT_BINOP(F64Sub, F64, f64_sub)
CASE_FLOAT_BINOP(F64Mul, F64, f64_mul)
+ CASE_FLOAT_BINOP(F64Div, F64, f64_div)
default:
return unsupported(decoder, WasmOpcodes::OpcodeName(opcode));
}
#undef CASE_I32_BINOP
+#undef CASE_I64_BINOP
#undef CASE_FLOAT_BINOP
#undef CASE_I32_CMPOP
+#undef CASE_I64_CMPOP
#undef CASE_F32_CMPOP
-#undef CASE_SHIFTOP
+#undef CASE_F64_CMPOP
+#undef CASE_I32_SHIFTOP
+#undef CASE_I64_SHIFTOP
#undef CASE_CCALL_BINOP
}
@@ -713,6 +928,10 @@ class LiftoffCompiler {
__ PushRegister(kWasmF64, reg);
}
+ void RefNull(Decoder* decoder, Value* result) {
+ unsupported(decoder, "ref_null");
+ }
+
void Drop(Decoder* decoder, const Value& value) {
__ DropStackSlot(&__ cache_state()->stack_state.back());
__ cache_state()->stack_state.pop_back();
@@ -727,8 +946,7 @@ class LiftoffCompiler {
}
if (!values.is_empty()) {
if (values.size() > 1) return unsupported(decoder, "multi-return");
- RegClass rc = reg_class_for(values[0].type);
- LiftoffRegister reg = __ PopToRegister(rc);
+ LiftoffRegister reg = __ PopToRegister();
__ MoveToReturnRegister(reg, values[0].type);
}
__ LeaveFrame(StackFrame::WASM_COMPILED);
@@ -812,35 +1030,29 @@ class LiftoffCompiler {
void GetGlobal(Decoder* decoder, Value* result,
const GlobalIndexOperand<validate>& operand) {
const auto* global = &env_->module->globals[operand.index];
- if (global->type != kWasmI32 && global->type != kWasmI64)
- return unsupported(decoder, "non-int global");
+ if (!CheckSupportedType(decoder, kTypes_ilfd, global->type, "global"))
+ return;
LiftoffRegList pinned;
- Register addr = pinned.set(__ GetUnusedRegister(kGpReg)).gp();
- __ LoadFromContext(addr, offsetof(WasmContext, globals_start),
- kPointerSize);
+ LiftoffRegister addr = pinned.set(__ GetUnusedRegister(kGpReg));
+ LOAD_INSTANCE_FIELD(addr, GlobalsStart, kPointerLoadType);
LiftoffRegister value =
pinned.set(__ GetUnusedRegister(reg_class_for(global->type), pinned));
- LoadType type =
- global->type == kWasmI32 ? LoadType::kI32Load : LoadType::kI64Load;
- if (type.size() > kPointerSize)
- return unsupported(decoder, "global > kPointerSize");
- __ Load(value, addr, no_reg, global->offset, type, pinned);
+ LoadType type = LoadType::ForValueType(global->type);
+ __ Load(value, addr.gp(), no_reg, global->offset, type, pinned);
__ PushRegister(global->type, value);
}
void SetGlobal(Decoder* decoder, const Value& value,
const GlobalIndexOperand<validate>& operand) {
auto* global = &env_->module->globals[operand.index];
- if (global->type != kWasmI32) return unsupported(decoder, "non-i32 global");
+ if (!CheckSupportedType(decoder, kTypes_ilfd, global->type, "global"))
+ return;
LiftoffRegList pinned;
- Register addr = pinned.set(__ GetUnusedRegister(kGpReg)).gp();
- __ LoadFromContext(addr, offsetof(WasmContext, globals_start),
- kPointerSize);
- LiftoffRegister reg =
- pinned.set(__ PopToRegister(reg_class_for(global->type), pinned));
- StoreType type =
- global->type == kWasmI32 ? StoreType::kI32Store : StoreType::kI64Store;
- __ Store(addr, no_reg, global->offset, reg, type, pinned);
+ LiftoffRegister addr = pinned.set(__ GetUnusedRegister(kGpReg));
+ LOAD_INSTANCE_FIELD(addr, GlobalsStart, kPointerLoadType);
+ LiftoffRegister reg = pinned.set(__ PopToRegister(pinned));
+ StoreType type = StoreType::ForValueType(global->type);
+ __ Store(addr.gp(), no_reg, global->offset, reg, type, pinned);
}
void Unreachable(Decoder* decoder) { unsupported(decoder, "unreachable"); }
@@ -865,7 +1077,7 @@ class LiftoffCompiler {
void BrIf(Decoder* decoder, const Value& cond, Control* target) {
Label cont_false;
- Register value = __ PopToRegister(kGpReg).gp();
+ Register value = __ PopToRegister().gp();
__ emit_cond_jump(kEqual, &cont_false, kWasmI32, value);
Br(target);
@@ -916,7 +1128,7 @@ class LiftoffCompiler {
void BrTable(Decoder* decoder, const BranchTableOperand<validate>& operand,
const Value& key) {
LiftoffRegList pinned;
- LiftoffRegister value = pinned.set(__ PopToRegister(kGpReg));
+ LiftoffRegister value = pinned.set(__ PopToRegister());
BranchTableIterator<validate> table_iterator(decoder, operand);
std::map<uint32_t, MovableLabel> br_targets;
@@ -968,8 +1180,12 @@ class LiftoffCompiler {
return false;
}
+ // TODO(eholk): This adds protected instruction information for the jump
+ // instruction we are about to generate. It would be better to just not add
+ // protected instruction info when the pc is 0.
Label* trap_label = AddOutOfLineTrap(
- decoder->position(), Builtins::kThrowWasmTrapMemOutOfBounds);
+ decoder->position(), Builtins::kThrowWasmTrapMemOutOfBounds,
+ env_->use_trap_handler ? __ pc_offset() : 0);
if (statically_oob) {
__ emit_jump(trap_label);
@@ -991,7 +1207,7 @@ class LiftoffCompiler {
LiftoffRegister end_offset_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
LiftoffRegister mem_size = __ GetUnusedRegister(kGpReg, pinned);
- __ LoadFromContext(mem_size.gp(), offsetof(WasmContext, mem_size), 4);
+ LOAD_INSTANCE_FIELD(mem_size, MemorySize, LoadType::kI32Load);
__ LoadConstant(end_offset_reg, WasmValue(end_offset));
if (end_offset >= min_size_) {
__ emit_cond_jump(kUnsignedGreaterEqual, trap_label, kWasmI32,
@@ -1078,16 +1294,16 @@ class LiftoffCompiler {
ValueType value_type = type.value_type();
if (!CheckSupportedType(decoder, kTypes_ilfd, value_type, "load")) return;
LiftoffRegList pinned;
- Register index = pinned.set(__ PopToRegister(kGpReg)).gp();
+ Register index = pinned.set(__ PopToRegister()).gp();
if (BoundsCheckMem(decoder, type.size(), operand.offset, index, pinned)) {
return;
}
- Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- __ LoadFromContext(addr, offsetof(WasmContext, mem_start), kPointerSize);
+ LiftoffRegister addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ LOAD_INSTANCE_FIELD(addr, MemoryStart, kPointerLoadType);
RegClass rc = reg_class_for(value_type);
LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
uint32_t protected_load_pc = 0;
- __ Load(value, addr, index, operand.offset, type, pinned,
+ __ Load(value, addr.gp(), index, operand.offset, type, pinned,
&protected_load_pc);
if (env_->use_trap_handler) {
AddOutOfLineTrap(decoder->position(),
@@ -1107,17 +1323,16 @@ class LiftoffCompiler {
const Value& index_val, const Value& value_val) {
ValueType value_type = type.value_type();
if (!CheckSupportedType(decoder, kTypes_ilfd, value_type, "store")) return;
- RegClass rc = reg_class_for(value_type);
LiftoffRegList pinned;
- LiftoffRegister value = pinned.set(__ PopToRegister(rc));
- Register index = pinned.set(__ PopToRegister(kGpReg, pinned)).gp();
+ LiftoffRegister value = pinned.set(__ PopToRegister());
+ Register index = pinned.set(__ PopToRegister(pinned)).gp();
if (BoundsCheckMem(decoder, type.size(), operand.offset, index, pinned)) {
return;
}
- Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- __ LoadFromContext(addr, offsetof(WasmContext, mem_start), kPointerSize);
+ LiftoffRegister addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ LOAD_INSTANCE_FIELD(addr, MemoryStart, kPointerLoadType);
uint32_t protected_store_pc = 0;
- __ Store(addr, index, operand.offset, value, type, pinned,
+ __ Store(addr.gp(), index, operand.offset, value, type, pinned,
&protected_store_pc);
if (env_->use_trap_handler) {
AddOutOfLineTrap(decoder->position(),
@@ -1152,26 +1367,55 @@ class LiftoffCompiler {
call_descriptor =
GetLoweredCallDescriptor(compilation_zone_, call_descriptor);
- __ PrepareCall(operand.sig, call_descriptor);
+ if (operand.index < env_->module->num_imported_functions) {
+ // A direct call to an imported function.
+ LiftoffRegList pinned;
+ LiftoffRegister tmp = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ LiftoffRegister target = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+
+ LiftoffRegister imported_targets = tmp;
+ LOAD_INSTANCE_FIELD(imported_targets, ImportedFunctionTargets,
+ kPointerLoadType);
+ __ Load(target, imported_targets.gp(), no_reg,
+ operand.index * sizeof(Address), kPointerLoadType, pinned);
+
+ LiftoffRegister imported_instances = tmp;
+ LOAD_INSTANCE_FIELD(imported_instances, ImportedFunctionInstances,
+ kPointerLoadType);
+ LiftoffRegister target_instance = tmp;
+ __ Load(target_instance, imported_instances.gp(), no_reg,
+ compiler::FixedArrayOffsetMinusTag(operand.index),
+ kPointerLoadType, pinned);
- source_position_table_builder_->AddPosition(
- __ pc_offset(), SourcePosition(decoder->position()), false);
+ LiftoffRegister* explicit_instance = &target_instance;
+ Register target_reg = target.gp();
+ __ PrepareCall(operand.sig, call_descriptor, &target_reg,
+ explicit_instance);
+ source_position_table_builder_->AddPosition(
+ __ pc_offset(), SourcePosition(decoder->position()), false);
+
+ __ CallIndirect(operand.sig, call_descriptor, target_reg);
+
+ safepoint_table_builder_.DefineSafepoint(asm_, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+
+ __ FinishCall(operand.sig, call_descriptor);
+ } else {
+ // A direct call within this module just gets the current instance.
+ __ PrepareCall(operand.sig, call_descriptor);
+
+ source_position_table_builder_->AddPosition(
+ __ pc_offset(), SourcePosition(decoder->position()), false);
- if (FLAG_wasm_jit_to_native) {
// Just encode the function index. This will be patched at instantiation.
Address addr = reinterpret_cast<Address>(operand.index);
__ CallNativeWasmCode(addr);
- } else {
- Handle<Code> target = operand.index < env_->function_code.size()
- ? env_->function_code[operand.index]
- : env_->default_function_code;
- __ Call(target, RelocInfo::CODE_TARGET);
- }
- safepoint_table_builder_.DefineSafepoint(asm_, Safepoint::kSimple, 0,
- Safepoint::kNoLazyDeopt);
+ safepoint_table_builder_.DefineSafepoint(asm_, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
- __ FinishCall(operand.sig, call_descriptor);
+ __ FinishCall(operand.sig, call_descriptor);
+ }
}
void CallIndirect(Decoder* decoder, const Value& index_val,
@@ -1186,11 +1430,8 @@ class LiftoffCompiler {
return;
}
- // Assume only one table for now.
- uint32_t table_index = 0;
-
// Pop the index.
- LiftoffRegister index = __ PopToRegister(kGpReg);
+ LiftoffRegister index = __ PopToRegister();
// If that register is still being used after popping, we move it to another
// register, because we want to modify that register.
if (__ cache_state()->is_used(index)) {
@@ -1207,111 +1448,56 @@ class LiftoffCompiler {
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
LiftoffRegister scratch = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- LiftoffRegister* explicit_context = nullptr;
-
// Bounds check against the table size.
Label* invalid_func_label = AddOutOfLineTrap(
decoder->position(), Builtins::kThrowWasmTrapFuncInvalid);
- static constexpr LoadType kPointerLoadType =
- kPointerSize == 8 ? LoadType::kI64Load : LoadType::kI32Load;
- static constexpr int kFixedArrayOffset =
- FixedArray::kHeaderSize - kHeapObjectTag;
-
uint32_t canonical_sig_num = env_->module->signature_ids[operand.sig_index];
DCHECK_GE(canonical_sig_num, 0);
DCHECK_GE(kMaxInt, canonical_sig_num);
- if (WASM_CONTEXT_TABLES) {
- // Compare against table size stored in {wasm_context->table_size}.
- __ LoadFromContext(tmp_const.gp(), offsetof(WasmContext, table_size),
- sizeof(uint32_t));
- __ emit_cond_jump(kUnsignedGreaterEqual, invalid_func_label, kWasmI32,
- index.gp(), tmp_const.gp());
- // Load the table from {wasm_context->table}
- __ LoadFromContext(table.gp(), offsetof(WasmContext, table),
- kPointerSize);
- // Load the signature from {wasm_context->table[$index].sig_id}
- // == wasm_context.table + $index * #sizeof(IndirectionFunctionTableEntry)
- // + #offsetof(sig_id)
- __ LoadConstant(
- tmp_const,
- WasmValue(static_cast<uint32_t>(sizeof(IndirectFunctionTableEntry))));
+ // Compare against table size stored in
+ // {instance->indirect_function_table_size}.
+ LOAD_INSTANCE_FIELD(tmp_const, IndirectFunctionTableSize,
+ LoadType::kI32Load);
+ __ emit_cond_jump(kUnsignedGreaterEqual, invalid_func_label, kWasmI32,
+ index.gp(), tmp_const.gp());
+
+ // Load the signature from {instance->ift_sig_ids[key]}
+ LOAD_INSTANCE_FIELD(table, IndirectFunctionTableSigIds, kPointerLoadType);
+ __ LoadConstant(tmp_const,
+ WasmValue(static_cast<uint32_t>(sizeof(uint32_t))));
+ // TODO(wasm): use an emit_i32_shli() instead of a multiply.
+ // (currently cannot use shl on ia32/x64 because it clobbers %rcx).
+ __ emit_i32_mul(index.gp(), index.gp(), tmp_const.gp());
+ __ Load(scratch, table.gp(), index.gp(), 0, LoadType::kI32Load, pinned);
+
+ // Compare against expected signature.
+ __ LoadConstant(tmp_const, WasmValue(canonical_sig_num));
+
+ Label* sig_mismatch_label = AddOutOfLineTrap(
+ decoder->position(), Builtins::kThrowWasmTrapFuncSigMismatch);
+ __ emit_cond_jump(kUnequal, sig_mismatch_label,
+ LiftoffAssembler::kWasmIntPtr, scratch.gp(),
+ tmp_const.gp());
+
+ if (kPointerSize == 8) {
+ // {index} has already been multiplied by 4. Multiply by another 2.
+ __ LoadConstant(tmp_const, WasmValue(2));
__ emit_i32_mul(index.gp(), index.gp(), tmp_const.gp());
- __ Load(scratch, table.gp(), index.gp(),
- offsetof(IndirectFunctionTableEntry, sig_id), LoadType::kI32Load,
- pinned);
-
- __ LoadConstant(tmp_const, WasmValue(canonical_sig_num));
-
- Label* sig_mismatch_label = AddOutOfLineTrap(
- decoder->position(), Builtins::kThrowWasmTrapFuncSigMismatch);
- __ emit_cond_jump(kUnequal, sig_mismatch_label,
- LiftoffAssembler::kWasmIntPtr, scratch.gp(),
- tmp_const.gp());
-
- // Load the target address from {wasm_context->table[$index].target}
- __ Load(scratch, table.gp(), index.gp(),
- offsetof(IndirectFunctionTableEntry, target), kPointerLoadType,
- pinned);
-
- // Load the context from {wasm_context->table[$index].context}
- // TODO(wasm): directly allocate the correct context register to avoid
- // any potential moves.
- __ Load(tmp_const, table.gp(), index.gp(),
- offsetof(IndirectFunctionTableEntry, context), kPointerLoadType,
- pinned);
- explicit_context = &tmp_const;
- } else {
- // Compare against table size, which is a patchable constant.
- uint32_t table_size =
- env_->module->function_tables[table_index].initial_size;
-
- __ LoadConstant(tmp_const, WasmValue(table_size),
- RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE);
-
- __ emit_cond_jump(kUnsignedGreaterEqual, invalid_func_label, kWasmI32,
- index.gp(), tmp_const.gp());
-
- wasm::GlobalHandleAddress function_table_handle_address =
- env_->function_tables[table_index];
- __ LoadConstant(table, WasmPtrValue(function_table_handle_address),
- RelocInfo::WASM_GLOBAL_HANDLE);
- __ Load(table, table.gp(), no_reg, 0, kPointerLoadType, pinned);
-
- // Load signature from the table and check.
- // The table is a FixedArray; signatures are encoded as SMIs.
- // [sig1, code1, sig2, code2, sig3, code3, ...]
- static_assert(compiler::kFunctionTableEntrySize == 2, "consistency");
- static_assert(compiler::kFunctionTableSignatureOffset == 0,
- "consistency");
- static_assert(compiler::kFunctionTableCodeOffset == 1, "consistency");
- __ LoadConstant(tmp_const, WasmValue(kPointerSizeLog2 + 1));
- // Shift index such that it's the offset of the signature in the
- // FixedArray.
- __ emit_i32_shl(index.gp(), index.gp(), tmp_const.gp(), pinned);
-
- // Load the signature.
- __ Load(scratch, table.gp(), index.gp(), kFixedArrayOffset,
- kPointerLoadType, pinned);
-
- __ LoadConstant(tmp_const, WasmPtrValue(Smi::FromInt(canonical_sig_num)));
+ }
- Label* sig_mismatch_label = AddOutOfLineTrap(
- decoder->position(), Builtins::kThrowWasmTrapFuncSigMismatch);
- __ emit_cond_jump(kUnequal, sig_mismatch_label,
- LiftoffAssembler::kWasmIntPtr, scratch.gp(),
- tmp_const.gp());
+ // Load the target from {instance->ift_targets[key]}
+ LOAD_INSTANCE_FIELD(table, IndirectFunctionTableTargets, kPointerLoadType);
+ __ Load(scratch, table.gp(), index.gp(), 0, kPointerLoadType, pinned);
- // Load code object.
- __ Load(scratch, table.gp(), index.gp(), kFixedArrayOffset + kPointerSize,
- kPointerLoadType, pinned);
-
- // Move the pointer from the Code object to the instruction start.
- __ LoadConstant(tmp_const,
- WasmPtrValue(Code::kHeaderSize - kHeapObjectTag));
- __ emit_ptrsize_add(scratch.gp(), scratch.gp(), tmp_const.gp());
- }
+ // Load the instance from {instance->ift_instances[key]}
+ LOAD_INSTANCE_FIELD(table, IndirectFunctionTableInstances,
+ kPointerLoadType);
+ __ Load(tmp_const, table.gp(), index.gp(),
+ (FixedArray::kHeaderSize - kHeapObjectTag), kPointerLoadType,
+ pinned);
+ LiftoffRegister* explicit_instance = &tmp_const;
source_position_table_builder_->AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), false);
@@ -1322,7 +1508,7 @@ class LiftoffCompiler {
GetLoweredCallDescriptor(compilation_zone_, call_descriptor);
Register target = scratch.gp();
- __ PrepareCall(operand.sig, call_descriptor, &target, explicit_context);
+ __ PrepareCall(operand.sig, call_descriptor, &target, explicit_instance);
__ CallIndirect(operand.sig, call_descriptor, target);
safepoint_table_builder_.DefineSafepoint(asm_, Safepoint::kSimple, 0,
@@ -1372,11 +1558,10 @@ class LiftoffCompiler {
// {min_size_} and {max_size_} are cached values computed from the ModuleEnv.
const uint64_t min_size_;
const uint64_t max_size_;
- const compiler::RuntimeExceptionSupport runtime_exception_support_;
bool ok_ = true;
std::vector<OutOfLineCode> out_of_line_code_;
SourcePositionTableBuilder* const source_position_table_builder_;
- std::vector<trap_handler::ProtectedInstructionData>* protected_instructions_;
+ WasmCompilationData* wasm_compilation_data_;
// Zone used to store information during compilation. The result will be
// stored independently, such that this zone can die together with the
// LiftoffCompiler after compilation.
@@ -1389,6 +1574,10 @@ class LiftoffCompiler {
// patch the actually needed stack size in the end.
uint32_t pc_offset_stack_frame_construction_ = 0;
+ // Points to the cell within the {code_table_} of the NativeModule,
+ // which corresponds to the currently compiled function
+ WasmCode* const* code_table_entry_ = nullptr;
+
void TraceCacheState(Decoder* decoder) const {
#ifdef DEBUG
if (!FLAG_trace_liftoff || !FLAG_trace_wasm_decoder) return;
@@ -1425,11 +1614,12 @@ bool compiler::WasmCompilationUnit::ExecuteLiftoffCompilation() {
auto call_descriptor = compiler::GetWasmCallDescriptor(&zone, func_body_.sig);
base::Optional<TimedHistogramScope> liftoff_compile_time_scope(
base::in_place, counters()->liftoff_compile_time());
+ wasm::WasmCode* const* code_table_entry =
+ native_module_->code_table().data() + func_index_;
wasm::WasmFullDecoder<wasm::Decoder::kValidate, wasm::LiftoffCompiler>
decoder(&zone, module, func_body_, &liftoff_.asm_, call_descriptor, env_,
- runtime_exception_support_,
- &liftoff_.source_position_table_builder_,
- protected_instructions_.get(), &zone, &liftoff_.codegen_zone_);
+ &liftoff_.source_position_table_builder_, &wasm_compilation_data_,
+ &zone, &liftoff_.codegen_zone_, code_table_entry);
decoder.Decode();
liftoff_compile_time_scope.reset();
if (!decoder.interface().ok()) {
@@ -1458,6 +1648,8 @@ bool compiler::WasmCompilationUnit::ExecuteLiftoffCompilation() {
#undef __
#undef TRACE
+#undef WASM_INSTANCE_OBJECT_OFFSET
+#undef LOAD_INSTANCE_FIELD
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/baseline/liftoff-register.h b/deps/v8/src/wasm/baseline/liftoff-register.h
index eedbf54a17..3db7bdc1a4 100644
--- a/deps/v8/src/wasm/baseline/liftoff-register.h
+++ b/deps/v8/src/wasm/baseline/liftoff-register.h
@@ -28,9 +28,13 @@ enum RegClass : uint8_t {
enum RegPairHalf : uint8_t { kLowWord, kHighWord };
+static inline constexpr bool needs_reg_pair(ValueType type) {
+ return kNeedI64RegPair && type == kWasmI64;
+}
+
// TODO(clemensh): Use a switch once we require C++14 support.
static inline constexpr RegClass reg_class_for(ValueType type) {
- return kNeedI64RegPair && type == kWasmI64 // i64 on 32 bit
+ return needs_reg_pair(type) // i64 on 32 bit
? kGpRegPair
: type == kWasmI32 || type == kWasmI64 // int types
? kGpReg
@@ -99,11 +103,10 @@ class LiftoffRegister {
}
}
- static LiftoffRegister ForPair(LiftoffRegister low, LiftoffRegister high) {
+ static LiftoffRegister ForPair(Register low, Register high) {
DCHECK(kNeedI64RegPair);
DCHECK_NE(low, high);
- storage_t combined_code = low.gp().code() |
- high.gp().code() << kBitsPerGpRegCode |
+ storage_t combined_code = low.code() | high.code() << kBitsPerGpRegCode |
1 << (2 * kBitsPerGpRegCode);
return LiftoffRegister(combined_code);
}
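The pair encoding above packs both general-purpose register codes plus a tag bit into one integer. A minimal sketch of that layout, assuming six bits per register code as a stand-in for kBitsPerGpRegCode (EncodePair, LowCode, HighCode are illustrative names):

#include <cstdint>

constexpr int kBits = 6;  // stand-in for kBitsPerGpRegCode

constexpr uint32_t EncodePair(uint32_t low_code, uint32_t high_code) {
  // Low code in the low bits, high code above it, and a tag bit at
  // position 2 * kBits marking the value as a register pair.
  return low_code | (high_code << kBits) | (1u << (2 * kBits));
}

constexpr uint32_t LowCode(uint32_t pair) { return pair & ((1u << kBits) - 1); }
constexpr uint32_t HighCode(uint32_t pair) {
  return (pair >> kBits) & ((1u << kBits) - 1);
}

static_assert(LowCode(EncodePair(2, 3)) == 2, "low code round-trips");
static_assert(HighCode(EncodePair(2, 3)) == 3, "high code round-trips");
static_assert((EncodePair(2, 3) >> (2 * kBits)) == 1, "pair tag bit is set");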
@@ -171,8 +174,7 @@ class LiftoffRegister {
explicit constexpr LiftoffRegister(storage_t code) : code_(code) {}
};
-static_assert(IS_TRIVIALLY_COPYABLE(LiftoffRegister),
- "LiftoffRegister can efficiently be passed by value");
+ASSERT_TRIVIALLY_COPYABLE(LiftoffRegister);
inline std::ostream& operator<<(std::ostream& os, LiftoffRegister reg) {
if (reg.is_pair()) {
@@ -231,6 +233,8 @@ class LiftoffRegList {
}
return (regs_ & (storage_t{1} << reg.liftoff_code())) != 0;
}
+ bool has(Register reg) const { return has(LiftoffRegister(reg)); }
+ bool has(DoubleRegister reg) const { return has(LiftoffRegister(reg)); }
constexpr bool is_empty() const { return regs_ == 0; }
@@ -298,8 +302,7 @@ class LiftoffRegList {
// Unchecked constructor. Only use for valid bits.
explicit constexpr LiftoffRegList(storage_t bits) : regs_(bits) {}
};
-static_assert(IS_TRIVIALLY_COPYABLE(LiftoffRegList),
- "LiftoffRegList can be passed by value");
+ASSERT_TRIVIALLY_COPYABLE(LiftoffRegList);
static constexpr LiftoffRegList kGpCacheRegList =
LiftoffRegList::FromBits<LiftoffRegList::kGpMask>();
diff --git a/deps/v8/src/wasm/baseline/mips/OWNERS b/deps/v8/src/wasm/baseline/mips/OWNERS
new file mode 100644
index 0000000000..cf2df277c9
--- /dev/null
+++ b/deps/v8/src/wasm/baseline/mips/OWNERS
@@ -0,0 +1,3 @@
+ivica.bogosavljevic@mips.com
+Miran.Karic@mips.com
+sreten.kovacevic@mips.com
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index fda98aea62..317677c97c 100644
--- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -15,29 +15,77 @@ namespace wasm {
namespace liftoff {
-// sp-8 holds the stack marker, sp-16 is the wasm context, first stack slot
-// is located at sp-24.
-constexpr int32_t kConstantStackSpace = 16;
+// fp-4 holds the stack marker, fp-8 is the instance parameter, first stack
+// slot is located at fp-16.
+constexpr int32_t kConstantStackSpace = 8;
constexpr int32_t kFirstStackSlotOffset =
kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
inline MemOperand GetStackSlot(uint32_t index) {
int32_t offset = index * LiftoffAssembler::kStackSlotSize;
- return MemOperand(sp, -kFirstStackSlotOffset - offset);
+ return MemOperand(fp, -kFirstStackSlotOffset - offset);
}
inline MemOperand GetHalfStackSlot(uint32_t half_index) {
int32_t offset = half_index * (LiftoffAssembler::kStackSlotSize / 2);
- return MemOperand(sp, -kFirstStackSlotOffset - offset);
+ return MemOperand(fp, -kFirstStackSlotOffset - offset);
}
-inline MemOperand GetContextOperand() { return MemOperand(sp, -16); }
+inline MemOperand GetInstanceOperand() { return MemOperand(fp, -8); }
+
+// Use this register to store the address of the last argument pushed on the
+// stack for a call to C. This register must be callee-saved according to the
+// C calling convention.
+static constexpr Register kCCallLastArgAddrReg = s1;
+
+inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
+ ValueType type) {
+ switch (type) {
+ case kWasmI32:
+ assm->lw(dst.gp(), src);
+ break;
+ case kWasmF32:
+ assm->lwc1(dst.fp(), src);
+ break;
+ case kWasmF64:
+ assm->Ldc1(dst.fp(), src);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
+ switch (type) {
+ case kWasmI32:
+ assm->push(reg.gp());
+ break;
+ case kWasmI64:
+ assm->Push(reg.high_gp(), reg.low_gp());
+ break;
+ case kWasmF32:
+ assm->addiu(sp, sp, -sizeof(float));
+ assm->swc1(reg.fp(), MemOperand(sp, 0));
+ break;
+ case kWasmF64:
+ assm->addiu(sp, sp, -sizeof(double));
+ assm->Sdc1(reg.fp(), MemOperand(sp, 0));
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
} // namespace liftoff
uint32_t LiftoffAssembler::PrepareStackFrame() {
uint32_t offset = static_cast<uint32_t>(pc_offset());
+  // When the constant that represents the size of the stack frame can't be
+  // represented as a 16-bit value, we need three instructions to add it to
+  // sp, so we reserve space for this case.
addiu(sp, sp, 0);
+ nop();
+ nop();
return offset;
}
@@ -47,9 +95,13 @@ void LiftoffAssembler::PatchPrepareStackFrame(uint32_t offset,
DCHECK_LE(bytes, kMaxInt);
// We can't run out of space, just pass anything big enough to not cause the
// assembler to try to grow the buffer.
- constexpr int kAvailableSpace = 64;
- Assembler patching_assembler(isolate(), buffer_ + offset, kAvailableSpace);
- patching_assembler.addiu(sp, sp, -bytes);
+ constexpr int kAvailableSpace = 256;
+ TurboAssembler patching_assembler(isolate(), buffer_ + offset,
+ kAvailableSpace, CodeObjectRequired::kNo);
+  // If {bytes} can be represented as a 16-bit value, an addiu is generated
+  // and the two nops stay untouched. Otherwise, a lui-ori sequence loads the
+  // value into a register and an addu is generated as the third instruction.
+ patching_assembler.Addu(sp, sp, Operand(-bytes));
}
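PrepareStackFrame emits a placeholder addiu plus two nops, and PatchPrepareStackFrame later assembles the real frame-size adjustment over that reserved spot once the stack size is known. A minimal sketch of the reserve-then-patch idea over a plain byte buffer (the encodings here are placeholders, not MIPS instructions, and the names are illustrative):

#include <cstdint>
#include <cstring>
#include <vector>

struct PatchSketch {
  std::vector<uint8_t> buffer;

  // Reserve space for up to three 4-byte instructions and remember where.
  size_t PrepareStackFrame() {
    size_t offset = buffer.size();
    buffer.insert(buffer.end(), 3 * 4, uint8_t{0});  // placeholder + two "nops"
    return offset;
  }

  // Later, once the frame size is known, overwrite the reserved slot.
  void PatchPrepareStackFrame(size_t offset, uint32_t frame_bytes) {
    uint32_t insns[3] = {frame_bytes, 0, 0};  // stand-in encodings
    std::memcpy(buffer.data() + offset, insns, sizeof(insns));
  }
};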
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
@@ -77,20 +129,20 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
}
-void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
- int size) {
+void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
+ int size) {
DCHECK_LE(offset, kMaxInt);
- lw(dst, liftoff::GetContextOperand());
+ lw(dst, liftoff::GetInstanceOperand());
DCHECK_EQ(4, size);
lw(dst, MemOperand(dst, offset));
}
-void LiftoffAssembler::SpillContext(Register context) {
- sw(context, liftoff::GetContextOperand());
+void LiftoffAssembler::SpillInstance(Register instance) {
+ sw(instance, liftoff::GetInstanceOperand());
}
-void LiftoffAssembler::FillContextInto(Register dst) {
- lw(dst, liftoff::GetContextOperand());
+void LiftoffAssembler::FillInstanceInto(Register dst) {
+ lw(dst, liftoff::GetInstanceOperand());
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
@@ -224,7 +276,8 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
- BAILOUT("LoadCallerFrameSlot");
+ MemOperand src(fp, kPointerSize * (caller_slot_idx + 1));
+ liftoff::Load(this, dst, src, type);
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
@@ -240,9 +293,8 @@ void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg,
// TODO(wasm): Extract the destination register from the CallDescriptor.
// TODO(wasm): Add multi-return support.
LiftoffRegister dst =
- reg.is_pair()
- ? LiftoffRegister::ForPair(LiftoffRegister(v0), LiftoffRegister(v1))
- : reg.is_gp() ? LiftoffRegister(v0) : LiftoffRegister(f2);
+ reg.is_pair() ? LiftoffRegister::ForPair(v0, v1)
+ : reg.is_gp() ? LiftoffRegister(v0) : LiftoffRegister(f2);
if (reg != dst) Move(dst, reg, type);
}
@@ -356,11 +408,6 @@ I32_BINOP(xor, xor_)
#undef I32_BINOP
-void LiftoffAssembler::emit_ptrsize_add(Register dst, Register lhs,
- Register rhs) {
- emit_i32_add(dst, lhs, rhs);
-}
-
bool LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
TurboAssembler::Clz(dst, src);
return true;
@@ -376,10 +423,10 @@ bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
return true;
}
-#define I32_SHIFTOP(name, instruction) \
- void LiftoffAssembler::emit_i32_##name( \
- Register dst, Register lhs, Register rhs, LiftoffRegList pinned) { \
- instruction(dst, lhs, rhs); \
+#define I32_SHIFTOP(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name( \
+ Register dst, Register src, Register amount, LiftoffRegList pinned) { \
+ instruction(dst, src, amount); \
}
I32_SHIFTOP(shl, sllv)
@@ -388,32 +435,226 @@ I32_SHIFTOP(shr, srlv)
#undef I32_SHIFTOP
+#define UNIMPLEMENTED_I64_BINOP(name) \
+ void LiftoffAssembler::emit_i64_##name( \
+ LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
+ BAILOUT("i64 binop: " #name); \
+ }
+
+UNIMPLEMENTED_I64_BINOP(add)
+UNIMPLEMENTED_I64_BINOP(sub)
+
+#undef UNIMPLEMENTED_I64_BINOP
+
+namespace liftoff {
+
+inline bool IsRegInRegPair(LiftoffRegister pair, Register reg) {
+ DCHECK(pair.is_pair());
+ return pair.low_gp() == reg || pair.high_gp() == reg;
+}
+
+inline void Emit64BitShiftOperation(
+ LiftoffAssembler* assm, LiftoffRegister dst, LiftoffRegister src,
+ Register amount,
+ void (TurboAssembler::*emit_shift)(Register, Register, Register, Register,
+ Register, Register, Register),
+ LiftoffRegList pinned) {
+ Label move, done;
+ pinned.set(dst);
+ pinned.set(src);
+ pinned.set(amount);
+
+ // If shift amount is 0, don't do the shifting.
+ assm->TurboAssembler::Branch(&move, eq, amount, Operand(zero_reg));
+
+ if (liftoff::IsRegInRegPair(dst, amount) || dst.overlaps(src)) {
+ // If some of destination registers are in use, get another, unused pair.
+ // That way we prevent overwriting some input registers while shifting.
+ LiftoffRegister tmp = assm->GetUnusedRegister(kGpRegPair, pinned);
+
+ // Do the actual shift.
+ (assm->*emit_shift)(tmp.low_gp(), tmp.high_gp(), src.low_gp(),
+ src.high_gp(), amount, kScratchReg, kScratchReg2);
+
+ // Place result in destination register.
+ assm->TurboAssembler::Move(dst.high_gp(), tmp.high_gp());
+ assm->TurboAssembler::Move(dst.low_gp(), tmp.low_gp());
+ } else {
+ (assm->*emit_shift)(dst.low_gp(), dst.high_gp(), src.low_gp(),
+ src.high_gp(), amount, kScratchReg, kScratchReg2);
+ }
+ assm->TurboAssembler::Branch(&done);
+
+ // If shift amount is 0, move src to dst.
+ assm->bind(&move);
+ assm->TurboAssembler::Move(dst.high_gp(), src.high_gp());
+ assm->TurboAssembler::Move(dst.low_gp(), src.low_gp());
+
+ assm->bind(&done);
+}
+} // namespace liftoff
+
+void LiftoffAssembler::emit_i64_shl(LiftoffRegister dst, LiftoffRegister src,
+ Register amount, LiftoffRegList pinned) {
+ liftoff::Emit64BitShiftOperation(this, dst, src, amount,
+ &TurboAssembler::ShlPair, pinned);
+}
+
+void LiftoffAssembler::emit_i64_sar(LiftoffRegister dst, LiftoffRegister src,
+ Register amount, LiftoffRegList pinned) {
+ liftoff::Emit64BitShiftOperation(this, dst, src, amount,
+ &TurboAssembler::SarPair, pinned);
+}
+
+void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
+ Register amount, LiftoffRegList pinned) {
+ liftoff::Emit64BitShiftOperation(this, dst, src, amount,
+ &TurboAssembler::ShrPair, pinned);
+}
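The helper above has to assemble a 64-bit shift out of two 32-bit halves and to be careful about overlapping source and destination pairs. For reference, the value semantics of the left-shift case in plain C++ (a sketch of what ShlPair computes, not the assembler code; Halves and ShlPairSemantics are illustrative names):

#include <cstdint>

struct Halves {
  uint32_t low;
  uint32_t high;
};

inline Halves ShlPairSemantics(uint32_t low, uint32_t high, unsigned amount) {
  amount &= 63;
  if (amount == 0) return {low, high};  // matches the "move src to dst" path
  if (amount < 32) {
    return {low << amount, (high << amount) | (low >> (32 - amount))};
  }
  return {0u, low << (amount - 32)};  // the whole low word shifts into high
}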
+
#define FP_BINOP(name, instruction) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
DoubleRegister rhs) { \
instruction(dst, lhs, rhs); \
}
-#define UNIMPLEMENTED_FP_UNOP(name) \
+#define FP_UNOP(name, instruction) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
- BAILOUT("fp unop"); \
+ instruction(dst, src); \
}
FP_BINOP(f32_add, add_s)
FP_BINOP(f32_sub, sub_s)
FP_BINOP(f32_mul, mul_s)
-UNIMPLEMENTED_FP_UNOP(f32_neg)
+FP_BINOP(f32_div, div_s)
+FP_UNOP(f32_abs, abs_s)
+FP_UNOP(f32_neg, neg_s)
+FP_UNOP(f32_ceil, Ceil_s_s)
+FP_UNOP(f32_floor, Floor_s_s)
+FP_UNOP(f32_trunc, Trunc_s_s)
+FP_UNOP(f32_nearest_int, Round_s_s)
+FP_UNOP(f32_sqrt, sqrt_s)
FP_BINOP(f64_add, add_d)
FP_BINOP(f64_sub, sub_d)
FP_BINOP(f64_mul, mul_d)
-UNIMPLEMENTED_FP_UNOP(f64_neg)
+FP_BINOP(f64_div, div_d)
+FP_UNOP(f64_abs, abs_d)
+FP_UNOP(f64_neg, neg_d)
+FP_UNOP(f64_sqrt, sqrt_d)
#undef FP_BINOP
-#undef UNIMPLEMENTED_FP_BINOP
+#undef FP_UNOP
+
+void LiftoffAssembler::emit_f64_ceil(DoubleRegister dst, DoubleRegister src) {
+ if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode()) {
+ Ceil_d_d(dst, src);
+ } else {
+ BAILOUT("emit_f64_ceil");
+ }
+}
+
+void LiftoffAssembler::emit_f64_floor(DoubleRegister dst, DoubleRegister src) {
+ if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode()) {
+ Floor_d_d(dst, src);
+ } else {
+ BAILOUT("emit_f64_floor");
+ }
+}
+
+void LiftoffAssembler::emit_f64_trunc(DoubleRegister dst, DoubleRegister src) {
+ if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode()) {
+ Trunc_d_d(dst, src);
+ } else {
+ BAILOUT("emit_f64_trunc");
+ }
+}
+
+void LiftoffAssembler::emit_f64_nearest_int(DoubleRegister dst,
+ DoubleRegister src) {
+ if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
+ IsFp64Mode()) {
+ Round_d_d(dst, src);
+ } else {
+ BAILOUT("emit_f64_nearest_int");
+ }
+}
+
+bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
+ LiftoffRegister dst,
+ LiftoffRegister src) {
+ switch (opcode) {
+ case kExprI32ConvertI64:
+ TurboAssembler::Move(dst.gp(), src.low_gp());
+ return true;
+ case kExprI32ReinterpretF32:
+ mfc1(dst.gp(), src.fp());
+ return true;
+ case kExprI64SConvertI32:
+ TurboAssembler::Move(dst.low_gp(), src.gp());
+ TurboAssembler::Move(dst.high_gp(), src.gp());
+ sra(dst.high_gp(), dst.high_gp(), 31);
+ return true;
+ case kExprI64UConvertI32:
+ TurboAssembler::Move(dst.low_gp(), src.gp());
+ TurboAssembler::Move(dst.high_gp(), zero_reg);
+ return true;
+ case kExprI64ReinterpretF64:
+ mfc1(dst.low_gp(), src.fp());
+ TurboAssembler::Mfhc1(dst.high_gp(), src.fp());
+ return true;
+ case kExprF32SConvertI32: {
+ LiftoffRegister scratch =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst));
+ mtc1(src.gp(), scratch.fp());
+ cvt_s_w(dst.fp(), scratch.fp());
+ return true;
+ }
+ case kExprF32UConvertI32: {
+ LiftoffRegister scratch =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst));
+ TurboAssembler::Cvt_d_uw(dst.fp(), src.gp(), scratch.fp());
+ cvt_s_d(dst.fp(), dst.fp());
+ return true;
+ }
+ case kExprF32ConvertF64:
+ cvt_s_d(dst.fp(), src.fp());
+ return true;
+ case kExprF32ReinterpretI32:
+ TurboAssembler::FmoveLow(dst.fp(), src.gp());
+ return true;
+ case kExprF64SConvertI32: {
+ LiftoffRegister scratch =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst));
+ mtc1(src.gp(), scratch.fp());
+ cvt_d_w(dst.fp(), scratch.fp());
+ return true;
+ }
+ case kExprF64UConvertI32: {
+ LiftoffRegister scratch =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst));
+ TurboAssembler::Cvt_d_uw(dst.fp(), src.gp(), scratch.fp());
+ return true;
+ }
+ case kExprF64ConvertF32:
+ cvt_d_s(dst.fp(), src.fp());
+ return true;
+ case kExprF64ReinterpretI64:
+ mtc1(src.low_gp(), dst.fp());
+ TurboAssembler::Mthc1(src.high_gp(), dst.fp());
+ return true;
+ default:
+ return false;
+ }
+}
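One case worth spelling out is kExprI64SConvertI32 on a 32-bit target: the 64-bit result is a register pair whose high word is the sign extension of the source, which is what the sra-by-31 above produces. A one-line sketch of that semantics (illustrative helper name):

#include <cstdint>

inline void I64SConvertI32(int32_t src, uint32_t* low, uint32_t* high) {
  *low = static_cast<uint32_t>(src);
  *high = static_cast<uint32_t>(src >> 31);  // 0xFFFFFFFF if negative, else 0
}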
void LiftoffAssembler::emit_jump(Label* label) {
TurboAssembler::Branch(label);
}
+void LiftoffAssembler::emit_jump(Register target) { BAILOUT("emit_jump"); }
+
void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
ValueType type, Register lhs,
Register rhs) {
@@ -424,31 +665,85 @@ void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
}
}
+void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
+ sltiu(dst, src, 1);
+}
+
void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
Register lhs, Register rhs) {
- Label true_label;
- if (dst != lhs) {
- ori(dst, zero_reg, 0x1);
+ Register tmp = dst;
+ if (dst == lhs || dst == rhs) {
+ tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
}
+ // Write 1 as result.
+ TurboAssembler::li(tmp, 1);
- if (rhs != no_reg) {
- TurboAssembler::Branch(&true_label, cond, lhs, Operand(rhs));
- } else {
- TurboAssembler::Branch(&true_label, cond, lhs, Operand(zero_reg));
+  // If the negated condition is true, write 0 as the result.
+ Condition neg_cond = NegateCondition(cond);
+ TurboAssembler::LoadZeroOnCondition(tmp, lhs, Operand(rhs), neg_cond);
+
+ // If tmp != dst, result will be moved.
+ TurboAssembler::Move(dst, tmp);
+}
+
+void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
+ Register tmp =
+ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(src, dst)).gp();
+ sltiu(tmp, src.low_gp(), 1);
+ sltiu(dst, src.high_gp(), 1);
+ and_(dst, dst, tmp);
+}
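The i64 zero test above combines two sltiu results; its value semantics as a sketch in plain C++ (illustrative helper name):

#include <cstdint>

// Each half is tested with an unsigned "less than 1" (the effect of sltiu
// with immediate 1) and the two results are ANDed.
inline uint32_t I64Eqz(uint32_t low, uint32_t high) {
  return static_cast<uint32_t>(low < 1u) & static_cast<uint32_t>(high < 1u);
}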
+
+namespace liftoff {
+inline Condition cond_make_unsigned(Condition cond) {
+ switch (cond) {
+ case kSignedLessThan:
+ return kUnsignedLessThan;
+ case kSignedLessEqual:
+ return kUnsignedLessEqual;
+ case kSignedGreaterThan:
+ return kUnsignedGreaterThan;
+ case kSignedGreaterEqual:
+ return kUnsignedGreaterEqual;
+ default:
+ return cond;
}
- // If not true, set on 0.
- TurboAssembler::mov(dst, zero_reg);
+}
+} // namespace liftoff
- if (dst != lhs) {
- bind(&true_label);
- } else {
- Label end_label;
- TurboAssembler::Branch(&end_label);
- bind(&true_label);
+void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Label low, cont;
- ori(dst, zero_reg, 0x1);
- bind(&end_label);
+ // For signed i64 comparisons, we still need to use unsigned comparison for
+ // the low word (the only bit carrying signedness information is the MSB in
+ // the high word).
+ Condition unsigned_cond = liftoff::cond_make_unsigned(cond);
+
+ Register tmp = dst;
+ if (liftoff::IsRegInRegPair(lhs, dst) || liftoff::IsRegInRegPair(rhs, dst)) {
+ tmp =
+ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst, lhs, rhs)).gp();
}
+
+ // Write 1 initially in tmp register.
+ TurboAssembler::li(tmp, 1);
+
+ // If high words are equal, then compare low words, else compare high.
+ Branch(&low, eq, lhs.high_gp(), Operand(rhs.high_gp()));
+
+ TurboAssembler::LoadZeroOnCondition(
+ tmp, lhs.high_gp(), Operand(rhs.high_gp()), NegateCondition(cond));
+ Branch(&cont);
+
+ bind(&low);
+ TurboAssembler::LoadZeroOnCondition(tmp, lhs.low_gp(), Operand(rhs.low_gp()),
+ NegateCondition(unsigned_cond));
+
+ bind(&cont);
+ // Move result to dst register if needed.
+ TurboAssembler::Move(dst, tmp);
}
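The comment in emit_i64_set_cond about using the unsigned condition for the low word is easiest to see on a concrete condition. A sketch of the scheme for signed less-than (illustrative helper name, not part of the assembler):

#include <cstdint>

// High words decide the comparison unless they are equal; only then do the
// low words matter, and they are compared unsigned (the sign lives solely
// in the high word's MSB).
inline bool I64SignedLessThan(int32_t lhs_high, uint32_t lhs_low,
                              int32_t rhs_high, uint32_t rhs_low) {
  if (lhs_high != rhs_high) return lhs_high < rhs_high;  // signed compare
  return lhs_low < rhs_low;                              // unsigned compare
}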
void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
@@ -457,7 +752,19 @@ void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
BAILOUT("emit_f32_set_cond");
}
-void LiftoffAssembler::StackCheck(Label* ool_code) { BAILOUT("StackCheck"); }
+void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
+ DoubleRegister rhs) {
+ BAILOUT("emit_f64_set_cond");
+}
+
+void LiftoffAssembler::StackCheck(Label* ool_code) {
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg);
+ TurboAssembler::li(
+ tmp.gp(), Operand(ExternalReference::address_of_stack_limit(isolate())));
+ TurboAssembler::Ulw(tmp.gp(), MemOperand(tmp.gp()));
+ TurboAssembler::Branch(ool_code, ule, sp, Operand(tmp.gp()));
+}
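StackCheck now emits a real check instead of bailing out; its condition, as a sketch (illustrative helper name):

#include <cstdint>

// The emitted code loads the current limit through the external-reference
// address and branches to the out-of-line path when sp is at or below it
// (the "ule" branch above).
inline bool NeedsStackCheckSlowPath(uintptr_t sp, const uintptr_t* limit_address) {
  return sp <= *limit_address;
}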
void LiftoffAssembler::CallTrapCallbackForTesting() {
PrepareCallCFunction(0, GetUnusedRegister(kGpReg).gp());
@@ -466,72 +773,185 @@ void LiftoffAssembler::CallTrapCallbackForTesting() {
}
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
- BAILOUT("AssertUnreachable");
+ if (emit_debug_code()) Abort(reason);
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
uint32_t src_index,
RegPairHalf half) {
- BAILOUT("PushCallerFrameSlot");
+ switch (src.loc()) {
+ case VarState::kStack: {
+ if (src.type() == kWasmF64) {
+ DCHECK_EQ(kLowWord, half);
+ lw(at, liftoff::GetHalfStackSlot(2 * src_index - 1));
+ push(at);
+ }
+ lw(at,
+ liftoff::GetHalfStackSlot(2 * src_index + (half == kLowWord ? 0 : 1)));
+ push(at);
+ break;
+ }
+ case VarState::kRegister:
+ if (src.type() == kWasmI64) {
+ PushCallerFrameSlot(
+ half == kLowWord ? src.reg().low() : src.reg().high(), kWasmI32);
+ } else {
+ PushCallerFrameSlot(src.reg(), src.type());
+ }
+ break;
+ case VarState::KIntConst: {
+ // The high word is the sign extension of the low word.
+ li(at,
+ Operand(half == kLowWord ? src.i32_const() : src.i32_const() >> 31));
+ push(at);
+ break;
+ }
+ }
}
void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg,
ValueType type) {
- BAILOUT("PushCallerFrameSlot reg");
+ liftoff::push(this, reg, type);
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
- BAILOUT("PushRegisters");
+ LiftoffRegList gp_regs = regs & kGpCacheRegList;
+ unsigned num_gp_regs = gp_regs.GetNumRegsSet();
+ if (num_gp_regs) {
+ unsigned offset = num_gp_regs * kPointerSize;
+ addiu(sp, sp, -offset);
+ while (!gp_regs.is_empty()) {
+ LiftoffRegister reg = gp_regs.GetFirstRegSet();
+ offset -= kPointerSize;
+ sw(reg.gp(), MemOperand(sp, offset));
+ gp_regs.clear(reg);
+ }
+ DCHECK_EQ(offset, 0);
+ }
+ LiftoffRegList fp_regs = regs & kFpCacheRegList;
+ unsigned num_fp_regs = fp_regs.GetNumRegsSet();
+ if (num_fp_regs) {
+ addiu(sp, sp, -(num_fp_regs * kStackSlotSize));
+ unsigned offset = 0;
+ while (!fp_regs.is_empty()) {
+ LiftoffRegister reg = fp_regs.GetFirstRegSet();
+ TurboAssembler::Sdc1(reg.fp(), MemOperand(sp, offset));
+ fp_regs.clear(reg);
+ offset += sizeof(double);
+ }
+ DCHECK_EQ(offset, num_fp_regs * sizeof(double));
+ }
}
void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
- BAILOUT("PopRegisters");
+ LiftoffRegList fp_regs = regs & kFpCacheRegList;
+ unsigned fp_offset = 0;
+ while (!fp_regs.is_empty()) {
+ LiftoffRegister reg = fp_regs.GetFirstRegSet();
+ TurboAssembler::Ldc1(reg.fp(), MemOperand(sp, fp_offset));
+ fp_regs.clear(reg);
+ fp_offset += sizeof(double);
+ }
+ if (fp_offset) addiu(sp, sp, fp_offset);
+ LiftoffRegList gp_regs = regs & kGpCacheRegList;
+ unsigned gp_offset = 0;
+ while (!gp_regs.is_empty()) {
+ LiftoffRegister reg = gp_regs.GetLastRegSet();
+ lw(reg.gp(), MemOperand(sp, gp_offset));
+ gp_regs.clear(reg);
+ gp_offset += kPointerSize;
+ }
+ addiu(sp, sp, gp_offset);
}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
DCHECK_LT(num_stack_slots, (1 << 16) / kPointerSize); // 16 bit immediate
- TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots * kPointerSize));
+ TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots));
}
-void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
- BAILOUT("PrepareCCall");
+void LiftoffAssembler::PrepareCCall(wasm::FunctionSig* sig,
+ const LiftoffRegister* args,
+ ValueType out_argument_type) {
+ int pushed_bytes = 0;
+ for (ValueType param_type : sig->parameters()) {
+ pushed_bytes += RoundUp<kPointerSize>(WasmOpcodes::MemSize(param_type));
+ liftoff::push(this, *args++, param_type);
+ }
+ if (out_argument_type != kWasmStmt) {
+ int size = RoundUp<kPointerSize>(WasmOpcodes::MemSize(out_argument_type));
+ addiu(sp, sp, -size);
+ pushed_bytes += size;
+ }
+ // Save the original sp (before the first push), such that we can later
+ // compute pointers to the pushed values. Do this only *after* pushing the
+ // values, because {kCCallLastArgAddrReg} might collide with an arg register.
+ addiu(liftoff::kCCallLastArgAddrReg, sp, pushed_bytes);
+ constexpr Register kScratch = at;
+ static_assert(kScratch != liftoff::kCCallLastArgAddrReg, "collision");
+ int num_c_call_arguments = static_cast<int>(sig->parameter_count()) +
+ (out_argument_type != kWasmStmt);
+ PrepareCallCFunction(num_c_call_arguments, kScratch);
+}
+
+void LiftoffAssembler::SetCCallRegParamAddr(Register dst, int param_byte_offset,
+ ValueType type) {
+ // Check that we don't accidentally override kCCallLastArgAddrReg.
+ DCHECK_NE(liftoff::kCCallLastArgAddrReg, dst);
+ addiu(dst, liftoff::kCCallLastArgAddrReg, -param_byte_offset);
}
-void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
- uint32_t num_params) {
- BAILOUT("SetCCallRegParamAddr");
+void LiftoffAssembler::SetCCallStackParamAddr(int stack_param_idx,
+ int param_byte_offset,
+ ValueType type) {
+ static constexpr Register kScratch = at;
+ SetCCallRegParamAddr(kScratch, param_byte_offset, type);
+ sw(kScratch, MemOperand(sp, stack_param_idx * kPointerSize));
}
-void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
- uint32_t param_idx,
- uint32_t num_params) {
- BAILOUT("SetCCallStackParamAddr");
+void LiftoffAssembler::LoadCCallOutArgument(LiftoffRegister dst, ValueType type,
+ int param_byte_offset) {
+ // Check that we don't accidentally override kCCallLastArgAddrReg.
+ DCHECK_NE(LiftoffRegister(liftoff::kCCallLastArgAddrReg), dst);
+ MemOperand src(liftoff::kCCallLastArgAddrReg, -param_byte_offset);
+ liftoff::Load(this, dst, src, type);
}
void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
- BAILOUT("CallC");
+ CallCFunction(ext_ref, static_cast<int>(num_params));
+}
+
+void LiftoffAssembler::FinishCCall() {
+ TurboAssembler::Move(sp, liftoff::kCCallLastArgAddrReg);
}
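The C-call plumbing above fits together as follows: PrepareCCall pushes each argument (plus an out-argument slot) onto the stack and records the pre-push sp in kCCallLastArgAddrReg; SetCCallRegParamAddr and LoadCCallOutArgument then form each parameter's address as that base minus its byte offset, and FinishCCall restores sp from the same register. A minimal sketch of that bookkeeping over a toy downward-growing stack, simplified to 4-byte arguments (CCallFrameSketch and its members are illustrative names, not the real assembler interface):

#include <cstdint>
#include <cstring>

struct CCallFrameSketch {
  uint8_t memory[64] = {};
  uint8_t* sp = memory + sizeof(memory);  // stack grows toward memory[0]
  uint8_t* last_arg_addr = nullptr;       // plays the role of kCCallLastArgAddrReg

  void Push(uint32_t value) {
    sp -= sizeof(value);
    std::memcpy(sp, &value, sizeof(value));
  }

  void PrepareCCall(const uint32_t* args, int count) {
    for (int i = 0; i < count; ++i) Push(args[i]);
    last_arg_addr = sp + count * sizeof(uint32_t);  // sp before any push
  }

  uint8_t* ParamAddr(int param_byte_offset) const {
    return last_arg_addr - param_byte_offset;  // like addiu(dst, base, -offset)
  }

  void FinishCCall() { sp = last_arg_addr; }   // restore sp from the base
};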
void LiftoffAssembler::CallNativeWasmCode(Address addr) {
- BAILOUT("CallNativeWasmCode");
+ Call(addr, RelocInfo::WASM_CALL);
}
void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
- BAILOUT("CallRuntime");
+ // Set instance to zero.
+ TurboAssembler::Move(cp, zero_reg);
+ CallRuntimeDelayed(zone, fid);
}
void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
- BAILOUT("CallIndirect");
+ if (target == no_reg) {
+ pop(at);
+ Call(at);
+ } else {
+ Call(target);
+ }
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
- BAILOUT("AllocateStackSlot");
+ addiu(sp, sp, -size);
+ TurboAssembler::Move(addr, sp);
}
void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
- BAILOUT("DeallocateStackSlot");
+ addiu(sp, sp, size);
}
} // namespace wasm
diff --git a/deps/v8/src/wasm/baseline/mips64/OWNERS b/deps/v8/src/wasm/baseline/mips64/OWNERS
new file mode 100644
index 0000000000..cf2df277c9
--- /dev/null
+++ b/deps/v8/src/wasm/baseline/mips64/OWNERS
@@ -0,0 +1,3 @@
+ivica.bogosavljevic@mips.com
+Miran.Karic@mips.com
+sreten.kovacevic@mips.com
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index d215f4178c..9397494cab 100644
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -15,36 +15,89 @@ namespace wasm {
namespace liftoff {
-// sp-8 holds the stack marker, sp-16 is the wasm context, first stack slot
-// is located at sp-24.
+// fp-8 holds the stack marker, fp-16 is the instance parameter, first stack
+// slot is located at fp-24.
constexpr int32_t kConstantStackSpace = 16;
constexpr int32_t kFirstStackSlotOffset =
kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
inline MemOperand GetStackSlot(uint32_t index) {
int32_t offset = index * LiftoffAssembler::kStackSlotSize;
- return MemOperand(sp, -kFirstStackSlotOffset - offset);
+ return MemOperand(fp, -kFirstStackSlotOffset - offset);
}
-inline MemOperand GetContextOperand() { return MemOperand(sp, -16); }
+inline MemOperand GetInstanceOperand() { return MemOperand(fp, -16); }
+
+// Use this register to store the address of the last argument pushed on the
+// stack for a call to C. This register must be callee-saved according to the
+// C calling convention.
+static constexpr Register kCCallLastArgAddrReg = s1;
+
+inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
+ ValueType type) {
+ switch (type) {
+ case kWasmI32:
+ assm->lw(dst.gp(), src);
+ break;
+ case kWasmI64:
+ assm->ld(dst.gp(), src);
+ break;
+ case kWasmF32:
+ assm->lwc1(dst.fp(), src);
+ break;
+ case kWasmF64:
+ assm->Ldc1(dst.fp(), src);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
+ switch (type) {
+ case kWasmI32:
+ case kWasmI64:
+ assm->push(reg.gp());
+ break;
+ case kWasmF32:
+ assm->daddiu(sp, sp, -kPointerSize);
+ assm->swc1(reg.fp(), MemOperand(sp, 0));
+ break;
+ case kWasmF64:
+ assm->daddiu(sp, sp, -kPointerSize);
+ assm->Sdc1(reg.fp(), MemOperand(sp, 0));
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
} // namespace liftoff
uint32_t LiftoffAssembler::PrepareStackFrame() {
uint32_t offset = static_cast<uint32_t>(pc_offset());
+  // When the constant that represents the size of the stack frame can't be
+  // represented as a 16-bit value, we need three instructions to add it to
+  // sp, so we reserve space for this case.
daddiu(sp, sp, 0);
+ nop();
+ nop();
return offset;
}
void LiftoffAssembler::PatchPrepareStackFrame(uint32_t offset,
uint32_t stack_slots) {
- uint32_t bytes = liftoff::kConstantStackSpace + kStackSlotSize * stack_slots;
+ uint64_t bytes = liftoff::kConstantStackSpace + kStackSlotSize * stack_slots;
DCHECK_LE(bytes, kMaxInt);
// We can't run out of space, just pass anything big enough to not cause the
// assembler to try to grow the buffer.
constexpr int kAvailableSpace = 256;
- Assembler patching_assembler(isolate(), buffer_ + offset, kAvailableSpace);
- patching_assembler.daddiu(sp, sp, -bytes);
+ TurboAssembler patching_assembler(isolate(), buffer_ + offset,
+ kAvailableSpace, CodeObjectRequired::kNo);
+  // If {bytes} can be represented as a 16-bit value, a daddiu is generated
+  // and the two nops stay untouched. Otherwise, a lui-ori sequence loads the
+  // value into a register and a daddu is generated as the third instruction.
+ patching_assembler.Daddu(sp, sp, Operand(-bytes));
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
@@ -67,10 +120,10 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
}
-void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
- int size) {
+void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
+ int size) {
DCHECK_LE(offset, kMaxInt);
- ld(dst, liftoff::GetContextOperand());
+ ld(dst, liftoff::GetInstanceOperand());
DCHECK(size == 4 || size == 8);
if (size == 4) {
lw(dst, MemOperand(dst, offset));
@@ -79,12 +132,12 @@ void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
}
}
-void LiftoffAssembler::SpillContext(Register context) {
- sd(context, liftoff::GetContextOperand());
+void LiftoffAssembler::SpillInstance(Register instance) {
+ sd(instance, liftoff::GetInstanceOperand());
}
-void LiftoffAssembler::FillContextInto(Register dst) {
- ld(dst, liftoff::GetContextOperand());
+void LiftoffAssembler::FillInstanceInto(Register dst) {
+ ld(dst, liftoff::GetInstanceOperand());
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
@@ -182,7 +235,8 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
- BAILOUT("LoadCallerFrameSlot");
+ MemOperand src(fp, kPointerSize * (caller_slot_idx + 1));
+ liftoff::Load(this, dst, src, type);
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
@@ -301,11 +355,6 @@ I32_BINOP(xor, xor_)
#undef I32_BINOP
-void LiftoffAssembler::emit_ptrsize_add(Register dst, Register lhs,
- Register rhs) {
- TurboAssembler::Daddu(dst, lhs, rhs);
-}
-
bool LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
TurboAssembler::Clz(dst, src);
return true;
@@ -321,10 +370,10 @@ bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
return true;
}
-#define I32_SHIFTOP(name, instruction) \
- void LiftoffAssembler::emit_i32_##name( \
- Register dst, Register lhs, Register rhs, LiftoffRegList pinned) { \
- instruction(dst, lhs, rhs); \
+#define I32_SHIFTOP(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name( \
+ Register dst, Register src, Register amount, LiftoffRegList pinned) { \
+ instruction(dst, src, amount); \
}
I32_SHIFTOP(shl, sllv)
@@ -333,32 +382,141 @@ I32_SHIFTOP(shr, srlv)
#undef I32_SHIFTOP
+void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ TurboAssembler::Daddu(dst.gp(), lhs.gp(), rhs.gp());
+}
+
+void LiftoffAssembler::emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ BAILOUT("i64_sub");
+}
+
+#define I64_BINOP(name) \
+ void LiftoffAssembler::emit_i64_##name( \
+ LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
+ BAILOUT("i64_" #name); \
+ }
+
+// clang-format off
+I64_BINOP(and)
+I64_BINOP(or)
+I64_BINOP(xor)
+// clang-format on
+
+#undef I64_BINOP
+
+#define I64_SHIFTOP(name, instruction) \
+ void LiftoffAssembler::emit_i64_##name(LiftoffRegister dst, \
+ LiftoffRegister src, Register amount, \
+ LiftoffRegList pinned) { \
+ instruction(dst.gp(), src.gp(), amount); \
+ }
+
+I64_SHIFTOP(shl, dsllv)
+I64_SHIFTOP(sar, dsrav)
+I64_SHIFTOP(shr, dsrlv)
+
+#undef I64_SHIFTOP
+
#define FP_BINOP(name, instruction) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
DoubleRegister rhs) { \
instruction(dst, lhs, rhs); \
}
-#define UNIMPLEMENTED_FP_UNOP(name) \
+#define FP_UNOP(name, instruction) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
- BAILOUT("fp unop"); \
+ instruction(dst, src); \
}
FP_BINOP(f32_add, add_s)
FP_BINOP(f32_sub, sub_s)
FP_BINOP(f32_mul, mul_s)
-UNIMPLEMENTED_FP_UNOP(f32_neg)
+FP_BINOP(f32_div, div_s)
+FP_UNOP(f32_abs, abs_s)
+FP_UNOP(f32_neg, neg_s)
+FP_UNOP(f32_ceil, Ceil_s_s)
+FP_UNOP(f32_floor, Floor_s_s)
+FP_UNOP(f32_trunc, Trunc_s_s)
+FP_UNOP(f32_nearest_int, Round_s_s)
+FP_UNOP(f32_sqrt, sqrt_s)
FP_BINOP(f64_add, add_d)
FP_BINOP(f64_sub, sub_d)
FP_BINOP(f64_mul, mul_d)
-UNIMPLEMENTED_FP_UNOP(f64_neg)
+FP_BINOP(f64_div, div_d)
+FP_UNOP(f64_abs, abs_d)
+FP_UNOP(f64_neg, neg_d)
+FP_UNOP(f64_ceil, Ceil_d_d)
+FP_UNOP(f64_floor, Floor_d_d)
+FP_UNOP(f64_trunc, Trunc_d_d)
+FP_UNOP(f64_nearest_int, Round_d_d)
+FP_UNOP(f64_sqrt, sqrt_d)
#undef FP_BINOP
-#undef UNIMPLEMENTED_FP_BINOP
+#undef FP_UNOP
+
+bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
+ LiftoffRegister dst,
+ LiftoffRegister src) {
+ switch (opcode) {
+ case kExprI32ConvertI64:
+ TurboAssembler::Ext(dst.gp(), src.gp(), 0, 32);
+ return true;
+ case kExprI32ReinterpretF32:
+ TurboAssembler::FmoveLow(dst.gp(), src.fp());
+ return true;
+ case kExprI64SConvertI32:
+ sll(dst.gp(), src.gp(), 0);
+ return true;
+ case kExprI64UConvertI32:
+ TurboAssembler::Dext(dst.gp(), src.gp(), 0, 32);
+ return true;
+ case kExprI64ReinterpretF64:
+ dmfc1(dst.gp(), src.fp());
+ return true;
+ case kExprF32SConvertI32: {
+ LiftoffRegister scratch =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst));
+ mtc1(src.gp(), scratch.fp());
+ cvt_s_w(dst.fp(), scratch.fp());
+ return true;
+ }
+ case kExprF32UConvertI32:
+ TurboAssembler::Cvt_s_uw(dst.fp(), src.gp());
+ return true;
+ case kExprF32ConvertF64:
+ cvt_s_d(dst.fp(), src.fp());
+ return true;
+ case kExprF32ReinterpretI32:
+ TurboAssembler::FmoveLow(dst.fp(), src.gp());
+ return true;
+ case kExprF64SConvertI32: {
+ LiftoffRegister scratch =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst));
+ mtc1(src.gp(), scratch.fp());
+ cvt_d_w(dst.fp(), scratch.fp());
+ return true;
+ }
+ case kExprF64UConvertI32:
+ TurboAssembler::Cvt_d_uw(dst.fp(), src.gp());
+ return true;
+ case kExprF64ConvertF32:
+ cvt_d_s(dst.fp(), src.fp());
+ return true;
+ case kExprF64ReinterpretI64:
+ dmtc1(src.gp(), dst.fp());
+ return true;
+ default:
+ return false;
+ }
+}
void LiftoffAssembler::emit_jump(Label* label) {
TurboAssembler::Branch(label);
}
+void LiftoffAssembler::emit_jump(Register target) { BAILOUT("emit_jump"); }
+
void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
ValueType type, Register lhs,
Register rhs) {
@@ -369,31 +527,48 @@ void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
}
}
+void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
+ sltiu(dst, src, 1);
+}
+
void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
Register lhs, Register rhs) {
- Label true_label;
- if (dst != lhs) {
- ori(dst, zero_reg, 0x1);
+ Register tmp = dst;
+ if (dst == lhs || dst == rhs) {
+ tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
}
+ // Write 1 as result.
+ TurboAssembler::li(tmp, 1);
- if (rhs != no_reg) {
- TurboAssembler::Branch(&true_label, cond, lhs, Operand(rhs));
- } else {
- TurboAssembler::Branch(&true_label, cond, lhs, Operand(zero_reg));
- }
- // If not true, set on 0.
- TurboAssembler::mov(dst, zero_reg);
+  // If the negated condition is true, write 0 as the result.
+ Condition neg_cond = NegateCondition(cond);
+ TurboAssembler::LoadZeroOnCondition(tmp, lhs, Operand(rhs), neg_cond);
- if (dst != lhs) {
- bind(&true_label);
- } else {
- Label end_label;
- TurboAssembler::Branch(&end_label);
- bind(&true_label);
+ // If tmp != dst, result will be moved.
+ TurboAssembler::Move(dst, tmp);
+}
+
+void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
+ sltiu(dst, src.gp(), 1);
+}
- ori(dst, zero_reg, 0x1);
- bind(&end_label);
+void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Register tmp = dst;
+ if (dst == lhs.gp() || dst == rhs.gp()) {
+ tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
}
+ // Write 1 as result.
+ TurboAssembler::li(tmp, 1);
+
+  // If the negated condition is true, write 0 as the result.
+ Condition neg_cond = NegateCondition(cond);
+ TurboAssembler::LoadZeroOnCondition(tmp, lhs.gp(), Operand(rhs.gp()),
+ neg_cond);
+
+ // If tmp != dst, result will be moved.
+ TurboAssembler::Move(dst, tmp);
}
void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
@@ -402,7 +577,19 @@ void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
BAILOUT("emit_f32_set_cond");
}
-void LiftoffAssembler::StackCheck(Label* ool_code) { BAILOUT("StackCheck"); }
+void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
+ DoubleRegister rhs) {
+ BAILOUT("emit_f64_set_cond");
+}
+
+void LiftoffAssembler::StackCheck(Label* ool_code) {
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg);
+ TurboAssembler::li(
+ tmp.gp(), Operand(ExternalReference::address_of_stack_limit(isolate())));
+ TurboAssembler::Uld(tmp.gp(), MemOperand(tmp.gp()));
+ TurboAssembler::Branch(ool_code, ule, sp, Operand(tmp.gp()));
+}
void LiftoffAssembler::CallTrapCallbackForTesting() {
PrepareCallCFunction(0, GetUnusedRegister(kGpReg).gp());
@@ -411,72 +598,168 @@ void LiftoffAssembler::CallTrapCallbackForTesting() {
}
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
- BAILOUT("AssertUnreachable");
+ if (emit_debug_code()) Abort(reason);
}
void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
uint32_t src_index,
RegPairHalf half) {
- BAILOUT("PushCallerFrameSlot");
+ switch (src.loc()) {
+ case VarState::kStack:
+ ld(at, liftoff::GetStackSlot(src_index));
+ push(at);
+ break;
+ case VarState::kRegister:
+ PushCallerFrameSlot(src.reg(), src.type());
+ break;
+ case VarState::KIntConst: {
+ li(at, Operand(src.i32_const()));
+ push(at);
+ break;
+ }
+ }
}
void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg,
ValueType type) {
- BAILOUT("PushCallerFrameSlot reg");
+ liftoff::push(this, reg, type);
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
- BAILOUT("PushRegisters");
+ LiftoffRegList gp_regs = regs & kGpCacheRegList;
+ unsigned num_gp_regs = gp_regs.GetNumRegsSet();
+ if (num_gp_regs) {
+ unsigned offset = num_gp_regs * kPointerSize;
+ daddiu(sp, sp, -offset);
+ while (!gp_regs.is_empty()) {
+ LiftoffRegister reg = gp_regs.GetFirstRegSet();
+ offset -= kPointerSize;
+ sd(reg.gp(), MemOperand(sp, offset));
+ gp_regs.clear(reg);
+ }
+ DCHECK_EQ(offset, 0);
+ }
+ LiftoffRegList fp_regs = regs & kFpCacheRegList;
+ unsigned num_fp_regs = fp_regs.GetNumRegsSet();
+ if (num_fp_regs) {
+ daddiu(sp, sp, -(num_fp_regs * kStackSlotSize));
+ unsigned offset = 0;
+ while (!fp_regs.is_empty()) {
+ LiftoffRegister reg = fp_regs.GetFirstRegSet();
+ TurboAssembler::Sdc1(reg.fp(), MemOperand(sp, offset));
+ fp_regs.clear(reg);
+ offset += sizeof(double);
+ }
+ DCHECK_EQ(offset, num_fp_regs * sizeof(double));
+ }
}
void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
- BAILOUT("PopRegisters");
+ LiftoffRegList fp_regs = regs & kFpCacheRegList;
+ unsigned fp_offset = 0;
+ while (!fp_regs.is_empty()) {
+ LiftoffRegister reg = fp_regs.GetFirstRegSet();
+ TurboAssembler::Ldc1(reg.fp(), MemOperand(sp, fp_offset));
+ fp_regs.clear(reg);
+ fp_offset += sizeof(double);
+ }
+ if (fp_offset) daddiu(sp, sp, fp_offset);
+ LiftoffRegList gp_regs = regs & kGpCacheRegList;
+ unsigned gp_offset = 0;
+ while (!gp_regs.is_empty()) {
+ LiftoffRegister reg = gp_regs.GetLastRegSet();
+ ld(reg.gp(), MemOperand(sp, gp_offset));
+ gp_regs.clear(reg);
+ gp_offset += kPointerSize;
+ }
+ daddiu(sp, sp, gp_offset);
}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
DCHECK_LT(num_stack_slots, (1 << 16) / kPointerSize); // 16 bit immediate
- TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots * kPointerSize));
+ TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots));
}
-void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
- BAILOUT("PrepareCCall");
+void LiftoffAssembler::PrepareCCall(wasm::FunctionSig* sig,
+ const LiftoffRegister* args,
+ ValueType out_argument_type) {
+ for (ValueType param_type : sig->parameters()) {
+ liftoff::push(this, *args++, param_type);
+ }
+ if (out_argument_type != kWasmStmt) {
+ daddiu(sp, sp, -kPointerSize);
+ }
+ // Save the original sp (before the first push), such that we can later
+ // compute pointers to the pushed values. Do this only *after* pushing the
+ // values, because {kCCallLastArgAddrReg} might collide with an arg register.
+ int num_c_call_arguments = static_cast<int>(sig->parameter_count()) +
+ (out_argument_type != kWasmStmt);
+ int pushed_bytes = kPointerSize * num_c_call_arguments;
+ daddiu(liftoff::kCCallLastArgAddrReg, sp, pushed_bytes);
+ constexpr Register kScratch = at;
+ static_assert(kScratch != liftoff::kCCallLastArgAddrReg, "collision");
+ PrepareCallCFunction(num_c_call_arguments, kScratch);
+}
+
+void LiftoffAssembler::SetCCallRegParamAddr(Register dst, int param_byte_offset,
+ ValueType type) {
+ // Check that we don't accidentally override kCCallLastArgAddrReg.
+ DCHECK_NE(liftoff::kCCallLastArgAddrReg, dst);
+ daddiu(dst, liftoff::kCCallLastArgAddrReg, -param_byte_offset);
}
-void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
- uint32_t num_params) {
- BAILOUT("SetCCallRegParamAddr");
+void LiftoffAssembler::SetCCallStackParamAddr(int stack_param_idx,
+ int param_byte_offset,
+ ValueType type) {
+ static constexpr Register kScratch = at;
+ SetCCallRegParamAddr(kScratch, param_byte_offset, type);
+ sd(kScratch, MemOperand(sp, stack_param_idx * kPointerSize));
}
-void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
- uint32_t param_idx,
- uint32_t num_params) {
- BAILOUT("SetCCallStackParamAddr");
+void LiftoffAssembler::LoadCCallOutArgument(LiftoffRegister dst, ValueType type,
+ int param_byte_offset) {
+ // Check that we don't accidentally override kCCallLastArgAddrReg.
+ DCHECK_NE(LiftoffRegister(liftoff::kCCallLastArgAddrReg), dst);
+ MemOperand src(liftoff::kCCallLastArgAddrReg, -param_byte_offset);
+ liftoff::Load(this, dst, src, type);
}
void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
- BAILOUT("CallC");
+ CallCFunction(ext_ref, static_cast<int>(num_params));
+}
+
+void LiftoffAssembler::FinishCCall() {
+ TurboAssembler::Move(sp, liftoff::kCCallLastArgAddrReg);
}
void LiftoffAssembler::CallNativeWasmCode(Address addr) {
- BAILOUT("CallNativeWasmCode");
+ Call(addr, RelocInfo::WASM_CALL);
}
void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
- BAILOUT("CallRuntime");
+ // Set instance to zero.
+ TurboAssembler::Move(cp, zero_reg);
+ CallRuntimeDelayed(zone, fid);
}
void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
- BAILOUT("CallIndirect");
+ if (target == no_reg) {
+ pop(at);
+ Call(at);
+ } else {
+ Call(target);
+ }
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
- BAILOUT("AllocateStackSlot");
+ daddiu(sp, sp, -size);
+ TurboAssembler::Move(addr, sp);
}
void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
- BAILOUT("DeallocateStackSlot");
+ daddiu(sp, sp, size);
}
} // namespace wasm
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index efbb6896d6..39aa3fcb94 100644
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -28,17 +28,17 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
BAILOUT("LoadConstant");
}
-void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
- int size) {
- BAILOUT("LoadFromContext");
+void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
+ int size) {
+ BAILOUT("LoadFromInstance");
}
-void LiftoffAssembler::SpillContext(Register context) {
- BAILOUT("SpillContext");
+void LiftoffAssembler::SpillInstance(Register instance) {
+ BAILOUT("SpillInstance");
}
-void LiftoffAssembler::FillContextInto(Register dst) {
- BAILOUT("FillContextInto");
+void LiftoffAssembler::FillInstanceInto(Register dst) {
+ BAILOUT("FillInstanceInto");
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
@@ -101,26 +101,36 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
#define UNIMPLEMENTED_GP_BINOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
Register rhs) { \
- BAILOUT("gp binop"); \
+ BAILOUT("gp binop: " #name); \
+ }
+#define UNIMPLEMENTED_I64_BINOP(name) \
+ void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
+ LiftoffRegister rhs) { \
+ BAILOUT("i64 binop: " #name); \
}
#define UNIMPLEMENTED_GP_UNOP(name) \
bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
- BAILOUT("gp unop"); \
+ BAILOUT("gp unop: " #name); \
return true; \
}
#define UNIMPLEMENTED_FP_BINOP(name) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
DoubleRegister rhs) { \
- BAILOUT("fp binop"); \
+ BAILOUT("fp binop: " #name); \
}
#define UNIMPLEMENTED_FP_UNOP(name) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
- BAILOUT("fp unop"); \
+ BAILOUT("fp unop: " #name); \
+ }
+#define UNIMPLEMENTED_I32_SHIFTOP(name) \
+ void LiftoffAssembler::emit_##name(Register dst, Register src, \
+ Register amount, LiftoffRegList pinned) { \
+ BAILOUT("i32 shiftop: " #name); \
}
-#define UNIMPLEMENTED_SHIFTOP(name) \
- void LiftoffAssembler::emit_##name(Register dst, Register lhs, Register rhs, \
- LiftoffRegList pinned) { \
- BAILOUT("shiftop"); \
+#define UNIMPLEMENTED_I64_SHIFTOP(name) \
+ void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister src, \
+ Register amount, LiftoffRegList pinned) { \
+ BAILOUT("i64 shiftop: " #name); \
}
UNIMPLEMENTED_GP_BINOP(i32_add)
@@ -129,47 +139,101 @@ UNIMPLEMENTED_GP_BINOP(i32_mul)
UNIMPLEMENTED_GP_BINOP(i32_and)
UNIMPLEMENTED_GP_BINOP(i32_or)
UNIMPLEMENTED_GP_BINOP(i32_xor)
-UNIMPLEMENTED_SHIFTOP(i32_shl)
-UNIMPLEMENTED_SHIFTOP(i32_sar)
-UNIMPLEMENTED_SHIFTOP(i32_shr)
+UNIMPLEMENTED_I32_SHIFTOP(i32_shl)
+UNIMPLEMENTED_I32_SHIFTOP(i32_sar)
+UNIMPLEMENTED_I32_SHIFTOP(i32_shr)
+UNIMPLEMENTED_I64_BINOP(i64_add)
+UNIMPLEMENTED_I64_BINOP(i64_sub)
+#ifdef V8_TARGET_ARCH_PPC64
+UNIMPLEMENTED_I64_BINOP(i64_and)
+UNIMPLEMENTED_I64_BINOP(i64_or)
+UNIMPLEMENTED_I64_BINOP(i64_xor)
+#endif
+UNIMPLEMENTED_I64_SHIFTOP(i64_shl)
+UNIMPLEMENTED_I64_SHIFTOP(i64_sar)
+UNIMPLEMENTED_I64_SHIFTOP(i64_shr)
UNIMPLEMENTED_GP_UNOP(i32_clz)
UNIMPLEMENTED_GP_UNOP(i32_ctz)
UNIMPLEMENTED_GP_UNOP(i32_popcnt)
-UNIMPLEMENTED_GP_BINOP(ptrsize_add)
UNIMPLEMENTED_FP_BINOP(f32_add)
UNIMPLEMENTED_FP_BINOP(f32_sub)
UNIMPLEMENTED_FP_BINOP(f32_mul)
+UNIMPLEMENTED_FP_BINOP(f32_div)
+UNIMPLEMENTED_FP_UNOP(f32_abs)
UNIMPLEMENTED_FP_UNOP(f32_neg)
+UNIMPLEMENTED_FP_UNOP(f32_ceil)
+UNIMPLEMENTED_FP_UNOP(f32_floor)
+UNIMPLEMENTED_FP_UNOP(f32_trunc)
+UNIMPLEMENTED_FP_UNOP(f32_nearest_int)
+UNIMPLEMENTED_FP_UNOP(f32_sqrt)
UNIMPLEMENTED_FP_BINOP(f64_add)
UNIMPLEMENTED_FP_BINOP(f64_sub)
UNIMPLEMENTED_FP_BINOP(f64_mul)
+UNIMPLEMENTED_FP_BINOP(f64_div)
+UNIMPLEMENTED_FP_UNOP(f64_abs)
UNIMPLEMENTED_FP_UNOP(f64_neg)
+UNIMPLEMENTED_FP_UNOP(f64_ceil)
+UNIMPLEMENTED_FP_UNOP(f64_floor)
+UNIMPLEMENTED_FP_UNOP(f64_trunc)
+UNIMPLEMENTED_FP_UNOP(f64_nearest_int)
+UNIMPLEMENTED_FP_UNOP(f64_sqrt)
#undef UNIMPLEMENTED_GP_BINOP
+#undef UNIMPLEMENTED_I64_BINOP
#undef UNIMPLEMENTED_GP_UNOP
#undef UNIMPLEMENTED_FP_BINOP
#undef UNIMPLEMENTED_FP_UNOP
-#undef UNIMPLEMENTED_SHIFTOP
+#undef UNIMPLEMENTED_I32_SHIFTOP
+#undef UNIMPLEMENTED_I64_SHIFTOP
+
+bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
+ LiftoffRegister dst,
+ LiftoffRegister src) {
+ BAILOUT("emit_type_conversion");
+ return true;
+}
void LiftoffAssembler::emit_jump(Label* label) { BAILOUT("emit_jump"); }
+void LiftoffAssembler::emit_jump(Register target) { BAILOUT("emit_jump"); }
+
void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
ValueType type, Register lhs,
Register rhs) {
BAILOUT("emit_cond_jump");
}
+void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
+ BAILOUT("emit_i32_eqz");
+}
+
void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
Register lhs, Register rhs) {
BAILOUT("emit_i32_set_cond");
}
+void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
+ BAILOUT("emit_i64_eqz");
+}
+
+void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ BAILOUT("emit_i64_set_cond");
+}
+
void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
DoubleRegister lhs,
DoubleRegister rhs) {
BAILOUT("emit_f32_set_cond");
}
+void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
+ DoubleRegister rhs) {
+ BAILOUT("emit_f64_set_cond");
+}
+
void LiftoffAssembler::StackCheck(Label* ool_code) { BAILOUT("StackCheck"); }
void LiftoffAssembler::CallTrapCallbackForTesting() {
@@ -203,25 +267,34 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
BAILOUT("DropStackSlotsAndRet");
}
-void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
+void LiftoffAssembler::PrepareCCall(wasm::FunctionSig* sig,
+ const LiftoffRegister* args,
+ ValueType out_argument_type) {
BAILOUT("PrepareCCall");
}
-void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
- uint32_t num_params) {
+void LiftoffAssembler::SetCCallRegParamAddr(Register dst, int param_byte_offset,
+ ValueType type) {
BAILOUT("SetCCallRegParamAddr");
}
-void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
- uint32_t param_idx,
- uint32_t num_params) {
+void LiftoffAssembler::SetCCallStackParamAddr(int stack_param_idx,
+ int param_byte_offset,
+ ValueType type) {
BAILOUT("SetCCallStackParamAddr");
}
+void LiftoffAssembler::LoadCCallOutArgument(LiftoffRegister dst, ValueType type,
+ int param_byte_offset) {
+ BAILOUT("LoadCCallOutArgument");
+}
+
void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
BAILOUT("CallC");
}
+void LiftoffAssembler::FinishCCall() { BAILOUT("FinishCCall"); }
+
void LiftoffAssembler::CallNativeWasmCode(Address addr) {
BAILOUT("CallNativeWasmCode");
}
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index 62145fadca..a66e6684ea 100644
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -28,17 +28,17 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
BAILOUT("LoadConstant");
}
-void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
- int size) {
- BAILOUT("LoadFromContext");
+void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
+ int size) {
+ BAILOUT("LoadFromInstance");
}
-void LiftoffAssembler::SpillContext(Register context) {
- BAILOUT("SpillContext");
+void LiftoffAssembler::SpillInstance(Register instance) {
+ BAILOUT("SpillInstance");
}
-void LiftoffAssembler::FillContextInto(Register dst) {
- BAILOUT("FillContextInto");
+void LiftoffAssembler::FillInstanceInto(Register dst) {
+ BAILOUT("FillInstanceInto");
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
@@ -101,26 +101,36 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t half_index) {
#define UNIMPLEMENTED_GP_BINOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
Register rhs) { \
- BAILOUT("gp binop"); \
+ BAILOUT("gp binop: " #name); \
+ }
+#define UNIMPLEMENTED_I64_BINOP(name) \
+ void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
+ LiftoffRegister rhs) { \
+ BAILOUT("i64 binop: " #name); \
}
#define UNIMPLEMENTED_GP_UNOP(name) \
bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
- BAILOUT("gp unop"); \
+ BAILOUT("gp unop: " #name); \
return true; \
}
#define UNIMPLEMENTED_FP_BINOP(name) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
DoubleRegister rhs) { \
- BAILOUT("fp binop"); \
+ BAILOUT("fp binop: " #name); \
}
#define UNIMPLEMENTED_FP_UNOP(name) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
- BAILOUT("fp unop"); \
+ BAILOUT("fp unop: " #name); \
+ }
+#define UNIMPLEMENTED_I32_SHIFTOP(name) \
+ void LiftoffAssembler::emit_##name(Register dst, Register src, \
+ Register amount, LiftoffRegList pinned) { \
+ BAILOUT("i32 shiftop: " #name); \
}
-#define UNIMPLEMENTED_SHIFTOP(name) \
- void LiftoffAssembler::emit_##name(Register dst, Register lhs, Register rhs, \
- LiftoffRegList pinned) { \
- BAILOUT("shiftop"); \
+#define UNIMPLEMENTED_I64_SHIFTOP(name) \
+ void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister src, \
+ Register amount, LiftoffRegList pinned) { \
+ BAILOUT("i64 shiftop: " #name); \
}
UNIMPLEMENTED_GP_BINOP(i32_add)
@@ -129,47 +139,101 @@ UNIMPLEMENTED_GP_BINOP(i32_mul)
UNIMPLEMENTED_GP_BINOP(i32_and)
UNIMPLEMENTED_GP_BINOP(i32_or)
UNIMPLEMENTED_GP_BINOP(i32_xor)
-UNIMPLEMENTED_SHIFTOP(i32_shl)
-UNIMPLEMENTED_SHIFTOP(i32_sar)
-UNIMPLEMENTED_SHIFTOP(i32_shr)
+UNIMPLEMENTED_I32_SHIFTOP(i32_shl)
+UNIMPLEMENTED_I32_SHIFTOP(i32_sar)
+UNIMPLEMENTED_I32_SHIFTOP(i32_shr)
+UNIMPLEMENTED_I64_BINOP(i64_add)
+UNIMPLEMENTED_I64_BINOP(i64_sub)
+#ifdef V8_TARGET_ARCH_S390X
+UNIMPLEMENTED_I64_BINOP(i64_and)
+UNIMPLEMENTED_I64_BINOP(i64_or)
+UNIMPLEMENTED_I64_BINOP(i64_xor)
+#endif
+UNIMPLEMENTED_I64_SHIFTOP(i64_shl)
+UNIMPLEMENTED_I64_SHIFTOP(i64_sar)
+UNIMPLEMENTED_I64_SHIFTOP(i64_shr)
UNIMPLEMENTED_GP_UNOP(i32_clz)
UNIMPLEMENTED_GP_UNOP(i32_ctz)
UNIMPLEMENTED_GP_UNOP(i32_popcnt)
-UNIMPLEMENTED_GP_BINOP(ptrsize_add)
UNIMPLEMENTED_FP_BINOP(f32_add)
UNIMPLEMENTED_FP_BINOP(f32_sub)
UNIMPLEMENTED_FP_BINOP(f32_mul)
+UNIMPLEMENTED_FP_BINOP(f32_div)
+UNIMPLEMENTED_FP_UNOP(f32_abs)
UNIMPLEMENTED_FP_UNOP(f32_neg)
+UNIMPLEMENTED_FP_UNOP(f32_ceil)
+UNIMPLEMENTED_FP_UNOP(f32_floor)
+UNIMPLEMENTED_FP_UNOP(f32_trunc)
+UNIMPLEMENTED_FP_UNOP(f32_nearest_int)
+UNIMPLEMENTED_FP_UNOP(f32_sqrt)
UNIMPLEMENTED_FP_BINOP(f64_add)
UNIMPLEMENTED_FP_BINOP(f64_sub)
UNIMPLEMENTED_FP_BINOP(f64_mul)
+UNIMPLEMENTED_FP_BINOP(f64_div)
+UNIMPLEMENTED_FP_UNOP(f64_abs)
UNIMPLEMENTED_FP_UNOP(f64_neg)
+UNIMPLEMENTED_FP_UNOP(f64_ceil)
+UNIMPLEMENTED_FP_UNOP(f64_floor)
+UNIMPLEMENTED_FP_UNOP(f64_trunc)
+UNIMPLEMENTED_FP_UNOP(f64_nearest_int)
+UNIMPLEMENTED_FP_UNOP(f64_sqrt)
#undef UNIMPLEMENTED_GP_BINOP
+#undef UNIMPLEMENTED_I64_BINOP
#undef UNIMPLEMENTED_GP_UNOP
#undef UNIMPLEMENTED_FP_BINOP
#undef UNIMPLEMENTED_FP_UNOP
-#undef UNIMPLEMENTED_SHIFTOP
+#undef UNIMPLEMENTED_I32_SHIFTOP
+#undef UNIMPLEMENTED_I64_SHIFTOP
+
+bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
+ LiftoffRegister dst,
+ LiftoffRegister src) {
+ BAILOUT("emit_type_conversion");
+ return true;
+}
void LiftoffAssembler::emit_jump(Label* label) { BAILOUT("emit_jump"); }
+void LiftoffAssembler::emit_jump(Register target) { BAILOUT("emit_jump"); }
+
void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
ValueType type, Register lhs,
Register rhs) {
BAILOUT("emit_cond_jump");
}
+void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
+ BAILOUT("emit_i32_eqz");
+}
+
void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
Register lhs, Register rhs) {
BAILOUT("emit_i32_set_cond");
}
+void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
+ BAILOUT("emit_i64_eqz");
+}
+
+void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ BAILOUT("emit_i64_set_cond");
+}
+
void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
DoubleRegister lhs,
DoubleRegister rhs) {
BAILOUT("emit_f32_set_cond");
}
+void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
+ DoubleRegister rhs) {
+ BAILOUT("emit_f64_set_cond");
+}
+
void LiftoffAssembler::StackCheck(Label* ool_code) { BAILOUT("StackCheck"); }
void LiftoffAssembler::CallTrapCallbackForTesting() {
@@ -203,25 +267,34 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
BAILOUT("DropStackSlotsAndRet");
}
-void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
+void LiftoffAssembler::PrepareCCall(wasm::FunctionSig* sig,
+ const LiftoffRegister* args,
+ ValueType out_argument_type) {
BAILOUT("PrepareCCall");
}
-void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
- uint32_t num_params) {
+void LiftoffAssembler::SetCCallRegParamAddr(Register dst, int param_byte_offset,
+ ValueType type) {
BAILOUT("SetCCallRegParamAddr");
}
-void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
- uint32_t param_idx,
- uint32_t num_params) {
+void LiftoffAssembler::SetCCallStackParamAddr(int stack_param_idx,
+ int param_byte_offset,
+ ValueType type) {
BAILOUT("SetCCallStackParamAddr");
}
+void LiftoffAssembler::LoadCCallOutArgument(LiftoffRegister dst, ValueType type,
+ int param_byte_offset) {
+ BAILOUT("LoadCCallOutArgument");
+}
+
void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
BAILOUT("CallC");
}
+void LiftoffAssembler::FinishCCall() { BAILOUT("FinishCCall"); }
+
void LiftoffAssembler::CallNativeWasmCode(Address addr) {
BAILOUT("CallNativeWasmCode");
}
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index c1f316072d..f071a003ea 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -14,10 +14,14 @@ namespace v8 {
namespace internal {
namespace wasm {
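+// Bails out of Liftoff compilation if the requested CPU feature is not
+// supported; otherwise opens a CpuFeatureScope so its instructions can be
+// emitted.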
+#define REQUIRE_CPU_FEATURE(name) \
+ if (!CpuFeatures::IsSupported(name)) return bailout("no " #name); \
+ CpuFeatureScope feature(this, name);
+
namespace liftoff {
-// rbp-8 holds the stack marker, rbp-16 is the wasm context, first stack slot
-// is located at rbp-24.
+// rbp-8 holds the stack marker, rbp-16 is the instance parameter, first stack
+// slot is located at rbp-24.
constexpr int32_t kConstantStackSpace = 16;
constexpr int32_t kFirstStackSlotOffset =
kConstantStackSpace + LiftoffAssembler::kStackSlotSize;
@@ -27,17 +31,13 @@ inline Operand GetStackSlot(uint32_t index) {
return Operand(rbp, -kFirstStackSlotOffset - offset);
}
-inline Operand GetHalfStackSlot(uint32_t half_index) {
- int32_t offset = half_index * (LiftoffAssembler::kStackSlotSize / 2);
- return Operand(rbp, -kFirstStackSlotOffset - offset);
-}
-
// TODO(clemensh): Make this a constexpr variable once Operand is constexpr.
-inline Operand GetContextOperand() { return Operand(rbp, -16); }
+inline Operand GetInstanceOperand() { return Operand(rbp, -16); }
// Use this register to store the address of the last argument pushed on the
-// stack for a call to C.
-static constexpr Register kCCallLastArgAddrReg = rax;
+// stack for a call to C. This register must be callee-saved according to the C
+// calling convention.
+static constexpr Register kCCallLastArgAddrReg = rbx;
inline Operand GetMemOp(LiftoffAssembler* assm, Register addr, Register offset,
uint32_t offset_imm, LiftoffRegList pinned) {
@@ -49,6 +49,45 @@ inline Operand GetMemOp(LiftoffAssembler* assm, Register addr, Register offset,
return Operand(addr, offset, times_1, offset_imm);
}
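+// Loads a value of the given type from {src} into {dst}, using a move
+// instruction of the matching width (movl/movq for integers, Movss/Movsd for
+// floats).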
+inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Operand src,
+ ValueType type) {
+ switch (type) {
+ case kWasmI32:
+ assm->movl(dst.gp(), src);
+ break;
+ case kWasmI64:
+ assm->movq(dst.gp(), src);
+ break;
+ case kWasmF32:
+ assm->Movss(dst.fp(), src);
+ break;
+ case kWasmF64:
+ assm->Movsd(dst.fp(), src);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
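+// Pushes {reg} onto the stack according to {type}. GP registers use pushq;
+// FP registers are stored through an explicitly reserved stack slot, since
+// there is no push instruction for XMM registers.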
+inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
+ switch (type) {
+ case kWasmI32:
+ case kWasmI64:
+ assm->pushq(reg.gp());
+ break;
+ case kWasmF32:
+ assm->subp(rsp, Immediate(kPointerSize));
+ assm->Movss(Operand(rsp, 0), reg.fp());
+ break;
+ case kWasmF64:
+ assm->subp(rsp, Immediate(kPointerSize));
+ assm->Movsd(Operand(rsp, 0), reg.fp());
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
} // namespace liftoff
uint32_t LiftoffAssembler::PrepareStackFrame() {
@@ -96,10 +135,10 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
}
-void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
- int size) {
+void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset,
+ int size) {
DCHECK_LE(offset, kMaxInt);
- movp(dst, liftoff::GetContextOperand());
+ movp(dst, liftoff::GetInstanceOperand());
DCHECK(size == 4 || size == 8);
if (size == 4) {
movl(dst, Operand(dst, offset));
@@ -108,18 +147,21 @@ void LiftoffAssembler::LoadFromContext(Register dst, uint32_t offset,
}
}
-void LiftoffAssembler::SpillContext(Register context) {
- movp(liftoff::GetContextOperand(), context);
+void LiftoffAssembler::SpillInstance(Register instance) {
+ movp(liftoff::GetInstanceOperand(), instance);
}
-void LiftoffAssembler::FillContextInto(Register dst) {
- movp(dst, liftoff::GetContextOperand());
+void LiftoffAssembler::FillInstanceInto(Register dst) {
+ movp(dst, liftoff::GetInstanceOperand());
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc) {
+ if (emit_debug_code() && offset_reg != no_reg) {
+ AssertZeroExtended(offset_reg);
+ }
Operand src_op =
liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm, pinned);
if (protected_load_pc) *protected_load_pc = pc_offset();
@@ -169,6 +211,9 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc) {
+ if (emit_debug_code() && offset_reg != no_reg) {
+ AssertZeroExtended(offset_reg);
+ }
Operand dst_op =
liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm, pinned);
if (protected_store_pc) *protected_store_pc = pc_offset();
@@ -203,22 +248,7 @@ void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
ValueType type) {
Operand src(rbp, kPointerSize * (caller_slot_idx + 1));
- switch (type) {
- case kWasmI32:
- movl(dst.gp(), src);
- break;
- case kWasmI64:
- movq(dst.gp(), src);
- break;
- case kWasmF32:
- Movss(dst.fp(), src);
- break;
- case kWasmF64:
- Movsd(dst.fp(), src);
- break;
- default:
- UNREACHABLE();
- }
+ liftoff::Load(this, dst, src, type);
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
@@ -294,19 +324,21 @@ void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
movl(dst, Immediate(value.to_i32()));
break;
case kWasmI64: {
- // We could use movq, but this would require a temporary register. For
- // simplicity (and to avoid potentially having to spill another register),
- // we use two movl instructions.
- int32_t low_word = static_cast<int32_t>(value.to_i64());
- int32_t high_word = static_cast<int32_t>(value.to_i64() >> 32);
- movl(dst, Immediate(low_word));
- movl(liftoff::GetHalfStackSlot(2 * index + 1), Immediate(high_word));
+ if (is_int32(value.to_i64())) {
+ // Sign extend low word.
+ movq(dst, Immediate(static_cast<int32_t>(value.to_i64())));
+ } else if (is_uint32(value.to_i64())) {
+ // Zero extend low word.
+ movl(kScratchRegister, Immediate(static_cast<int32_t>(value.to_i64())));
+ movq(dst, kScratchRegister);
+ } else {
+ movq(kScratchRegister, value.to_i64());
+ movq(dst, kScratchRegister);
+ }
break;
}
- case kWasmF32:
- movl(dst, Immediate(value.to_f32_boxed().get_bits()));
- break;
default:
+ // We do not track f32 and f64 constants, hence they are unreachable.
UNREACHABLE();
}
}
@@ -354,54 +386,69 @@ void LiftoffAssembler::emit_i32_sub(Register dst, Register lhs, Register rhs) {
}
}
-#define COMMUTATIVE_I32_BINOP(name, instruction) \
- void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
- Register rhs) { \
- if (dst == rhs) { \
- instruction##l(dst, lhs); \
- } else { \
- if (dst != lhs) movl(dst, lhs); \
- instruction##l(dst, rhs); \
- } \
+namespace liftoff {
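+// Helper for commutative binary operations: if {dst} aliases {rhs}, apply the
+// operation with {lhs} directly to avoid clobbering the right-hand side;
+// otherwise move {lhs} into {dst} first.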
+template <void (Assembler::*op)(Register, Register),
+ void (Assembler::*mov)(Register, Register)>
+void EmitCommutativeBinOp(LiftoffAssembler* assm, Register dst, Register lhs,
+ Register rhs) {
+ if (dst == rhs) {
+ (assm->*op)(dst, lhs);
+ } else {
+ if (dst != lhs) (assm->*mov)(dst, lhs);
+ (assm->*op)(dst, rhs);
}
+}
+} // namespace liftoff
+
+void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
+ liftoff::EmitCommutativeBinOp<&Assembler::imull, &Assembler::movl>(this, dst,
+ lhs, rhs);
+}
-// clang-format off
-COMMUTATIVE_I32_BINOP(mul, imul)
-COMMUTATIVE_I32_BINOP(and, and)
-COMMUTATIVE_I32_BINOP(or, or)
-COMMUTATIVE_I32_BINOP(xor, xor)
-// clang-format on
+void LiftoffAssembler::emit_i32_and(Register dst, Register lhs, Register rhs) {
+ liftoff::EmitCommutativeBinOp<&Assembler::andl, &Assembler::movl>(this, dst,
+ lhs, rhs);
+}
-#undef COMMUTATIVE_I32_BINOP
+void LiftoffAssembler::emit_i32_or(Register dst, Register lhs, Register rhs) {
+ liftoff::EmitCommutativeBinOp<&Assembler::orl, &Assembler::movl>(this, dst,
+ lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32_xor(Register dst, Register lhs, Register rhs) {
+ liftoff::EmitCommutativeBinOp<&Assembler::xorl, &Assembler::movl>(this, dst,
+ lhs, rhs);
+}
namespace liftoff {
+template <ValueType type>
inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
- Register lhs, Register rhs,
+ Register src, Register amount,
void (Assembler::*emit_shift)(Register),
LiftoffRegList pinned) {
// If dst is rcx, compute into the scratch register first, then move to rcx.
if (dst == rcx) {
- assm->movl(kScratchRegister, lhs);
- if (rhs != rcx) assm->movl(rcx, rhs);
+ assm->Move(kScratchRegister, src, type);
+ if (amount != rcx) assm->Move(rcx, amount, type);
(assm->*emit_shift)(kScratchRegister);
- assm->movl(rcx, kScratchRegister);
+ assm->Move(rcx, kScratchRegister, type);
return;
}
- // Move rhs into rcx. If rcx is in use, move its content into the scratch
- // register. If lhs is rcx, lhs is now the scratch register.
+ // Move amount into rcx. If rcx is in use, move its content into the scratch
+ // register. If src is rcx, src is now the scratch register.
bool use_scratch = false;
- if (rhs != rcx) {
- use_scratch = lhs == rcx ||
+ if (amount != rcx) {
+ use_scratch = src == rcx ||
assm->cache_state()->is_used(LiftoffRegister(rcx)) ||
pinned.has(LiftoffRegister(rcx));
if (use_scratch) assm->movq(kScratchRegister, rcx);
- if (lhs == rcx) lhs = kScratchRegister;
- assm->movl(rcx, rhs);
+ if (src == rcx) src = kScratchRegister;
+ assm->Move(rcx, amount, type);
}
// Do the actual shift.
- if (dst != lhs) assm->movl(dst, lhs);
+ if (dst != src) assm->Move(dst, src, type);
(assm->*emit_shift)(dst);
// Restore rcx if needed.
@@ -409,19 +456,22 @@ inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
}
} // namespace liftoff
-void LiftoffAssembler::emit_i32_shl(Register dst, Register lhs, Register rhs,
+void LiftoffAssembler::emit_i32_shl(Register dst, Register src, Register amount,
LiftoffRegList pinned) {
- liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shll_cl, pinned);
+ liftoff::EmitShiftOperation<kWasmI32>(this, dst, src, amount,
+ &Assembler::shll_cl, pinned);
}
-void LiftoffAssembler::emit_i32_sar(Register dst, Register lhs, Register rhs,
+void LiftoffAssembler::emit_i32_sar(Register dst, Register src, Register amount,
LiftoffRegList pinned) {
- liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::sarl_cl, pinned);
+ liftoff::EmitShiftOperation<kWasmI32>(this, dst, src, amount,
+ &Assembler::sarl_cl, pinned);
}
-void LiftoffAssembler::emit_i32_shr(Register dst, Register lhs, Register rhs,
+void LiftoffAssembler::emit_i32_shr(Register dst, Register src, Register amount,
LiftoffRegList pinned) {
- liftoff::EmitShiftOperation(this, dst, lhs, rhs, &Assembler::shrl_cl, pinned);
+ liftoff::EmitShiftOperation<kWasmI32>(this, dst, src, amount,
+ &Assembler::shrl_cl, pinned);
}
bool LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
@@ -465,15 +515,62 @@ bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
return true;
}
-void LiftoffAssembler::emit_ptrsize_add(Register dst, Register lhs,
- Register rhs) {
- if (lhs != dst) {
- leap(dst, Operand(lhs, rhs, times_1, 0));
+void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ if (lhs.gp() != dst.gp()) {
+ leap(dst.gp(), Operand(lhs.gp(), rhs.gp(), times_1, 0));
+ } else {
+ addp(dst.gp(), rhs.gp());
+ }
+}
+
+void LiftoffAssembler::emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ if (dst.gp() == rhs.gp()) {
+ negq(dst.gp());
+ addq(dst.gp(), lhs.gp());
} else {
- addp(dst, rhs);
+ if (dst.gp() != lhs.gp()) movq(dst.gp(), lhs.gp());
+ subq(dst.gp(), rhs.gp());
}
}
+void LiftoffAssembler::emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitCommutativeBinOp<&Assembler::andq, &Assembler::movq>(
+ this, dst.gp(), lhs.gp(), rhs.gp());
+}
+
+void LiftoffAssembler::emit_i64_or(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitCommutativeBinOp<&Assembler::orq, &Assembler::movq>(
+ this, dst.gp(), lhs.gp(), rhs.gp());
+}
+
+void LiftoffAssembler::emit_i64_xor(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitCommutativeBinOp<&Assembler::xorq, &Assembler::movq>(
+ this, dst.gp(), lhs.gp(), rhs.gp());
+}
+
+void LiftoffAssembler::emit_i64_shl(LiftoffRegister dst, LiftoffRegister src,
+ Register amount, LiftoffRegList pinned) {
+ liftoff::EmitShiftOperation<kWasmI64>(this, dst.gp(), src.gp(), amount,
+ &Assembler::shlq_cl, pinned);
+}
+
+void LiftoffAssembler::emit_i64_sar(LiftoffRegister dst, LiftoffRegister src,
+ Register amount, LiftoffRegList pinned) {
+ liftoff::EmitShiftOperation<kWasmI64>(this, dst.gp(), src.gp(), amount,
+ &Assembler::sarq_cl, pinned);
+}
+
+void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
+ Register amount, LiftoffRegList pinned) {
+ liftoff::EmitShiftOperation<kWasmI64>(this, dst.gp(), src.gp(), amount,
+ &Assembler::shrq_cl, pinned);
+}
+
void LiftoffAssembler::emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
if (CpuFeatures::IsSupported(AVX)) {
@@ -515,6 +612,32 @@ void LiftoffAssembler::emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
}
}
+void LiftoffAssembler::emit_f32_div(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vdivss(dst, lhs, rhs);
+ } else if (dst == rhs) {
+ movss(kScratchDoubleReg, rhs);
+ movss(dst, lhs);
+ divss(dst, kScratchDoubleReg);
+ } else {
+ if (dst != lhs) movss(dst, lhs);
+ divss(dst, rhs);
+ }
+}
+
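+// Computes the absolute value by clearing the sign bit: AND with a mask of
+// all ones except the top bit (kSignBit - 1).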
+void LiftoffAssembler::emit_f32_abs(DoubleRegister dst, DoubleRegister src) {
+ static constexpr uint32_t kSignBit = uint32_t{1} << 31;
+ if (dst == src) {
+ TurboAssembler::Move(kScratchDoubleReg, kSignBit - 1);
+ Andps(dst, kScratchDoubleReg);
+ } else {
+ TurboAssembler::Move(dst, kSignBit - 1);
+ Andps(dst, src);
+ }
+}
+
void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
static constexpr uint32_t kSignBit = uint32_t{1} << 31;
if (dst == src) {
@@ -526,6 +649,31 @@ void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
}
}
+void LiftoffAssembler::emit_f32_ceil(DoubleRegister dst, DoubleRegister src) {
+ REQUIRE_CPU_FEATURE(SSE4_1);
+ Roundss(dst, src, kRoundUp);
+}
+
+void LiftoffAssembler::emit_f32_floor(DoubleRegister dst, DoubleRegister src) {
+ REQUIRE_CPU_FEATURE(SSE4_1);
+ Roundss(dst, src, kRoundDown);
+}
+
+void LiftoffAssembler::emit_f32_trunc(DoubleRegister dst, DoubleRegister src) {
+ REQUIRE_CPU_FEATURE(SSE4_1);
+ Roundss(dst, src, kRoundToZero);
+}
+
+void LiftoffAssembler::emit_f32_nearest_int(DoubleRegister dst,
+ DoubleRegister src) {
+ REQUIRE_CPU_FEATURE(SSE4_1);
+ Roundss(dst, src, kRoundToNearest);
+}
+
+void LiftoffAssembler::emit_f32_sqrt(DoubleRegister dst, DoubleRegister src) {
+ Sqrtss(dst, src);
+}
+
void LiftoffAssembler::emit_f64_add(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
if (CpuFeatures::IsSupported(AVX)) {
@@ -567,6 +715,32 @@ void LiftoffAssembler::emit_f64_mul(DoubleRegister dst, DoubleRegister lhs,
}
}
+void LiftoffAssembler::emit_f64_div(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vdivsd(dst, lhs, rhs);
+ } else if (dst == rhs) {
+ movsd(kScratchDoubleReg, rhs);
+ movsd(dst, lhs);
+ divsd(dst, kScratchDoubleReg);
+ } else {
+ if (dst != lhs) movsd(dst, lhs);
+ divsd(dst, rhs);
+ }
+}
+
+void LiftoffAssembler::emit_f64_abs(DoubleRegister dst, DoubleRegister src) {
+ static constexpr uint64_t kSignBit = uint64_t{1} << 63;
+ if (dst == src) {
+ TurboAssembler::Move(kScratchDoubleReg, kSignBit - 1);
+ Andpd(dst, kScratchDoubleReg);
+ } else {
+ TurboAssembler::Move(dst, kSignBit - 1);
+ Andpd(dst, src);
+ }
+}
+
void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) {
static constexpr uint64_t kSignBit = uint64_t{1} << 63;
if (dst == src) {
@@ -578,8 +752,98 @@ void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) {
}
}
+void LiftoffAssembler::emit_f64_ceil(DoubleRegister dst, DoubleRegister src) {
+ REQUIRE_CPU_FEATURE(SSE4_1);
+ Roundsd(dst, src, kRoundUp);
+}
+
+void LiftoffAssembler::emit_f64_floor(DoubleRegister dst, DoubleRegister src) {
+ REQUIRE_CPU_FEATURE(SSE4_1);
+ Roundsd(dst, src, kRoundDown);
+}
+
+void LiftoffAssembler::emit_f64_trunc(DoubleRegister dst, DoubleRegister src) {
+ REQUIRE_CPU_FEATURE(SSE4_1);
+ Roundsd(dst, src, kRoundToZero);
+}
+
+void LiftoffAssembler::emit_f64_nearest_int(DoubleRegister dst,
+ DoubleRegister src) {
+ REQUIRE_CPU_FEATURE(SSE4_1);
+ Roundsd(dst, src, kRoundToNearest);
+}
+
+void LiftoffAssembler::emit_f64_sqrt(DoubleRegister dst, DoubleRegister src) {
+ Sqrtsd(dst, src);
+}
+
+bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
+ LiftoffRegister dst,
+ LiftoffRegister src) {
+ switch (opcode) {
+ case kExprI32ConvertI64:
+ movl(dst.gp(), src.gp());
+ return true;
+ case kExprI32ReinterpretF32:
+ Movd(dst.gp(), src.fp());
+ return true;
+ case kExprI64SConvertI32:
+ movsxlq(dst.gp(), src.gp());
+ return true;
+ case kExprI64UConvertI32:
+ AssertZeroExtended(src.gp());
+ if (dst.gp() != src.gp()) movl(dst.gp(), src.gp());
+ return true;
+ case kExprI64ReinterpretF64:
+ Movq(dst.gp(), src.fp());
+ return true;
+ case kExprF32SConvertI32:
+ Cvtlsi2ss(dst.fp(), src.gp());
+ return true;
+ case kExprF32UConvertI32:
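+ // Zero-extend the 32-bit input via movl, then use the signed 64-bit
+ // conversion, which is exact for all unsigned 32-bit values.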
+ movl(kScratchRegister, src.gp());
+ Cvtqsi2ss(dst.fp(), kScratchRegister);
+ return true;
+ case kExprF32SConvertI64:
+ Cvtqsi2ss(dst.fp(), src.gp());
+ return true;
+ case kExprF32UConvertI64:
+ Cvtqui2ss(dst.fp(), src.gp(), kScratchRegister);
+ return true;
+ case kExprF32ConvertF64:
+ Cvtsd2ss(dst.fp(), src.fp());
+ return true;
+ case kExprF32ReinterpretI32:
+ Movd(dst.fp(), src.gp());
+ return true;
+ case kExprF64SConvertI32:
+ Cvtlsi2sd(dst.fp(), src.gp());
+ return true;
+ case kExprF64UConvertI32:
+ movl(kScratchRegister, src.gp());
+ Cvtqsi2sd(dst.fp(), kScratchRegister);
+ return true;
+ case kExprF64SConvertI64:
+ Cvtqsi2sd(dst.fp(), src.gp());
+ return true;
+ case kExprF64UConvertI64:
+ Cvtqui2sd(dst.fp(), src.gp(), kScratchRegister);
+ return true;
+ case kExprF64ConvertF32:
+ Cvtss2sd(dst.fp(), src.fp());
+ return true;
+ case kExprF64ReinterpretI64:
+ Movq(dst.fp(), src.gp());
+ return true;
+ default:
+ UNREACHABLE();
+ }
+}
+
void LiftoffAssembler::emit_jump(Label* label) { jmp(label); }
+void LiftoffAssembler::emit_jump(Register target) { jmp(target); }
+
void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
ValueType type, Register lhs,
Register rhs) {
@@ -602,45 +866,76 @@ void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label,
j(cond, label);
}
+void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
+ testl(src, src);
+ setcc(equal, dst);
+ movzxbl(dst, dst);
+}
+
void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst,
Register lhs, Register rhs) {
- if (rhs != no_reg) {
- cmpl(lhs, rhs);
- } else {
- testl(lhs, lhs);
- }
+ cmpl(lhs, rhs);
+ setcc(cond, dst);
+ movzxbl(dst, dst);
+}
+
+void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
+ testq(src.gp(), src.gp());
+ setcc(equal, dst);
+ movzxbl(dst, dst);
+}
+void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ cmpq(lhs.gp(), rhs.gp());
setcc(cond, dst);
movzxbl(dst, dst);
}
-void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
- DoubleRegister lhs,
- DoubleRegister rhs) {
+namespace liftoff {
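+// Materializes the result of a floating point comparison in {dst}. If either
+// operand is NaN (PF set after ucomiss/ucomisd), the result is 1 for
+// not_equal and 0 for every other condition.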
+template <void (TurboAssembler::*cmp_op)(DoubleRegister, DoubleRegister)>
+void EmitFloatSetCond(LiftoffAssembler* assm, Condition cond, Register dst,
+ DoubleRegister lhs, DoubleRegister rhs) {
Label cont;
Label not_nan;
- Ucomiss(lhs, rhs);
+ (assm->*cmp_op)(lhs, rhs);
// If PF is one, one of the operands was NaN. This needs special handling.
- j(parity_odd, &not_nan, Label::kNear);
+ assm->j(parity_odd, &not_nan, Label::kNear);
// Return 1 for f32.ne / f64.ne, 0 for all other cases.
if (cond == not_equal) {
- movl(dst, Immediate(1));
+ assm->movl(dst, Immediate(1));
} else {
- xorl(dst, dst);
+ assm->xorl(dst, dst);
}
- jmp(&cont, Label::kNear);
- bind(&not_nan);
+ assm->jmp(&cont, Label::kNear);
+ assm->bind(&not_nan);
- setcc(cond, dst);
- movzxbl(dst, dst);
- bind(&cont);
+ assm->setcc(cond, dst);
+ assm->movzxbl(dst, dst);
+ assm->bind(&cont);
+}
+} // namespace liftoff
+
+void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
+ DoubleRegister rhs) {
+ liftoff::EmitFloatSetCond<&TurboAssembler::Ucomiss>(this, cond, dst, lhs,
+ rhs);
+}
+
+void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
+ DoubleRegister rhs) {
+ liftoff::EmitFloatSetCond<&TurboAssembler::Ucomisd>(this, cond, dst, lhs,
+ rhs);
}
void LiftoffAssembler::StackCheck(Label* ool_code) {
- Register limit = GetUnusedRegister(kGpReg).gp();
- LoadAddress(limit, ExternalReference::address_of_stack_limit(isolate()));
- cmpp(rsp, Operand(limit, 0));
+ Operand stack_limit = ExternalOperand(
+ ExternalReference::address_of_stack_limit(isolate()), kScratchRegister);
+ cmpp(rsp, stack_limit);
j(below_equal, ool_code);
}
@@ -671,22 +966,7 @@ void LiftoffAssembler::PushCallerFrameSlot(const VarState& src,
void LiftoffAssembler::PushCallerFrameSlot(LiftoffRegister reg,
ValueType type) {
- switch (type) {
- case kWasmI32:
- case kWasmI64:
- pushq(reg.gp());
- break;
- case kWasmF32:
- subp(rsp, Immediate(kPointerSize));
- Movss(Operand(rsp, 0), reg.fp());
- break;
- case kWasmF64:
- subp(rsp, Immediate(kPointerSize));
- Movsd(Operand(rsp, 0), reg.fp());
- break;
- default:
- UNREACHABLE();
- }
+ liftoff::push(this, reg, type);
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
@@ -734,37 +1014,61 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
ret(static_cast<int>(num_stack_slots * kPointerSize));
}
-void LiftoffAssembler::PrepareCCall(uint32_t num_params, const Register* args) {
- for (size_t param = 0; param < num_params; ++param) {
- pushq(args[param]);
+void LiftoffAssembler::PrepareCCall(wasm::FunctionSig* sig,
+ const LiftoffRegister* args,
+ ValueType out_argument_type) {
+ for (ValueType param_type : sig->parameters()) {
+ liftoff::push(this, *args++, param_type);
+ }
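+ // Reserve one stack slot for the out argument of the C call, if any.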
+ if (out_argument_type != kWasmStmt) {
+ subq(rsp, Immediate(kPointerSize));
}
- movq(liftoff::kCCallLastArgAddrReg, rsp);
- PrepareCallCFunction(num_params);
+ // Save the original sp (before the first push), such that we can later
+ // compute pointers to the pushed values. Do this only *after* pushing the
+ // values, because {kCCallLastArgAddrReg} might collide with an arg register.
+ int num_c_call_arguments = static_cast<int>(sig->parameter_count()) +
+ (out_argument_type != kWasmStmt);
+ int pushed_bytes = kPointerSize * num_c_call_arguments;
+ leaq(liftoff::kCCallLastArgAddrReg, Operand(rsp, pushed_bytes));
+ PrepareCallCFunction(num_c_call_arguments);
}
-void LiftoffAssembler::SetCCallRegParamAddr(Register dst, uint32_t param_idx,
- uint32_t num_params) {
- int offset = kPointerSize * static_cast<int>(num_params - 1 - param_idx);
- leaq(dst, Operand(liftoff::kCCallLastArgAddrReg, offset));
+void LiftoffAssembler::SetCCallRegParamAddr(Register dst, int param_byte_offset,
+ ValueType type) {
+ // Check that we don't accidentally overwrite kCCallLastArgAddrReg.
+ DCHECK_NE(liftoff::kCCallLastArgAddrReg, dst);
+ leaq(dst, Operand(liftoff::kCCallLastArgAddrReg, -param_byte_offset));
}
-void LiftoffAssembler::SetCCallStackParamAddr(uint32_t stack_param_idx,
- uint32_t param_idx,
- uint32_t num_params) {
+void LiftoffAssembler::SetCCallStackParamAddr(int stack_param_idx,
+ int param_byte_offset,
+ ValueType type) {
// On x64, all C call arguments fit in registers.
UNREACHABLE();
}
+void LiftoffAssembler::LoadCCallOutArgument(LiftoffRegister dst, ValueType type,
+ int param_byte_offset) {
+ // Check that we don't accidentally overwrite kCCallLastArgAddrReg.
+ DCHECK_NE(LiftoffRegister(liftoff::kCCallLastArgAddrReg), dst);
+ Operand src(liftoff::kCCallLastArgAddrReg, -param_byte_offset);
+ liftoff::Load(this, dst, src, type);
+}
+
void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
CallCFunction(ext_ref, static_cast<int>(num_params));
}
+void LiftoffAssembler::FinishCCall() {
+ movp(rsp, liftoff::kCCallLastArgAddrReg);
+}
+
void LiftoffAssembler::CallNativeWasmCode(Address addr) {
near_call(addr, RelocInfo::WASM_CALL);
}
void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
- // Set context to zero.
+ // Set instance to zero.
xorp(rsi, rsi);
CallRuntimeDelayed(zone, fid);
}
@@ -788,6 +1092,8 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
addp(rsp, Immediate(size));
}
+#undef REQUIRE_CPU_FEATURE
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/compilation-manager.cc b/deps/v8/src/wasm/compilation-manager.cc
index 4779a9f423..b94e88feee 100644
--- a/deps/v8/src/wasm/compilation-manager.cc
+++ b/deps/v8/src/wasm/compilation-manager.cc
@@ -3,7 +3,6 @@
// found in the LICENSE file.
#include "src/wasm/compilation-manager.h"
-#include "src/base/template-utils.h"
#include "src/wasm/module-compiler.h"
#include "src/objects-inl.h"
@@ -15,10 +14,11 @@ namespace wasm {
AsyncCompileJob* CompilationManager::CreateAsyncCompileJob(
Isolate* isolate, std::unique_ptr<byte[]> bytes_copy, size_t length,
Handle<Context> context, Handle<JSPromise> promise) {
- std::shared_ptr<AsyncCompileJob> job(new AsyncCompileJob(
- isolate, std::move(bytes_copy), length, context, promise));
- jobs_.insert({job.get(), job});
- return job.get();
+ AsyncCompileJob* job = new AsyncCompileJob(isolate, std::move(bytes_copy),
+ length, context, promise);
+ // Pass ownership to the unique_ptr in {jobs_}.
+ jobs_[job] = std::unique_ptr<AsyncCompileJob>(job);
+ return job;
}
void CompilationManager::StartAsyncCompileJob(
@@ -36,11 +36,11 @@ std::shared_ptr<StreamingDecoder> CompilationManager::StartStreamingCompilation(
return job->CreateStreamingDecoder();
}
-std::shared_ptr<AsyncCompileJob> CompilationManager::RemoveJob(
+std::unique_ptr<AsyncCompileJob> CompilationManager::RemoveJob(
AsyncCompileJob* job) {
auto item = jobs_.find(job);
DCHECK(item != jobs_.end());
- std::shared_ptr<AsyncCompileJob> result = std::move(item->second);
+ std::unique_ptr<AsyncCompileJob> result = std::move(item->second);
jobs_.erase(item);
return result;
}
@@ -50,10 +50,11 @@ void CompilationManager::TearDown() { jobs_.clear(); }
void CompilationManager::AbortAllJobs() {
// Iterate over a copy of {jobs_}, because {job->Abort} modifies {jobs_}.
std::vector<AsyncCompileJob*> copy;
+ copy.reserve(jobs_.size());
- for (auto entry : jobs_) copy.push_back(entry.first);
+ for (auto& entry : jobs_) copy.push_back(entry.first);
- for (auto job : copy) job->Abort();
+ for (auto* job : copy) job->Abort();
}
} // namespace wasm
diff --git a/deps/v8/src/wasm/compilation-manager.h b/deps/v8/src/wasm/compilation-manager.h
index 279f3e872e..38e80c6c2d 100644
--- a/deps/v8/src/wasm/compilation-manager.h
+++ b/deps/v8/src/wasm/compilation-manager.h
@@ -5,7 +5,7 @@
#ifndef V8_WASM_COMPILATION_MANAGER_H_
#define V8_WASM_COMPILATION_MANAGER_H_
-#include <vector>
+#include <unordered_map>
#include "src/handles.h"
#include "src/isolate.h"
@@ -31,7 +31,7 @@ class CompilationManager {
Isolate* isolate, Handle<Context> context, Handle<JSPromise> promise);
// Remove {job} from the list of active compile jobs.
- std::shared_ptr<AsyncCompileJob> RemoveJob(AsyncCompileJob* job);
+ std::unique_ptr<AsyncCompileJob> RemoveJob(AsyncCompileJob* job);
// Cancel all AsyncCompileJobs and delete their state immediately.
void TearDown();
@@ -41,6 +41,9 @@ class CompilationManager {
// AsyncCompileJob finish their execution.
void AbortAllJobs();
+ // Returns true if at least one AsyncCompileJob is currently running.
+ bool HasRunningCompileJob() const { return !jobs_.empty(); }
+
private:
AsyncCompileJob* CreateAsyncCompileJob(Isolate* isolate,
std::unique_ptr<byte[]> bytes_copy,
@@ -49,7 +52,7 @@ class CompilationManager {
// We use an AsyncCompileJob as the key for itself so that we can delete the
// job from the map when it is finished.
- std::unordered_map<AsyncCompileJob*, std::shared_ptr<AsyncCompileJob>> jobs_;
+ std::unordered_map<AsyncCompileJob*, std::unique_ptr<AsyncCompileJob>> jobs_;
};
} // namespace wasm
diff --git a/deps/v8/src/wasm/decoder.h b/deps/v8/src/wasm/decoder.h
index 242130b035..d7e76e4a77 100644
--- a/deps/v8/src/wasm/decoder.h
+++ b/deps/v8/src/wasm/decoder.h
@@ -46,6 +46,8 @@ class Decoder {
Decoder(const byte* start, const byte* end, uint32_t buffer_offset = 0)
: Decoder(start, start, end, buffer_offset) {}
+ explicit Decoder(const Vector<const byte> bytes, uint32_t buffer_offset = 0)
+ : Decoder(bytes.start(), bytes.start() + bytes.length(), buffer_offset) {}
Decoder(const byte* start, const byte* pc, const byte* end,
uint32_t buffer_offset = 0)
: start_(start), pc_(pc), end_(end), buffer_offset_(buffer_offset) {
@@ -248,6 +250,7 @@ class Decoder {
const byte* start() const { return start_; }
const byte* pc() const { return pc_; }
+ uint32_t position() const { return static_cast<uint32_t>(pc_ - start_); }
uint32_t pc_offset() const {
return static_cast<uint32_t>(pc_ - start_) + buffer_offset_;
}
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index 98aad07fcb..c8c1aeec8b 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -54,36 +54,72 @@ struct WasmException;
(this->errorf(this->pc_, "%s: %s", WasmOpcodes::OpcodeName(opcode), \
(message)))
-#define ATOMIC_OP_LIST(V) \
- V(I32AtomicLoad, Uint32) \
- V(I32AtomicLoad8U, Uint8) \
- V(I32AtomicLoad16U, Uint16) \
- V(I32AtomicAdd, Uint32) \
- V(I32AtomicAdd8U, Uint8) \
- V(I32AtomicAdd16U, Uint16) \
- V(I32AtomicSub, Uint32) \
- V(I32AtomicSub8U, Uint8) \
- V(I32AtomicSub16U, Uint16) \
- V(I32AtomicAnd, Uint32) \
- V(I32AtomicAnd8U, Uint8) \
- V(I32AtomicAnd16U, Uint16) \
- V(I32AtomicOr, Uint32) \
- V(I32AtomicOr8U, Uint8) \
- V(I32AtomicOr16U, Uint16) \
- V(I32AtomicXor, Uint32) \
- V(I32AtomicXor8U, Uint8) \
- V(I32AtomicXor16U, Uint16) \
- V(I32AtomicExchange, Uint32) \
- V(I32AtomicExchange8U, Uint8) \
- V(I32AtomicExchange16U, Uint16) \
- V(I32AtomicCompareExchange, Uint32) \
- V(I32AtomicCompareExchange8U, Uint8) \
- V(I32AtomicCompareExchange16U, Uint16)
+#define ATOMIC_OP_LIST(V) \
+ V(I32AtomicLoad, Uint32) \
+ V(I64AtomicLoad, Uint64) \
+ V(I32AtomicLoad8U, Uint8) \
+ V(I32AtomicLoad16U, Uint16) \
+ V(I64AtomicLoad8U, Uint8) \
+ V(I64AtomicLoad16U, Uint16) \
+ V(I64AtomicLoad32U, Uint32) \
+ V(I32AtomicAdd, Uint32) \
+ V(I32AtomicAdd8U, Uint8) \
+ V(I32AtomicAdd16U, Uint16) \
+ V(I64AtomicAdd, Uint64) \
+ V(I64AtomicAdd8U, Uint8) \
+ V(I64AtomicAdd16U, Uint16) \
+ V(I64AtomicAdd32U, Uint32) \
+ V(I32AtomicSub, Uint32) \
+ V(I64AtomicSub, Uint64) \
+ V(I32AtomicSub8U, Uint8) \
+ V(I32AtomicSub16U, Uint16) \
+ V(I64AtomicSub8U, Uint8) \
+ V(I64AtomicSub16U, Uint16) \
+ V(I64AtomicSub32U, Uint32) \
+ V(I32AtomicAnd, Uint32) \
+ V(I64AtomicAnd, Uint64) \
+ V(I32AtomicAnd8U, Uint8) \
+ V(I32AtomicAnd16U, Uint16) \
+ V(I64AtomicAnd8U, Uint8) \
+ V(I64AtomicAnd16U, Uint16) \
+ V(I64AtomicAnd32U, Uint32) \
+ V(I32AtomicOr, Uint32) \
+ V(I64AtomicOr, Uint64) \
+ V(I32AtomicOr8U, Uint8) \
+ V(I32AtomicOr16U, Uint16) \
+ V(I64AtomicOr8U, Uint8) \
+ V(I64AtomicOr16U, Uint16) \
+ V(I64AtomicOr32U, Uint32) \
+ V(I32AtomicXor, Uint32) \
+ V(I64AtomicXor, Uint64) \
+ V(I32AtomicXor8U, Uint8) \
+ V(I32AtomicXor16U, Uint16) \
+ V(I64AtomicXor8U, Uint8) \
+ V(I64AtomicXor16U, Uint16) \
+ V(I64AtomicXor32U, Uint32) \
+ V(I32AtomicExchange, Uint32) \
+ V(I64AtomicExchange, Uint64) \
+ V(I32AtomicExchange8U, Uint8) \
+ V(I32AtomicExchange16U, Uint16) \
+ V(I64AtomicExchange8U, Uint8) \
+ V(I64AtomicExchange16U, Uint16) \
+ V(I64AtomicExchange32U, Uint32) \
+ V(I32AtomicCompareExchange, Uint32) \
+ V(I64AtomicCompareExchange, Uint64) \
+ V(I32AtomicCompareExchange8U, Uint8) \
+ V(I32AtomicCompareExchange16U, Uint16) \
+ V(I64AtomicCompareExchange8U, Uint8) \
+ V(I64AtomicCompareExchange16U, Uint16) \
+ V(I64AtomicCompareExchange32U, Uint32)
#define ATOMIC_STORE_OP_LIST(V) \
V(I32AtomicStore, Uint32) \
+ V(I64AtomicStore, Uint64) \
V(I32AtomicStore8U, Uint8) \
- V(I32AtomicStore16U, Uint16)
+ V(I32AtomicStore16U, Uint16) \
+ V(I64AtomicStore8U, Uint8) \
+ V(I64AtomicStore16U, Uint16) \
+ V(I64AtomicStore32U, Uint32)
template <typename T, typename Allocator>
Vector<T> vec2vec(std::vector<T, Allocator>& vec) {
@@ -213,6 +249,9 @@ struct BlockTypeOperand {
case kLocalS128:
*result = kWasmS128;
return true;
+ case kLocalAnyRef:
+ *result = kWasmAnyRef;
+ return true;
default:
*result = kWasmVar;
return false;
@@ -574,6 +613,7 @@ struct ControlWithNamedConstructors : public ControlBase<Value> {
F(I64Const, Value* result, int64_t value) \
F(F32Const, Value* result, float value) \
F(F64Const, Value* result, double value) \
+ F(RefNull, Value* result) \
F(Drop, const Value& value) \
F(DoReturn, Vector<Value> values, bool implicit) \
F(GetLocal, Value* result, const LocalIndexOperand<validate>& operand) \
@@ -676,6 +716,13 @@ class WasmDecoder : public Decoder {
case kLocalF64:
type = kWasmF64;
break;
+ case kLocalAnyRef:
+ if (FLAG_experimental_wasm_anyref) {
+ type = kWasmAnyRef;
+ break;
+ }
+ decoder->error(decoder->pc() - 1, "invalid local type");
+ return false;
case kLocalS128:
if (FLAG_experimental_wasm_simd) {
type = kWasmS128;
@@ -727,7 +774,7 @@ class WasmDecoder : public Decoder {
case kExprGrowMemory:
case kExprCallFunction:
case kExprCallIndirect:
- // Add context cache nodes to the assigned set.
+ // Add instance cache nodes to the assigned set.
// TODO(titzer): make this more clear.
assigned->Add(locals_count - 1);
length = OpcodeLength(decoder, pc);
@@ -972,6 +1019,9 @@ class WasmDecoder : public Decoder {
ImmI64Operand<validate> operand(decoder, pc);
return 1 + operand.length;
}
+ case kExprRefNull: {
+ return 1;
+ }
case kExprGrowMemory:
case kExprMemorySize: {
MemoryIndexOperand<validate> operand(decoder, pc);
@@ -1064,6 +1114,7 @@ class WasmDecoder : public Decoder {
case kExprI64Const:
case kExprF32Const:
case kExprF64Const:
+ case kExprRefNull:
case kExprMemorySize:
return {0, 1};
case kExprCallFunction: {
@@ -1140,8 +1191,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// All Value types should be trivially copyable for performance. We push, pop,
// and store them in local variables.
- static_assert(IS_TRIVIALLY_COPYABLE(Value),
- "all Value<...> types should be trivially copyable");
+ ASSERT_TRIVIALLY_COPYABLE(Value);
public:
template <typename... InterfaceArgs>
@@ -1367,7 +1417,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (!LookupBlockType(&operand)) break;
PopArgs(operand.sig);
auto* block = PushBlock();
- SetBlockType(block, operand, args_);
+ SetBlockType(block, operand);
CALL_INTERFACE_IF_REACHABLE(Block, block);
PushMergeValues(block, &block->start_merge);
len = 1 + operand.length;
@@ -1396,7 +1446,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (!LookupBlockType(&operand)) break;
PopArgs(operand.sig);
auto* try_block = PushTry();
- SetBlockType(try_block, operand, args_);
+ SetBlockType(try_block, operand);
len = 1 + operand.length;
CALL_INTERFACE_IF_REACHABLE(Try, try_block);
PushMergeValues(try_block, &try_block->start_merge);
@@ -1450,7 +1500,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (!LookupBlockType(&operand)) break;
PopArgs(operand.sig);
auto* block = PushLoop();
- SetBlockType(&control_.back(), operand, args_);
+ SetBlockType(&control_.back(), operand);
len = 1 + operand.length;
CALL_INTERFACE_IF_REACHABLE(Loop, block);
PushMergeValues(block, &block->start_merge);
@@ -1463,7 +1513,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
PopArgs(operand.sig);
if (!this->ok()) break;
auto* if_block = PushIf();
- SetBlockType(if_block, operand, args_);
+ SetBlockType(if_block, operand);
CALL_INTERFACE_IF_REACHABLE(If, cond, if_block);
len = 1 + operand.length;
PushMergeValues(if_block, &if_block->start_merge);
@@ -1650,6 +1700,13 @@ class WasmFullDecoder : public WasmDecoder<validate> {
len = 1 + operand.length;
break;
}
+ case kExprRefNull: {
+ CHECK_PROTOTYPE_OPCODE(anyref);
+ auto* value = Push(kWasmAnyRef);
+ CALL_INTERFACE_IF_REACHABLE(RefNull, value);
+ len = 1;
+ break;
+ }
case kExprGetLocal: {
LocalIndexOperand<Decoder::kValidate> operand(this, this->pc_);
if (!this->Validate(this->pc_, operand)) break;
@@ -1973,13 +2030,15 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
}
- void SetBlockType(Control* c, BlockTypeOperand<validate>& operand,
- ZoneVector<Value>& params) {
- InitMerge(&c->end_merge, operand.out_arity(),
- [&] (uint32_t i) {
- return Value::New(this->pc_, operand.out_type(i)); });
+ void SetBlockType(Control* c, BlockTypeOperand<validate>& operand) {
+ DCHECK_EQ(operand.in_arity(), this->args_.size());
+ const byte* pc = this->pc_;
+ Value* args = this->args_.data();
+ InitMerge(&c->end_merge, operand.out_arity(), [pc, &operand](uint32_t i) {
+ return Value::New(pc, operand.out_type(i));
+ });
InitMerge(&c->start_merge, operand.in_arity(),
- [&] (uint32_t i) { return params[i]; });
+ [args](uint32_t i) { return args[i]; });
}
// Pops arguments as required by signature into {args_}.
@@ -2370,6 +2429,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (WasmOpcodes::IsSignExtensionOpcode(opcode)) {
RET_ON_PROTOTYPE_OPCODE(se);
}
+ if (WasmOpcodes::IsAnyRefOpcode(opcode)) {
+ RET_ON_PROTOTYPE_OPCODE(anyref);
+ }
+
switch (sig->parameter_count()) {
case 1: {
auto val = Pop(0, sig->GetParam(0));
diff --git a/deps/v8/src/wasm/function-body-decoder.cc b/deps/v8/src/wasm/function-body-decoder.cc
index 217a5ff3b1..7298ad97ae 100644
--- a/deps/v8/src/wasm/function-body-decoder.cc
+++ b/deps/v8/src/wasm/function-body-decoder.cc
@@ -37,7 +37,7 @@ struct SsaEnv {
State state;
TFNode* control;
TFNode* effect;
- compiler::WasmContextCacheNodes context_cache;
+ compiler::WasmInstanceCacheNodes instance_cache;
TFNode** locals;
bool go() { return state >= kReached; }
@@ -46,7 +46,7 @@ struct SsaEnv {
locals = nullptr;
control = nullptr;
effect = nullptr;
- context_cache = {0};
+ instance_cache = {};
}
void SetNotMerged() {
if (state == kMerged) state = kReached;
@@ -100,14 +100,14 @@ class WasmGraphBuildingInterface {
: nullptr;
// The first '+ 1' is needed by TF Start node, the second '+ 1' is for the
- // wasm_context parameter.
+ // instance parameter.
TFNode* start = builder_->Start(
static_cast<int>(decoder->sig_->parameter_count() + 1 + 1));
- // Initialize the wasm_context (the paramater at index 0).
- builder_->set_wasm_context(
- builder_->Param(compiler::kWasmContextParameterIndex));
+ // Initialize the instance parameter (index 0).
+ builder_->set_instance_node(
+ builder_->Param(compiler::kWasmInstanceParameterIndex));
// Initialize local variables. Parameters are shifted by 1 because of the
- // the wasm_context.
+ // instance parameter.
uint32_t index = 0;
for (; index < decoder->sig_->parameter_count(); ++index) {
ssa_env->locals[index] = builder_->Param(index + 1);
@@ -129,11 +129,10 @@ class WasmGraphBuildingInterface {
SetEnv(ssa_env);
}
- // Reload the wasm context variables from the WasmContext structure attached
- // to the memory object into the Ssa Environment.
+ // Reload the instance cache entries into the Ssa Environment.
void LoadContextIntoSsa(SsaEnv* ssa_env) {
if (!ssa_env || !ssa_env->go()) return;
- builder_->InitContextCache(&ssa_env->context_cache);
+ builder_->InitInstanceCache(&ssa_env->instance_cache);
}
void StartFunctionBody(Decoder* decoder, Control* block) {
@@ -235,6 +234,10 @@ class WasmGraphBuildingInterface {
result->node = builder_->Float64Constant(value);
}
+ void RefNull(Decoder* decoder, Value* result) {
+ result->node = builder_->RefNull();
+ }
+
void Drop(Decoder* decoder, const Value& value) {}
void DoReturn(Decoder* decoder, Vector<Value> values, bool implicit) {
@@ -362,7 +365,7 @@ class WasmGraphBuildingInterface {
void GrowMemory(Decoder* decoder, const Value& value, Value* result) {
result->node = BUILD(GrowMemory, value.node);
- // Always reload the context cache after growing memory.
+ // Always reload the instance cache after growing memory.
LoadContextIntoSsa(ssa_env_);
}
@@ -545,10 +548,10 @@ class WasmGraphBuildingInterface {
}
#endif
ssa_env_ = env;
- // TODO(wasm): combine the control and effect pointers with context cache.
+ // TODO(wasm): combine the control and effect pointers with instance cache.
builder_->set_control_ptr(&env->control);
builder_->set_effect_ptr(&env->effect);
- builder_->set_context_cache(&env->context_cache);
+ builder_->set_instance_cache(&env->instance_cache);
}
TFNode* CheckForException(Decoder* decoder, TFNode* node) {
@@ -634,7 +637,7 @@ class WasmGraphBuildingInterface {
to->locals = from->locals;
to->control = from->control;
to->effect = from->effect;
- to->context_cache = from->context_cache;
+ to->instance_cache = from->instance_cache;
break;
}
case SsaEnv::kReached: { // Create a new merge.
@@ -658,9 +661,9 @@ class WasmGraphBuildingInterface {
builder_->Phi(decoder->GetLocalType(i), 2, vals, merge);
}
}
- // Start a new merge from the context cache.
- builder_->NewContextCacheMerge(&to->context_cache, &from->context_cache,
- merge);
+ // Start a new merge from the instance cache.
+ builder_->NewInstanceCacheMerge(&to->instance_cache,
+ &from->instance_cache, merge);
break;
}
case SsaEnv::kMerged: {
@@ -675,9 +678,9 @@ class WasmGraphBuildingInterface {
to->locals[i] = builder_->CreateOrMergeIntoPhi(
decoder->GetLocalType(i), merge, to->locals[i], from->locals[i]);
}
- // Merge the context caches.
- builder_->MergeContextCacheInto(&to->context_cache,
- &from->context_cache, merge);
+ // Merge the instance caches.
+ builder_->MergeInstanceCacheInto(&to->instance_cache,
+ &from->instance_cache, merge);
break;
}
default:
@@ -693,21 +696,22 @@ class WasmGraphBuildingInterface {
env->control = builder_->Loop(env->control);
env->effect = builder_->EffectPhi(1, &env->effect, env->control);
builder_->Terminate(env->effect, env->control);
- // The '+ 1' here is to be able to set the context cache as assigned.
+ // The '+ 1' here is to be able to set the instance cache as assigned.
BitVector* assigned = WasmDecoder<validate>::AnalyzeLoopAssignment(
decoder, decoder->pc(), decoder->total_locals() + 1, decoder->zone());
if (decoder->failed()) return env;
if (assigned != nullptr) {
// Only introduce phis for variables assigned in this loop.
- int context_cache_index = decoder->total_locals();
+ int instance_cache_index = decoder->total_locals();
for (int i = decoder->NumLocals() - 1; i >= 0; i--) {
if (!assigned->Contains(i)) continue;
env->locals[i] = builder_->Phi(decoder->GetLocalType(i), 1,
&env->locals[i], env->control);
}
- // Introduce phis for context cache pointers if necessary.
- if (assigned->Contains(context_cache_index)) {
- builder_->PrepareContextCacheForLoop(&env->context_cache, env->control);
+ // Introduce phis for instance cache pointers if necessary.
+ if (assigned->Contains(instance_cache_index)) {
+ builder_->PrepareInstanceCacheForLoop(&env->instance_cache,
+ env->control);
}
SsaEnv* loop_body_env = Split(decoder, env);
@@ -722,8 +726,8 @@ class WasmGraphBuildingInterface {
&env->locals[i], env->control);
}
- // Conservatively introduce phis for context cache.
- builder_->PrepareContextCacheForLoop(&env->context_cache, env->control);
+ // Conservatively introduce phis for instance cache.
+ builder_->PrepareInstanceCacheForLoop(&env->instance_cache, env->control);
SsaEnv* loop_body_env = Split(decoder, env);
builder_->StackCheck(decoder->position(), &loop_body_env->effect,
@@ -746,11 +750,11 @@ class WasmGraphBuildingInterface {
size > 0 ? reinterpret_cast<TFNode**>(decoder->zone()->New(size))
: nullptr;
memcpy(result->locals, from->locals, size);
- result->context_cache = from->context_cache;
+ result->instance_cache = from->instance_cache;
} else {
result->state = SsaEnv::kUnreachable;
result->locals = nullptr;
- result->context_cache = {0};
+ result->instance_cache = {};
}
return result;
@@ -766,7 +770,7 @@ class WasmGraphBuildingInterface {
result->locals = from->locals;
result->control = from->control;
result->effect = from->effect;
- result->context_cache = from->context_cache;
+ result->instance_cache = from->instance_cache;
from->Kill(SsaEnv::kUnreachable);
return result;
}
@@ -778,7 +782,7 @@ class WasmGraphBuildingInterface {
result->control = nullptr;
result->effect = nullptr;
result->locals = nullptr;
- result->context_cache = {0};
+ result->instance_cache = {};
return result;
}
@@ -959,8 +963,31 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
os << RawOpcodeName(opcode) << ",";
- for (unsigned j = 1; j < length; ++j) {
- os << " 0x" << AsHex(i.pc()[j], 2) << ",";
+ if (opcode == kExprLoop || opcode == kExprIf || opcode == kExprBlock ||
+ opcode == kExprTry) {
+ DCHECK_EQ(2, length);
+
+ switch (i.pc()[1]) {
+#define CASE_LOCAL_TYPE(local_name, type_name) \
+ case kLocal##local_name: \
+ os << " kWasm" #type_name ","; \
+ break;
+
+ CASE_LOCAL_TYPE(I32, I32)
+ CASE_LOCAL_TYPE(I64, I64)
+ CASE_LOCAL_TYPE(F32, F32)
+ CASE_LOCAL_TYPE(F64, F64)
+ CASE_LOCAL_TYPE(S128, S128)
+ CASE_LOCAL_TYPE(Void, Stmt)
+ default:
+ os << " 0x" << AsHex(i.pc()[1], 2) << ",";
+ break;
+ }
+#undef CASE_LOCAL_TYPE
+ } else {
+ for (unsigned j = 1; j < length; ++j) {
+ os << " 0x" << AsHex(i.pc()[j], 2) << ",";
+ }
}
switch (opcode) {
diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc
index 0a09feddf2..dd8794ca54 100644
--- a/deps/v8/src/wasm/module-compiler.cc
+++ b/deps/v8/src/wasm/module-compiler.cc
@@ -4,8 +4,6 @@
#include "src/wasm/module-compiler.h"
-#include <atomic>
-
#include "src/api.h"
#include "src/asmjs/asm-js.h"
#include "src/assembler-inl.h"
@@ -15,6 +13,7 @@
#include "src/code-stubs.h"
#include "src/compiler/wasm-compiler.h"
#include "src/counters.h"
+#include "src/identity-map.h"
#include "src/property-descriptor.h"
#include "src/trap-handler/trap-handler.h"
#include "src/wasm/compilation-manager.h"
@@ -56,83 +55,26 @@ namespace v8 {
namespace internal {
namespace wasm {
-static constexpr int kInvalidSigIndex = -1;
-
-// A class compiling an entire module.
-class ModuleCompiler {
- public:
- ModuleCompiler(Isolate* isolate, WasmModule* module, Handle<Code> centry_stub,
- wasm::NativeModule* native_module);
-
- // The actual runnable task that performs compilations in the background.
- class CompilationTask : public CancelableTask {
- public:
- ModuleCompiler* compiler_;
- explicit CompilationTask(ModuleCompiler* compiler)
- : CancelableTask(&compiler->background_task_manager_),
- compiler_(compiler) {}
-
- void RunInternal() override {
- while (compiler_->executed_units_.CanAcceptWork() &&
- compiler_->FetchAndExecuteCompilationUnit()) {
- }
-
- compiler_->OnBackgroundTaskStopped();
- }
- };
-
- // The CompilationUnitBuilder builds compilation units and stores them in an
- // internal buffer. The buffer is moved into the working queue of the
- // ModuleCompiler when {Commit} is called.
- class CompilationUnitBuilder {
- public:
- explicit CompilationUnitBuilder(ModuleCompiler* compiler)
- : compiler_(compiler) {}
-
- ~CompilationUnitBuilder() { DCHECK(units_.empty()); }
-
- void AddUnit(compiler::ModuleEnv* module_env,
- wasm::NativeModule* native_module,
- const WasmFunction* function, uint32_t buffer_offset,
- Vector<const uint8_t> bytes, WasmName name) {
- units_.emplace_back(new compiler::WasmCompilationUnit(
- compiler_->isolate_, module_env, native_module,
- wasm::FunctionBody{function->sig, buffer_offset, bytes.begin(),
- bytes.end()},
- name, function->func_index, compiler_->centry_stub_,
- compiler::WasmCompilationUnit::GetDefaultCompilationMode(),
- compiler_->counters()));
- }
-
- bool Commit() {
- if (units_.empty()) return false;
-
- {
- base::LockGuard<base::Mutex> guard(
- &compiler_->compilation_units_mutex_);
- compiler_->compilation_units_.insert(
- compiler_->compilation_units_.end(),
- std::make_move_iterator(units_.begin()),
- std::make_move_iterator(units_.end()));
- }
- units_.clear();
- return true;
- }
-
- void Clear() { units_.clear(); }
+enum class CompilationEvent : uint8_t {
+ kFinishedBaselineCompilation,
+ kFailedCompilation
+};
- private:
- ModuleCompiler* compiler_;
- std::vector<std::unique_ptr<compiler::WasmCompilationUnit>> units_;
- };
+enum class NotifyCompilationCallback : uint8_t { kNotify, kNoNotify };
+// The CompilationState keeps track of the compilation state of the
+// owning NativeModule, i.e. which functions are left to be compiled.
+// It contains a task manager to allow parallel and asynchronous background
+// compilation of functions.
+class CompilationState {
+ public:
class CodeGenerationSchedule {
public:
explicit CodeGenerationSchedule(
base::RandomNumberGenerator* random_number_generator,
size_t max_memory = 0);
- void Schedule(std::unique_ptr<compiler::WasmCompilationUnit>&& item);
+ void Schedule(std::unique_ptr<compiler::WasmCompilationUnit> item);
bool IsEmpty() const { return schedule_.empty(); }
@@ -142,108 +84,98 @@ class ModuleCompiler {
bool ShouldIncreaseWorkload() const;
- void EnableThrottling() { throttle_ = true; }
-
private:
size_t GetRandomIndexInSchedule();
base::RandomNumberGenerator* random_number_generator_ = nullptr;
std::vector<std::unique_ptr<compiler::WasmCompilationUnit>> schedule_;
const size_t max_memory_;
- bool throttle_ = false;
- base::AtomicNumber<size_t> allocated_memory_{0};
+ size_t allocated_memory_ = 0;
};
- Counters* counters() const { return async_counters_.get(); }
+ explicit CompilationState(internal::Isolate* isolate);
+ ~CompilationState();
- // Run by each compilation task and by the main thread (i.e. in both
- // foreground and background threads). The no_finisher_callback is called
- // within the result_mutex_ lock when no finishing task is running, i.e. when
- // the finisher_is_running_ flag is not set.
- bool FetchAndExecuteCompilationUnit(
- std::function<void()> no_finisher_callback = nullptr);
-
- void OnBackgroundTaskStopped();
+ // Needs to be set before {AddCompilationUnits} is run, which triggers
+ // background compilation.
+ void SetNumberOfFunctionsToCompile(size_t num_functions);
+ void AddCallback(
+ std::function<void(CompilationEvent, Handle<Object>)> callback);
- void EnableThrottling() { executed_units_.EnableThrottling(); }
-
- bool CanAcceptWork() const { return executed_units_.CanAcceptWork(); }
-
- bool ShouldIncreaseWorkload() {
- if (executed_units_.ShouldIncreaseWorkload()) {
- // Check if it actually makes sense to increase the workload.
- base::LockGuard<base::Mutex> guard(&compilation_units_mutex_);
- return !compilation_units_.empty();
- }
- return false;
- }
+ // Inserts new functions to compile and kicks off compilation.
+ void AddCompilationUnits(
+ std::vector<std::unique_ptr<compiler::WasmCompilationUnit>>& units);
+ std::unique_ptr<compiler::WasmCompilationUnit> GetNextCompilationUnit();
+ std::unique_ptr<compiler::WasmCompilationUnit> GetNextExecutedUnit();
+ bool HasCompilationUnitToFinish();
- size_t InitializeCompilationUnits(const std::vector<WasmFunction>& functions,
- const ModuleWireBytes& wire_bytes,
- compiler::ModuleEnv* module_env);
+ void OnError(Handle<Object> error, NotifyCompilationCallback notify);
+ void OnFinishedUnit(NotifyCompilationCallback notify);
+ void ScheduleUnitForFinishing(
+ std::unique_ptr<compiler::WasmCompilationUnit>& unit);
- void RestartCompilationTasks();
+ void CancelAndWait();
+ void OnBackgroundTaskStopped();
+ void RestartBackgroundTasks(size_t max = std::numeric_limits<size_t>::max());
+ // Only one foreground thread (finisher) is allowed to run at a time.
+ // {SetFinisherIsRunning} returns whether the flag changed its state.
+ bool SetFinisherIsRunning(bool value);
+ void ScheduleFinisherTask();
- size_t FinishCompilationUnits(std::vector<Handle<Code>>& results,
- ErrorThrower* thrower);
+ bool StopBackgroundCompilationTaskForThrottling();
- bool IsFinisherRunning() const { return finisher_is_running_; }
+ void Abort();
- void SetFinisherIsRunning(bool value);
+ Isolate* isolate() const { return isolate_; }
- WasmCodeWrapper FinishCompilationUnit(ErrorThrower* thrower, int* func_index);
+ bool failed() const {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ return failed_;
+ }
- void CompileInParallel(const ModuleWireBytes& wire_bytes,
- compiler::ModuleEnv* module_env,
- std::vector<Handle<Code>>& results,
- ErrorThrower* thrower);
+ private:
+ void NotifyOnEvent(CompilationEvent event, Handle<Object> error);
- void CompileSequentially(const ModuleWireBytes& wire_bytes,
- compiler::ModuleEnv* module_env,
- std::vector<Handle<Code>>& results,
- ErrorThrower* thrower);
+ Isolate* isolate_;
- void ValidateSequentially(const ModuleWireBytes& wire_bytes,
- compiler::ModuleEnv* module_env,
- ErrorThrower* thrower);
+ // This mutex protects all information of this CompilationState which is being
+ // accessed concurrently.
+ mutable base::Mutex mutex_;
- MaybeHandle<WasmModuleObject> CompileToModuleObjectInternal(
- ErrorThrower* thrower, std::unique_ptr<WasmModule> module,
- const ModuleWireBytes& wire_bytes, Handle<Script> asm_js_script,
- Vector<const byte> asm_js_offset_table_bytes);
+ //////////////////////////////////////////////////////////////////////////////
+ // Protected by {mutex_}:
- private:
- Isolate* isolate_;
- WasmModule* module_;
- const std::shared_ptr<Counters> async_counters_;
std::vector<std::unique_ptr<compiler::WasmCompilationUnit>>
compilation_units_;
- base::Mutex compilation_units_mutex_;
CodeGenerationSchedule executed_units_;
- base::Mutex result_mutex_;
- const size_t num_background_tasks_;
- // This flag should only be set while holding result_mutex_.
bool finisher_is_running_ = false;
+ bool failed_ = false;
+ size_t num_background_tasks_ = 0;
+
+ // End of fields protected by {mutex_}.
+ //////////////////////////////////////////////////////////////////////////////
+
+ std::vector<std::function<void(CompilationEvent, Handle<Object>)>> callbacks_;
+
+ // When canceling the background_task_manager_, use {CancelAndWait} on
+ // the CompilationState in order to shut down cleanly.
CancelableTaskManager background_task_manager_;
- size_t stopped_compilation_tasks_ = 0;
- base::Mutex tasks_mutex_;
- Handle<Code> centry_stub_;
- wasm::NativeModule* native_module_;
+ CancelableTaskManager foreground_task_manager_;
+ std::shared_ptr<v8::TaskRunner> background_task_runner_;
+ std::shared_ptr<v8::TaskRunner> foreground_task_runner_;
+
+ const size_t max_background_tasks_ = 0;
+
+ size_t outstanding_units_ = 0;
};
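The {CompilationState} declared above folds the separate mutexes of the old {ModuleCompiler} into a single lock and keeps the single-finisher invariant: background tasks pop units and push results, while at most one foreground finisher drains them. A compact standalone model of that contract (plain C++ with std:: types only; {Unit}, {WorkState} and the method names are illustrative stand-ins, not the V8 API):

#include <memory>
#include <mutex>
#include <vector>

struct Unit {};  // stands in for compiler::WasmCompilationUnit

class WorkState {
 public:
  // Producer side; analogous to AddCompilationUnits().
  void Add(std::unique_ptr<Unit> unit) {
    std::lock_guard<std::mutex> guard(mutex_);
    todo_.push_back(std::move(unit));
  }

  // Background-worker side; analogous to GetNextCompilationUnit().
  std::unique_ptr<Unit> Next() {
    std::lock_guard<std::mutex> guard(mutex_);
    if (todo_.empty()) return nullptr;
    auto unit = std::move(todo_.back());
    todo_.pop_back();
    return unit;
  }

  // Analogous to ScheduleUnitForFinishing(): queue a finished unit and report
  // whether the caller should spawn a finisher task (none is running yet).
  bool AddResult(std::unique_ptr<Unit> unit) {
    std::lock_guard<std::mutex> guard(mutex_);
    done_.push_back(std::move(unit));
    if (finisher_is_running_) return false;
    finisher_is_running_ = true;  // at most one finisher at a time
    return true;
  }

  // Analogous to clearing the finisher flag once the finisher drains out.
  void FinisherDone() {
    std::lock_guard<std::mutex> guard(mutex_);
    finisher_is_running_ = false;
  }

 private:
  std::mutex mutex_;
  std::vector<std::unique_ptr<Unit>> todo_;
  std::vector<std::unique_ptr<Unit>> done_;
  bool finisher_is_running_ = false;
};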
namespace {
class JSToWasmWrapperCache {
public:
- void SetContextAddress(Address context_address) {
- // Prevent to have different context addresses in the cache.
- DCHECK(code_cache_.empty());
- context_address_ = context_address;
- }
-
Handle<Code> CloneOrCompileJSToWasmWrapper(Isolate* isolate,
wasm::WasmModule* module,
- WasmCodeWrapper wasm_code,
+ wasm::WasmCode* wasm_code,
uint32_t index,
bool use_trap_handler) {
const wasm::WasmFunction* func = &module->functions[index];
@@ -251,35 +183,15 @@ class JSToWasmWrapperCache {
if (cached_idx >= 0) {
Handle<Code> code = isolate->factory()->CopyCode(code_cache_[cached_idx]);
// Now patch the call to wasm code.
- if (wasm_code.IsCodeObject()) {
- for (RelocIterator it(*code, RelocInfo::kCodeTargetMask);; it.next()) {
- DCHECK(!it.done());
- Code* target =
- Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
- if (target->kind() == Code::WASM_FUNCTION ||
- target->kind() == Code::WASM_TO_JS_FUNCTION ||
- target->kind() == Code::WASM_TO_WASM_FUNCTION ||
- target->builtin_index() == Builtins::kIllegal ||
- target->builtin_index() == Builtins::kWasmCompileLazy) {
- it.rinfo()->set_target_address(
- wasm_code.GetCode()->instruction_start());
- break;
- }
- }
- } else {
- RelocIterator it(*code,
- RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL));
- DCHECK(!it.done());
- it.rinfo()->set_js_to_wasm_address(
- wasm_code.is_null()
- ? nullptr
- : wasm_code.GetWasmCode()->instructions().start());
- }
+ RelocIterator it(*code, RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL));
+ DCHECK(!it.done());
+ it.rinfo()->set_js_to_wasm_address(
+ wasm_code == nullptr ? nullptr : wasm_code->instructions().start());
return code;
}
Handle<Code> code = compiler::CompileJSToWasmWrapper(
- isolate, module, wasm_code, index, context_address_, use_trap_handler);
+ isolate, module, weak_instance_, wasm_code, index, use_trap_handler);
uint32_t new_cache_idx = sig_map_.FindOrInsert(func->sig);
DCHECK_EQ(code_cache_.size(), new_cache_idx);
USE(new_cache_idx);
@@ -287,11 +199,15 @@ class JSToWasmWrapperCache {
return code;
}
+ void SetWeakInstance(Handle<WeakCell> weak_instance) {
+ weak_instance_ = weak_instance;
+ }
+
private:
// sig_map_ maps signatures to an index in code_cache_.
wasm::SignatureMap sig_map_;
std::vector<Handle<Code>> code_cache_;
- Address context_address_ = nullptr;
+ Handle<WeakCell> weak_instance_;
};
// A helper class to simplify instantiating a module from a compiled module.
@@ -314,7 +230,7 @@ class InstanceBuilder {
struct TableInstance {
Handle<WasmTableObject> table_object; // WebAssembly.Table instance
Handle<FixedArray> js_wrappers; // JSFunctions exported
- Handle<FixedArray> function_table; // internal array of <sig,code> pairs
+ size_t table_size;
};
// A pre-evaluated value to use in import binding.
@@ -342,6 +258,7 @@ class InstanceBuilder {
const std::shared_ptr<Counters>& async_counters() const {
return async_counters_;
}
+
Counters* counters() const { return async_counters().get(); }
bool use_trap_handler() const { return compiled_module_->use_trap_handler(); }
@@ -381,20 +298,16 @@ class InstanceBuilder {
uint32_t EvalUint32InitExpr(const WasmInitExpr& expr);
// Load data segments into the memory.
- void LoadDataSegments(WasmContext* wasm_context);
+ void LoadDataSegments(Handle<WasmInstanceObject> instance);
void WriteGlobalValue(WasmGlobal& global, Handle<Object> value);
void SanitizeImports();
- Handle<FixedArray> SetupWasmToJSImportsTable(
- Handle<WasmInstanceObject> instance);
-
// Process the imports, including functions, tables, globals, and memory, in
// order, loading them from the {ffi_} object. Returns the number of imported
// functions.
- int ProcessImports(Handle<FixedArray> code_table,
- Handle<WasmInstanceObject> instance);
+ int ProcessImports(Handle<WasmInstanceObject> instance);
template <typename T>
T* GetRawGlobalPtr(WasmGlobal& global);
@@ -415,43 +328,9 @@ class InstanceBuilder {
void InitializeTables(Handle<WasmInstanceObject> instance,
CodeSpecialization* code_specialization);
- void LoadTableSegments(Handle<FixedArray> code_table,
- Handle<WasmInstanceObject> instance);
-};
-
-// This is used in ProcessImports.
-// When importing other modules' exports, we need to ask
-// the exporter for a WasmToWasm wrapper. To do that, we need to
-// switch that module to RW. To avoid flip-floping the same module
-// RW <->RX, we create a scope for a set of NativeModules.
-class SetOfNativeModuleModificationScopes final {
- public:
- void Add(NativeModule* module) {
- if (native_modules_.insert(module).second) {
- module->SetExecutable(false);
- }
- }
-
- ~SetOfNativeModuleModificationScopes() {
- for (NativeModule* module : native_modules_) {
- module->SetExecutable(true);
- }
- }
-
- private:
- std::unordered_set<NativeModule*> native_modules_;
+ void LoadTableSegments(Handle<WasmInstanceObject> instance);
};
-void EnsureWasmContextTable(WasmContext* wasm_context, int table_size) {
- if (wasm_context->table) return;
- wasm_context->table_size = table_size;
- wasm_context->table = reinterpret_cast<IndirectFunctionTableEntry*>(
- calloc(table_size, sizeof(IndirectFunctionTableEntry)));
- for (int i = 0; i < table_size; i++) {
- wasm_context->table[i].sig_id = kInvalidSigIndex;
- }
-}
-
} // namespace
MaybeHandle<WasmInstanceObject> InstantiateToInstanceObject(
@@ -466,267 +345,137 @@ MaybeHandle<WasmInstanceObject> InstantiateToInstanceObject(
return {};
}
-Handle<Code> CompileLazyOnGCHeap(Isolate* isolate) {
- HistogramTimerScope lazy_time_scope(
- isolate->counters()->wasm_lazy_compilation_time());
-
- // Find the wasm frame which triggered the lazy compile, to get the wasm
- // instance.
- StackFrameIterator it(isolate);
- // First frame: C entry stub.
- DCHECK(!it.done());
- DCHECK_EQ(StackFrame::EXIT, it.frame()->type());
- it.Advance();
- // Second frame: WasmCompileLazy builtin.
- DCHECK(!it.done());
- Handle<Code> lazy_compile_code(it.frame()->LookupCode(), isolate);
- DCHECK_EQ(Builtins::kWasmCompileLazy, lazy_compile_code->builtin_index());
- Handle<WasmInstanceObject> instance;
- Handle<FixedArray> exp_deopt_data;
- int func_index = -1;
- // If the lazy compile stub has deopt data, use that to determine the
- // instance and function index. Otherwise this must be a wasm->wasm call
- // within one instance, so extract the information from the caller.
- if (lazy_compile_code->deoptimization_data()->length() > 0) {
- exp_deopt_data = handle(lazy_compile_code->deoptimization_data(), isolate);
- auto func_info = GetWasmFunctionInfo(isolate, lazy_compile_code);
- instance = func_info.instance.ToHandleChecked();
- func_index = func_info.func_index;
- }
- it.Advance();
- // Third frame: The calling wasm code or js-to-wasm wrapper.
- DCHECK(!it.done());
- DCHECK(it.frame()->is_js_to_wasm() || it.frame()->is_wasm_compiled());
- Handle<Code> caller_code = handle(it.frame()->LookupCode(), isolate);
- if (it.frame()->is_js_to_wasm()) {
- DCHECK(!instance.is_null());
- } else if (instance.is_null()) {
- // Then this is a direct call (otherwise we would have attached the instance
- // via deopt data to the lazy compile stub). Just use the instance of the
- // caller.
- instance =
- handle(WasmInstanceObject::GetOwningInstanceGC(*caller_code), isolate);
- }
- int offset =
- static_cast<int>(it.frame()->pc() - caller_code->instruction_start());
- // Only patch the caller code if this is *no* indirect call.
- // exp_deopt_data will be null if the called function is not exported at all,
- // and its length will be <= 2 if all entries in tables were already patched.
- // Note that this check is conservative: If the first call to an exported
- // function is direct, we will just patch the export tables, and only on the
- // second call we will patch the caller.
- bool patch_caller = caller_code->kind() == Code::JS_TO_WASM_FUNCTION ||
- exp_deopt_data.is_null() || exp_deopt_data->length() <= 2;
-
- wasm::LazyCompilationOrchestrator* orchestrator =
- Managed<wasm::LazyCompilationOrchestrator>::cast(
- instance->compiled_module()
- ->shared()
- ->lazy_compilation_orchestrator())
- ->get();
- DCHECK(!orchestrator->IsFrozenForTesting());
-
- Handle<Code> compiled_code = orchestrator->CompileLazyOnGCHeap(
- isolate, instance, caller_code, offset, func_index, patch_caller);
- if (!exp_deopt_data.is_null() && exp_deopt_data->length() > 2) {
- TRACE_LAZY("Patching %d position(s) in function tables.\n",
- (exp_deopt_data->length() - 2) / 2);
- // See EnsureExportedLazyDeoptData: exp_deopt_data[2...(len-1)] are pairs of
- // <export_table, index> followed by undefined values.
- // Use this information here to patch all export tables.
- DCHECK_EQ(0, exp_deopt_data->length() % 2);
- for (int idx = 2, end = exp_deopt_data->length(); idx < end; idx += 2) {
- if (exp_deopt_data->get(idx)->IsUndefined(isolate)) break;
- FixedArray* exp_table = FixedArray::cast(exp_deopt_data->get(idx));
- int exp_index = Smi::ToInt(exp_deopt_data->get(idx + 1));
- int table_index = compiler::FunctionTableCodeOffset(exp_index);
- DCHECK(exp_table->get(table_index) == *lazy_compile_code);
- exp_table->set(table_index, *compiled_code);
- }
- // TODO(6792): No longer needed once WebAssembly code is off heap.
- CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
- // After processing, remove the list of exported entries, such that we don't
- // do the patching redundantly.
- Handle<FixedArray> new_deopt_data =
- isolate->factory()->CopyFixedArrayUpTo(exp_deopt_data, 2, TENURED);
- lazy_compile_code->set_deoptimization_data(*new_deopt_data);
- }
-
- return compiled_code;
-}
-
-Address CompileLazy(Isolate* isolate) {
- HistogramTimerScope lazy_time_scope(
- isolate->counters()->wasm_lazy_compilation_time());
-
- // Find the wasm frame which triggered the lazy compile, to get the wasm
- // instance.
- StackFrameIterator it(isolate);
- // First frame: C entry stub.
- DCHECK(!it.done());
- DCHECK_EQ(StackFrame::EXIT, it.frame()->type());
- it.Advance();
- // Second frame: WasmCompileLazy builtin.
- DCHECK(!it.done());
- Handle<WasmInstanceObject> instance;
- Maybe<uint32_t> func_index_to_compile = Nothing<uint32_t>();
- Handle<Object> exp_deopt_data_entry;
- const wasm::WasmCode* lazy_stub_or_copy =
- isolate->wasm_engine()->code_manager()->LookupCode(it.frame()->pc());
- DCHECK_EQ(wasm::WasmCode::kLazyStub, lazy_stub_or_copy->kind());
- if (!lazy_stub_or_copy->IsAnonymous()) {
- // Then it's an indirect call or via JS->wasm wrapper.
- instance =
- handle(lazy_stub_or_copy->owner()->compiled_module()->owning_instance(),
- isolate);
- func_index_to_compile = Just(lazy_stub_or_copy->index());
- exp_deopt_data_entry =
- handle(instance->compiled_module()->lazy_compile_data()->get(
- static_cast<int>(lazy_stub_or_copy->index())),
- isolate);
- }
- it.Advance();
- // Third frame: The calling wasm code (direct or indirect), or js-to-wasm
- // wrapper.
- DCHECK(!it.done());
- DCHECK(it.frame()->is_js_to_wasm() || it.frame()->is_wasm_compiled());
- Handle<Code> js_to_wasm_caller_code;
- const WasmCode* wasm_caller_code = nullptr;
- Maybe<uint32_t> offset = Nothing<uint32_t>();
- if (it.frame()->is_js_to_wasm()) {
- DCHECK(!instance.is_null());
- js_to_wasm_caller_code = handle(it.frame()->LookupCode(), isolate);
- } else {
- wasm_caller_code =
- isolate->wasm_engine()->code_manager()->LookupCode(it.frame()->pc());
- offset = Just(static_cast<uint32_t>(
- it.frame()->pc() - wasm_caller_code->instructions().start()));
- if (instance.is_null()) {
- // Then this is a direct call (otherwise we would have attached the
- // instance via deopt data to the lazy compile stub). Just use the
- // instance of the caller.
- instance = handle(
- wasm_caller_code->owner()->compiled_module()->owning_instance(),
- isolate);
- }
- }
-
- Handle<WasmCompiledModule> compiled_module(instance->compiled_module());
-
- wasm::LazyCompilationOrchestrator* orchestrator =
- Managed<wasm::LazyCompilationOrchestrator>::cast(
- compiled_module->shared()->lazy_compilation_orchestrator())
- ->get();
- DCHECK(!orchestrator->IsFrozenForTesting());
-
- NativeModuleModificationScope native_module_modification_scope(
- compiled_module->GetNativeModule());
-
- const wasm::WasmCode* result = nullptr;
- // The caller may be js to wasm calling a function
- // also available for indirect calls.
- if (!js_to_wasm_caller_code.is_null()) {
- result = orchestrator->CompileFromJsToWasm(
- isolate, instance, js_to_wasm_caller_code,
- func_index_to_compile.ToChecked());
- } else {
- DCHECK_NOT_NULL(wasm_caller_code);
- if (func_index_to_compile.IsNothing() ||
- (!exp_deopt_data_entry.is_null() &&
- !exp_deopt_data_entry->IsFixedArray())) {
- result = orchestrator->CompileDirectCall(
- isolate, instance, func_index_to_compile, wasm_caller_code,
- offset.ToChecked());
- } else {
- result = orchestrator->CompileIndirectCall(
- isolate, instance, func_index_to_compile.ToChecked());
+// A helper class to prevent pathological patching behavior for indirect
+// references to code which must be updated after lazy compiles.
+// Utilizes a reverse mapping to prevent O(n^2) behavior.
+class IndirectPatcher {
+ public:
+ void Patch(WasmInstanceObject* caller_instance,
+ WasmInstanceObject* target_instance, int func_index,
+ Address old_target, const WasmCode* new_code) {
+ DisallowHeapAllocation no_gc;
+ TRACE_LAZY(
+ "IndirectPatcher::Patch(caller=%p, target=%p, func_index=%i, "
+ "old_target=%p, "
+ "new_code=%p)\n",
+ caller_instance, target_instance, func_index, old_target, new_code);
+ if (mapping_.size() == 0 || misses_ >= kMaxMisses) {
+ BuildMapping(caller_instance);
+ }
+ // Patch entries for the given function index.
+ WasmCodeManager* code_manager =
+ caller_instance->GetIsolate()->wasm_engine()->code_manager();
+ USE(code_manager);
+ auto& entries = mapping_[func_index];
+ int patched = 0;
+ for (auto index : entries) {
+ if (index < 0) {
+ // Imported function entry.
+ int i = -1 - index;
+ ImportedFunctionEntry entry(caller_instance, i);
+ if (entry.target() == old_target) {
+ DCHECK_EQ(
+ func_index,
+ code_manager->GetCodeFromStartAddress(entry.target())->index());
+ entry.set(target_instance, new_code);
+ patched++;
+ }
+ } else {
+ // Indirect function table entry.
+ int i = index;
+ IndirectFunctionTableEntry entry(caller_instance, i);
+ if (entry.target() == old_target) {
+ DCHECK_EQ(
+ func_index,
+ code_manager->GetCodeFromStartAddress(entry.target())->index());
+ entry.set(entry.sig_id(), target_instance, new_code);
+ patched++;
+ }
+ }
}
+ if (patched == 0) misses_++;
}
- DCHECK_NOT_NULL(result);
- int func_index = static_cast<int>(result->index());
- if (!exp_deopt_data_entry.is_null() && exp_deopt_data_entry->IsFixedArray()) {
- int patched = 0;
- Handle<FixedArray> exp_deopt_data =
- Handle<FixedArray>::cast(exp_deopt_data_entry);
-
- TRACE_LAZY("Patching %d position(s) in function tables.\n",
- exp_deopt_data->length() / 2);
-
- // See EnsureExportedLazyDeoptData: exp_deopt_data[0...(len-1)] are pairs
- // of <export_table, index> followed by undefined values. Use this
- // information here to patch all export tables.
- Address target = result->instructions().start();
- Handle<Foreign> foreign_holder =
- isolate->factory()->NewForeign(target, TENURED);
- for (int idx = 0, end = exp_deopt_data->length(); idx < end; idx += 2) {
- if (exp_deopt_data->get(idx)->IsUndefined(isolate)) break;
- DisallowHeapAllocation no_gc;
- int exp_index = Smi::ToInt(exp_deopt_data->get(idx + 1));
- FixedArray* exp_table = FixedArray::cast(exp_deopt_data->get(idx));
-
- if (WASM_CONTEXT_TABLES) {
- // TODO(titzer): patching of function tables for lazy compilation
- // only works for a single instance.
- instance->wasm_context()->get()->table[exp_index].target = target;
+ private:
+ void BuildMapping(WasmInstanceObject* caller_instance) {
+ mapping_.clear();
+ misses_ = 0;
+ TRACE_LAZY("BuildMapping for (caller=%p)...\n", caller_instance);
+ Isolate* isolate = caller_instance->GetIsolate();
+ WasmCodeManager* code_manager = isolate->wasm_engine()->code_manager();
+ uint32_t num_imported_functions =
+ caller_instance->module()->num_imported_functions;
+ // Process the imported function entries.
+ for (unsigned i = 0; i < num_imported_functions; i++) {
+ ImportedFunctionEntry entry(caller_instance, i);
+ WasmCode* code = code_manager->GetCodeFromStartAddress(entry.target());
+ if (code->kind() != WasmCode::kLazyStub) continue;
+ TRACE_LAZY(" +import[%u] -> #%d (%p)\n", i, code->index(),
+ code->instructions().start());
+ DCHECK(!entry.is_js_receiver_entry());
+ WasmInstanceObject* target_instance = entry.instance();
+ WasmCode* new_code =
+ target_instance->compiled_module()->GetNativeModule()->GetCode(
+ code->index());
+ if (new_code->kind() != WasmCode::kLazyStub) {
+ // Patch an imported function entry which is already compiled.
+ entry.set(target_instance, new_code);
} else {
- int table_index = compiler::FunctionTableCodeOffset(exp_index);
- DCHECK_EQ(Foreign::cast(exp_table->get(table_index))->foreign_address(),
- lazy_stub_or_copy->instructions().start());
-
- exp_table->set(table_index, *foreign_holder);
- ++patched;
+ int key = code->index();
+ int index = -1 - i;
+ mapping_[key].push_back(index);
}
}
- // After processing, remove the list of exported entries, such that we don't
- // do the patching redundantly.
- compiled_module->lazy_compile_data()->set(
- func_index, isolate->heap()->undefined_value());
- if (!WASM_CONTEXT_TABLES) {
- DCHECK_LT(0, patched);
- USE(patched);
+ // Process the indirect function table entries.
+ size_t ift_size = caller_instance->indirect_function_table_size();
+ for (unsigned i = 0; i < ift_size; i++) {
+ IndirectFunctionTableEntry entry(caller_instance, i);
+ if (entry.target() == nullptr) continue; // null IFT entry
+ WasmCode* code = code_manager->GetCodeFromStartAddress(entry.target());
+ if (code->kind() != WasmCode::kLazyStub) continue;
+ TRACE_LAZY(" +indirect[%u] -> #%d (lazy:%p)\n", i, code->index(),
+ code->instructions().start());
+ WasmInstanceObject* target_instance = entry.instance();
+ WasmCode* new_code =
+ target_instance->compiled_module()->GetNativeModule()->GetCode(
+ code->index());
+ if (new_code->kind() != WasmCode::kLazyStub) {
+ // Patch an indirect function table entry which is already compiled.
+ entry.set(entry.sig_id(), target_instance, new_code);
+ } else {
+ int key = code->index();
+ int index = i;
+ mapping_[key].push_back(index);
+ }
}
}
- return result->instructions().start();
-}
+ static constexpr int kMaxMisses = 5; // maximum misses before rebuilding
+ std::unordered_map<int, std::vector<int>> mapping_;
+ int misses_ = 0;
+};
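The reverse mapping above replaces "scan every import and indirect-table slot on each lazy compile" with a lookup keyed by function index, rebuilt once the mapping goes stale. A condensed standalone model of the same idea (std:: containers only; {Slot} and {ReversePatcher} are illustrative names, not V8 types):

#include <unordered_map>
#include <vector>

using Address = const void*;

struct Slot {
  int func_index;   // which function this slot currently refers to
  Address target;   // current code address stored in the slot
};

class ReversePatcher {
 public:
  explicit ReversePatcher(std::vector<Slot>* slots) : slots_(slots) {}

  // Redirect every slot for {func_index} that still points at {old_target}.
  void Patch(int func_index, Address old_target, Address new_target) {
    if (mapping_.empty() || misses_ >= kMaxMisses) BuildMapping();
    int patched = 0;
    for (size_t pos : mapping_[func_index]) {
      Slot& slot = (*slots_)[pos];
      if (slot.target == old_target) {
        slot.target = new_target;
        ++patched;
      }
    }
    if (patched == 0) ++misses_;  // mapping may be stale; rebuild soon
  }

 private:
  // One full scan builds func_index -> slot positions; later patches are
  // lookups instead of scans, avoiding O(n^2) overall behavior.
  void BuildMapping() {
    mapping_.clear();
    misses_ = 0;
    for (size_t pos = 0; pos < slots_->size(); ++pos) {
      mapping_[(*slots_)[pos].func_index].push_back(pos);
    }
  }

  static constexpr int kMaxMisses = 5;
  std::vector<Slot>* slots_;
  std::unordered_map<int, std::vector<size_t>> mapping_;
  int misses_ = 0;
};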
compiler::ModuleEnv CreateModuleEnvFromCompiledModule(
Isolate* isolate, Handle<WasmCompiledModule> compiled_module) {
DisallowHeapAllocation no_gc;
WasmModule* module = compiled_module->shared()->module();
- if (FLAG_wasm_jit_to_native) {
- compiler::ModuleEnv result(module, std::vector<Address>{},
- std::vector<Handle<Code>>{},
- BUILTIN_CODE(isolate, WasmCompileLazy),
- compiled_module->use_trap_handler());
- return result;
- }
-
- std::vector<GlobalHandleAddress> function_tables;
-
- int num_function_tables = static_cast<int>(module->function_tables.size());
- FixedArray* ft =
- num_function_tables == 0 ? nullptr : compiled_module->function_tables();
- for (int i = 0; i < num_function_tables; ++i) {
- // TODO(clemensh): defer these handles for concurrent compilation.
- function_tables.push_back(WasmCompiledModule::GetTableValue(ft, i));
- }
-
- compiler::ModuleEnv result(module, std::move(function_tables),
- std::vector<Handle<Code>>{},
- BUILTIN_CODE(isolate, WasmCompileLazy),
- compiled_module->use_trap_handler());
+ compiler::ModuleEnv result(module, compiled_module->use_trap_handler());
return result;
}
-const wasm::WasmCode* LazyCompilationOrchestrator::CompileFunction(
- Isolate* isolate, Handle<WasmInstanceObject> instance, int func_index) {
+const wasm::WasmCode* LazyCompileFunction(
+ Isolate* isolate, Handle<WasmCompiledModule> compiled_module,
+ int func_index) {
base::ElapsedTimer compilation_timer;
- compilation_timer.Start();
- Handle<WasmCompiledModule> compiled_module(instance->compiled_module(),
- isolate);
+ wasm::WasmCode* existing_code = compiled_module->GetNativeModule()->GetCode(
+ static_cast<uint32_t>(func_index));
+ if (existing_code != nullptr &&
+ existing_code->kind() == wasm::WasmCode::kFunction) {
+ TRACE_LAZY("Function %d already compiled.\n", func_index);
+ return existing_code;
+ }
+ compilation_timer.Start();
// TODO(wasm): Refactor this to only get the name if it is really needed for
// tracing / debugging.
std::string func_name;
@@ -740,22 +489,6 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileFunction(
TRACE_LAZY("Compiling function %s, %d.\n", func_name.c_str(), func_index);
- if (FLAG_wasm_jit_to_native) {
- wasm::WasmCode* existing_code = compiled_module->GetNativeModule()->GetCode(
- static_cast<uint32_t>(func_index));
- if (existing_code != nullptr &&
- existing_code->kind() == wasm::WasmCode::kFunction) {
- TRACE_LAZY("Function %d already compiled.\n", func_index);
- return existing_code;
- }
- } else {
- if (Code::cast(compiled_module->code_table()->get(func_index))->kind() ==
- Code::WASM_FUNCTION) {
- TRACE_LAZY("Function %d already compiled.\n", func_index);
- return nullptr;
- }
- }
-
compiler::ModuleEnv module_env =
CreateModuleEnvFromCompiledModule(isolate, compiled_module);
@@ -773,9 +506,9 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileFunction(
CStrVector(func_name.c_str()), func_index,
CEntryStub(isolate, 1).GetCode());
unit.ExecuteCompilation();
- // TODO(6792): No longer needed once WebAssembly code is off heap.
- CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
- WasmCodeWrapper code_wrapper = unit.FinishCompilation(&thrower);
+ wasm::WasmCode* wasm_code = unit.FinishCompilation(&thrower);
+
+ if (wasm::WasmCode::ShouldBeLogged(isolate)) wasm_code->LogCode(isolate);
// If there is a pending error, something really went wrong. The module was
// verified before starting execution with lazy compilation.
@@ -783,23 +516,11 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileFunction(
// TODO(clemensh): According to the spec, we can actually skip validation at
// module creation time, and return a function that always traps here.
CHECK(!thrower.error());
- // Now specialize the generated code for this instance.
- // {code} is used only when !FLAG_wasm_jit_to_native, so it may be removed
- // when that flag is removed.
- Handle<Code> code;
- if (code_wrapper.IsCodeObject()) {
- code = code_wrapper.GetCode();
- AttachWasmFunctionInfo(isolate, code, instance, func_index);
- DCHECK_EQ(Builtins::kWasmCompileLazy,
- Code::cast(compiled_module->code_table()->get(func_index))
- ->builtin_index());
- compiled_module->code_table()->set(func_index, *code);
- }
- Zone specialization_zone(isolate->allocator(), ZONE_NAME);
- CodeSpecialization code_specialization(isolate, &specialization_zone);
- code_specialization.RelocateDirectCalls(instance);
- code_specialization.ApplyToWasmCode(code_wrapper, SKIP_ICACHE_FLUSH);
+ // Now specialize the generated code for this instance.
+ CodeSpecialization code_specialization;
+ code_specialization.RelocateDirectCalls(compiled_module->GetNativeModule());
+ code_specialization.ApplyToWasmCode(wasm_code, SKIP_ICACHE_FLUSH);
int64_t func_size =
static_cast<int64_t>(func->code.end_offset() - func->code.offset());
int64_t compilation_time = compilation_timer.Elapsed().InMicroseconds();
@@ -807,24 +528,17 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileFunction(
auto counters = isolate->counters();
counters->wasm_lazily_compiled_functions()->Increment();
- if (!code_wrapper.IsCodeObject()) {
- const wasm::WasmCode* wasm_code = code_wrapper.GetWasmCode();
- Assembler::FlushICache(wasm_code->instructions().start(),
- wasm_code->instructions().size());
- counters->wasm_generated_code_size()->Increment(
- static_cast<int>(wasm_code->instructions().size()));
- counters->wasm_reloc_size()->Increment(
- static_cast<int>(wasm_code->reloc_info().size()));
+ Assembler::FlushICache(wasm_code->instructions().start(),
+ wasm_code->instructions().size());
+ counters->wasm_generated_code_size()->Increment(
+ static_cast<int>(wasm_code->instructions().size()));
+ counters->wasm_reloc_size()->Increment(
+ static_cast<int>(wasm_code->reloc_info().size()));
- } else {
- Assembler::FlushICache(code->instruction_start(), code->instruction_size());
- counters->wasm_generated_code_size()->Increment(code->body_size());
- counters->wasm_reloc_size()->Increment(code->relocation_info()->length());
- }
counters->wasm_lazy_compilation_throughput()->AddSample(
compilation_time != 0 ? static_cast<int>(func_size / compilation_time)
: 0);
- return !code_wrapper.IsCodeObject() ? code_wrapper.GetWasmCode() : nullptr;
+ return wasm_code;
}
namespace {
@@ -840,226 +554,9 @@ int AdvanceSourcePositionTableIterator(SourcePositionTableIterator& iterator,
return byte_pos;
}
-Code* ExtractWasmToWasmCallee(Code* wasm_to_wasm) {
- DCHECK_EQ(Code::WASM_TO_WASM_FUNCTION, wasm_to_wasm->kind());
- // Find the one code target in this wrapper.
- RelocIterator it(wasm_to_wasm, RelocInfo::kCodeTargetMask);
- DCHECK(!it.done());
- Code* callee = Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
-#ifdef DEBUG
- it.next();
- DCHECK(it.done());
-#endif
- return callee;
-}
-
-const WasmCode* WasmExtractWasmToWasmCallee(const WasmCodeManager* code_manager,
- const WasmCode* wasm_to_wasm) {
- DCHECK_EQ(WasmCode::kWasmToWasmWrapper, wasm_to_wasm->kind());
- // Find the one code target in this wrapper.
- RelocIterator it(wasm_to_wasm->instructions(), wasm_to_wasm->reloc_info(),
- wasm_to_wasm->constant_pool(),
- RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL));
- DCHECK(!it.done());
- const WasmCode* callee =
- code_manager->LookupCode(it.rinfo()->js_to_wasm_address());
-#ifdef DEBUG
- it.next();
- DCHECK(it.done());
-#endif
- return callee;
-}
-
-// TODO(mtrofin): this should be a function again, when chromium:761307
-// is addressed. chromium:771171 is also related.
-#define WasmPatchWasmToWasmWrapper(isolate, wasm_to_wasm, new_target) \
- do { \
- TRACE_LAZY("Patching wasm-to-wasm wrapper.\n"); \
- DCHECK_EQ(WasmCode::kWasmToWasmWrapper, wasm_to_wasm->kind()); \
- NativeModuleModificationScope scope(wasm_to_wasm->owner()); \
- RelocIterator it(wasm_to_wasm->instructions(), wasm_to_wasm->reloc_info(), \
- wasm_to_wasm->constant_pool(), \
- RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL)); \
- DCHECK(!it.done()); \
- DCHECK_EQ(WasmCode::kLazyStub, \
- isolate->wasm_engine() \
- ->code_manager() \
- ->GetCodeFromStartAddress(it.rinfo()->js_to_wasm_address()) \
- ->kind()); \
- it.rinfo()->set_js_to_wasm_address(new_target->instructions().start()); \
- it.next(); \
- DCHECK(it.done()); \
- } while (0)
-
-void PatchWasmToWasmWrapper(Isolate* isolate, Code* wasm_to_wasm,
- Code* new_target) {
- DCHECK_EQ(Code::WASM_TO_WASM_FUNCTION, wasm_to_wasm->kind());
- // Find the one code target in this wrapper.
- RelocIterator it(wasm_to_wasm, RelocInfo::kCodeTargetMask);
- DCHECK(!it.done());
- DCHECK_EQ(Builtins::kWasmCompileLazy,
- Code::GetCodeFromTargetAddress(it.rinfo()->target_address())
- ->builtin_index());
- it.rinfo()->set_target_address(new_target->instruction_start());
-#ifdef DEBUG
- it.next();
- DCHECK(it.done());
-#endif
-}
-
-} // namespace
-
-Handle<Code> LazyCompilationOrchestrator::CompileLazyOnGCHeap(
- Isolate* isolate, Handle<WasmInstanceObject> instance, Handle<Code> caller,
- int call_offset, int exported_func_index, bool patch_caller) {
- struct NonCompiledFunction {
- int offset;
- int func_index;
- };
- std::vector<NonCompiledFunction> non_compiled_functions;
- int func_to_return_idx = exported_func_index;
- Decoder decoder(nullptr, nullptr);
- bool is_js_to_wasm = caller->kind() == Code::JS_TO_WASM_FUNCTION;
- Handle<WasmCompiledModule> compiled_module(instance->compiled_module(),
- isolate);
-
- TRACE_LAZY(
- "Starting lazy compilation (func %d @%d, js-to-wasm: %d, "
- "patch caller: %d).\n",
- exported_func_index, call_offset, is_js_to_wasm, patch_caller);
-
- // If this lazy compile stub is being called through a wasm-to-wasm wrapper,
- // remember that code object.
- Handle<Code> wasm_to_wasm_callee;
-
- // For js-to-wasm wrappers, don't iterate the reloc info. There is just one
- // call site in there anyway.
- if (patch_caller && !is_js_to_wasm) {
- DisallowHeapAllocation no_gc;
- SourcePositionTableIterator source_pos_iterator(
- caller->SourcePositionTable());
- auto caller_func_info = GetWasmFunctionInfo(isolate, caller);
- Handle<WasmCompiledModule> caller_module(
- caller_func_info.instance.ToHandleChecked()->compiled_module(),
- isolate);
- SeqOneByteString* module_bytes = caller_module->shared()->module_bytes();
- const byte* func_bytes =
- module_bytes->GetChars() + caller_module->shared()
- ->module()
- ->functions[caller_func_info.func_index]
- .code.offset();
- Code* lazy_callee = nullptr;
- for (RelocIterator it(*caller, RelocInfo::kCodeTargetMask); !it.done();
- it.next()) {
- Code* callee =
- Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
- // TODO(clemensh): Introduce safe_cast<T, bool> which (D)CHECKS
- // (depending on the bool) against limits of T and then static_casts.
- size_t offset_l = it.rinfo()->pc() - caller->instruction_start();
- DCHECK_GE(kMaxInt, offset_l);
- int offset = static_cast<int>(offset_l);
- // Call offset points to the instruction after the call. Remember the last
- // called code object before that offset.
- if (offset < call_offset) lazy_callee = callee;
- if (callee->builtin_index() != Builtins::kWasmCompileLazy) continue;
- int byte_pos =
- AdvanceSourcePositionTableIterator(source_pos_iterator, offset);
- int called_func_index =
- ExtractDirectCallIndex(decoder, func_bytes + byte_pos);
- non_compiled_functions.push_back({offset, called_func_index});
- if (offset < call_offset) func_to_return_idx = called_func_index;
- }
- TRACE_LAZY("Found %zu non-compiled functions in caller.\n",
- non_compiled_functions.size());
- DCHECK_NOT_NULL(lazy_callee);
- if (lazy_callee->kind() == Code::WASM_TO_WASM_FUNCTION) {
- TRACE_LAZY("Callee is a wasm-to-wasm.\n");
- wasm_to_wasm_callee = handle(lazy_callee, isolate);
- // If we call a wasm-to-wasm wrapper, then this wrapper actually
- // tail-called the lazy compile stub. Find it in the wrapper.
- lazy_callee = ExtractWasmToWasmCallee(lazy_callee);
- // This lazy compile stub belongs to the instance that was passed.
- DCHECK_EQ(*instance,
- *GetWasmFunctionInfo(isolate, handle(lazy_callee, isolate))
- .instance.ToHandleChecked());
- DCHECK_LE(2, lazy_callee->deoptimization_data()->length());
- func_to_return_idx =
- Smi::ToInt(lazy_callee->deoptimization_data()->get(1));
- }
- DCHECK_EQ(Builtins::kWasmCompileLazy, lazy_callee->builtin_index());
- // There must be at least one call to patch (the one that lead to calling
- // the lazy compile stub).
- DCHECK(!non_compiled_functions.empty() || !wasm_to_wasm_callee.is_null());
- }
-
- // TODO(clemensh): compile all functions in non_compiled_functions in
- // background, wait for func_to_return_idx.
- CompileFunction(isolate, instance, func_to_return_idx);
-
- Handle<Code> compiled_function(
- Code::cast(compiled_module->code_table()->get(func_to_return_idx)),
- isolate);
- DCHECK_EQ(Code::WASM_FUNCTION, compiled_function->kind());
-
- if (patch_caller || is_js_to_wasm) {
- DisallowHeapAllocation no_gc;
- // TODO(6792): No longer needed once WebAssembly code is off heap.
- CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
- // Now patch the code object with all functions which are now compiled.
- int idx = 0;
- int patched = 0;
- for (RelocIterator it(*caller, RelocInfo::kCodeTargetMask); !it.done();
- it.next()) {
- Code* callee =
- Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
- if (callee->builtin_index() != Builtins::kWasmCompileLazy) {
- // If the callee is the wasm-to-wasm wrapper triggering this lazy
- // compilation, patch it. If is_js_to_wasm is set, we did not set the
- // wasm_to_wasm_callee, so just check the code kind (this is the only
- // call in that wrapper anyway).
- if ((is_js_to_wasm && callee->kind() == Code::WASM_TO_WASM_FUNCTION) ||
- (!wasm_to_wasm_callee.is_null() &&
- callee == *wasm_to_wasm_callee)) {
- TRACE_LAZY("Patching wasm-to-wasm wrapper.\n");
- PatchWasmToWasmWrapper(isolate, callee, *compiled_function);
- ++patched;
- }
- continue;
- }
- int called_func_index = func_to_return_idx;
- if (!is_js_to_wasm) {
- DCHECK_GT(non_compiled_functions.size(), idx);
- called_func_index = non_compiled_functions[idx].func_index;
- DCHECK_EQ(non_compiled_functions[idx].offset,
- it.rinfo()->pc() - caller->instruction_start());
- ++idx;
- }
- // Check that the callee agrees with our assumed called_func_index.
- DCHECK_IMPLIES(callee->deoptimization_data()->length() > 0,
- Smi::ToInt(callee->deoptimization_data()->get(1)) ==
- called_func_index);
- Handle<Code> callee_compiled(
- Code::cast(compiled_module->code_table()->get(called_func_index)));
- if (callee_compiled->builtin_index() == Builtins::kWasmCompileLazy) {
- DCHECK_NE(func_to_return_idx, called_func_index);
- continue;
- }
- DCHECK_EQ(Code::WASM_FUNCTION, callee_compiled->kind());
- it.rinfo()->set_target_address(callee_compiled->instruction_start());
- ++patched;
- }
- DCHECK_EQ(non_compiled_functions.size(), idx);
- TRACE_LAZY("Patched %d location(s) in the caller.\n", patched);
- DCHECK_LT(0, patched);
- USE(patched);
- }
-
- return compiled_function;
-}
-
-const wasm::WasmCode* LazyCompilationOrchestrator::CompileFromJsToWasm(
+const wasm::WasmCode* LazyCompileFromJsToWasm(
Isolate* isolate, Handle<WasmInstanceObject> instance,
- Handle<Code> js_to_wasm_caller, uint32_t exported_func_index) {
+ Handle<Code> js_to_wasm_caller, uint32_t callee_func_index) {
Decoder decoder(nullptr, nullptr);
Handle<WasmCompiledModule> compiled_module(instance->compiled_module(),
isolate);
@@ -1067,37 +564,24 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileFromJsToWasm(
TRACE_LAZY(
"Starting lazy compilation (func %u, js_to_wasm: true, patch caller: "
"true). \n",
- exported_func_index);
- CompileFunction(isolate, instance, exported_func_index);
+ callee_func_index);
+ LazyCompileFunction(isolate, compiled_module, callee_func_index);
{
DisallowHeapAllocation no_gc;
- int patched = 0;
CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
RelocIterator it(*js_to_wasm_caller,
RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL));
DCHECK(!it.done());
- wasm::WasmCode* current_callee =
- isolate->wasm_engine()->code_manager()->LookupCode(
- it.rinfo()->js_to_wasm_address());
const wasm::WasmCode* callee_compiled =
- compiled_module->GetNativeModule()->GetCode(exported_func_index);
+ compiled_module->GetNativeModule()->GetCode(callee_func_index);
DCHECK_NOT_NULL(callee_compiled);
- if (current_callee->kind() == WasmCode::kWasmToWasmWrapper) {
- WasmPatchWasmToWasmWrapper(isolate, current_callee, callee_compiled);
- ++patched;
- } else {
- DCHECK_EQ(WasmCode::kLazyStub,
- isolate->wasm_engine()
- ->code_manager()
- ->GetCodeFromStartAddress(it.rinfo()->js_to_wasm_address())
- ->kind());
- it.rinfo()->set_js_to_wasm_address(
- callee_compiled->instructions().start());
- ++patched;
- }
- DCHECK_LT(0, patched);
- TRACE_LAZY("Patched %d location(s) in the caller.\n", patched);
- USE(patched);
+ DCHECK_EQ(WasmCode::kLazyStub,
+ isolate->wasm_engine()
+ ->code_manager()
+ ->GetCodeFromStartAddress(it.rinfo()->js_to_wasm_address())
+ ->kind());
+ it.rinfo()->set_js_to_wasm_address(callee_compiled->instructions().start());
+ TRACE_LAZY("Patched 1 location in js-to-wasm %p.\n", *js_to_wasm_caller);
#ifdef DEBUG
it.next();
@@ -1106,46 +590,53 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileFromJsToWasm(
}
wasm::WasmCode* ret =
- compiled_module->GetNativeModule()->GetCode(exported_func_index);
+ compiled_module->GetNativeModule()->GetCode(callee_func_index);
DCHECK_NOT_NULL(ret);
DCHECK_EQ(wasm::WasmCode::kFunction, ret->kind());
return ret;
}
-const wasm::WasmCode* LazyCompilationOrchestrator::CompileIndirectCall(
+const wasm::WasmCode* LazyCompileIndirectCall(
Isolate* isolate, Handle<WasmInstanceObject> instance,
uint32_t func_index) {
TRACE_LAZY(
"Starting lazy compilation (func %u, js_to_wasm: false, patch caller: "
"false). \n",
func_index);
- return CompileFunction(isolate, instance, func_index);
+ Handle<WasmCompiledModule> compiled_module(instance->compiled_module(),
+ isolate);
+ return LazyCompileFunction(isolate, compiled_module, func_index);
}
-const wasm::WasmCode* LazyCompilationOrchestrator::CompileDirectCall(
- Isolate* isolate, Handle<WasmInstanceObject> instance,
- Maybe<uint32_t> maybe_func_to_return_idx, const wasm::WasmCode* wasm_caller,
- int call_offset) {
- std::vector<Maybe<uint32_t>> non_compiled_functions;
+const wasm::WasmCode* LazyCompileDirectCall(Isolate* isolate,
+ Handle<WasmInstanceObject> instance,
+ const wasm::WasmCode* wasm_caller,
+ int32_t caller_ret_offset) {
+ DCHECK_LE(0, caller_ret_offset);
+
Decoder decoder(nullptr, nullptr);
- WasmCode* last_callee = nullptr;
+ // Gather all the targets of direct calls inside the code of {wasm_caller}
+ // and place their function indexes in {direct_callees}.
+ std::vector<int32_t> direct_callees;
+ // The last one before {caller_ret_offset} must be the call that triggered
+ // this lazy compilation.
+ int callee_pos = -1;
+ uint32_t num_non_compiled_callees = 0; // For stats.
{
DisallowHeapAllocation no_gc;
Handle<WasmCompiledModule> caller_module(
- wasm_caller->owner()->compiled_module(), isolate);
+ wasm_caller->native_module()->compiled_module(), isolate);
SeqOneByteString* module_bytes = caller_module->shared()->module_bytes();
uint32_t caller_func_index = wasm_caller->index();
SourcePositionTableIterator source_pos_iterator(
- Handle<ByteArray>(ByteArray::cast(
- caller_module->source_positions()->get(caller_func_index))));
+ wasm_caller->source_positions());
const byte* func_bytes =
module_bytes->GetChars() + caller_module->shared()
->module()
->functions[caller_func_index]
.code.offset();
- int num_non_compiled_functions = 0;
for (RelocIterator it(wasm_caller->instructions(),
wasm_caller->reloc_info(),
wasm_caller->constant_pool(),
@@ -1161,72 +652,62 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileDirectCall(
WasmCode* callee = isolate->wasm_engine()->code_manager()->LookupCode(
it.rinfo()->target_address());
- if (offset < call_offset) last_callee = callee;
- if (callee->kind() != WasmCode::kLazyStub) {
- non_compiled_functions.push_back(Nothing<uint32_t>());
+ if (callee->kind() == WasmCode::kLazyStub) {
+ // The callee has not been compiled.
+ ++num_non_compiled_callees;
+ int32_t callee_func_index =
+ ExtractDirectCallIndex(decoder, func_bytes + byte_pos);
+ DCHECK_LT(callee_func_index,
+ caller_module->GetNativeModule()->FunctionCount());
+ // {caller_ret_offset} points to one instruction after the call.
+ // Remember the last called function before that offset.
+ if (offset < caller_ret_offset) {
+ callee_pos = static_cast<int>(direct_callees.size());
+ }
+ direct_callees.push_back(callee_func_index);
+ } else {
+ // If the callee is not the lazy compile stub, assume this callee
+ // has already been compiled.
+ direct_callees.push_back(-1);
continue;
}
- ++num_non_compiled_functions;
-
- uint32_t called_func_index =
- ExtractDirectCallIndex(decoder, func_bytes + byte_pos);
- DCHECK_LT(called_func_index,
- caller_module->GetNativeModule()->FunctionCount());
- non_compiled_functions.push_back(Just(called_func_index));
- // Call offset one instruction after the call. Remember the last called
- // function before that offset.
- if (offset < call_offset) {
- maybe_func_to_return_idx = Just(called_func_index);
- }
}
- TRACE_LAZY("Found %d non-compiled functions in caller.\n",
- num_non_compiled_functions);
- USE(num_non_compiled_functions);
- }
- uint32_t func_to_return_idx = 0;
-
- if (last_callee->kind() == WasmCode::kWasmToWasmWrapper) {
- const WasmCode* actual_callee = WasmExtractWasmToWasmCallee(
- isolate->wasm_engine()->code_manager(), last_callee);
- func_to_return_idx = actual_callee->index();
- } else {
- func_to_return_idx = maybe_func_to_return_idx.ToChecked();
+ TRACE_LAZY("Found %d non-compiled callees in function=%p.\n",
+ num_non_compiled_callees, wasm_caller);
+ USE(num_non_compiled_callees);
}
+ CHECK_LE(0, callee_pos);
+ // TODO(wasm): compile all not-yet-compiled callees in {direct_callees} in
+ // the background, and wait for direct_callees[callee_pos].
+ auto callee_func_index = direct_callees[callee_pos];
TRACE_LAZY(
- "Starting lazy compilation (func %u @%d, js_to_wasm: false, patch "
- "caller: true). \n",
- func_to_return_idx, call_offset);
+ "Starting lazy compilation (function=%p retaddr=+%d direct_callees[%d] "
+ "-> %d).\n",
+ wasm_caller, caller_ret_offset, callee_pos, callee_func_index);
- // TODO(clemensh): compile all functions in non_compiled_functions in
- // background, wait for func_to_return_idx.
- const WasmCode* ret = CompileFunction(isolate, instance, func_to_return_idx);
+ Handle<WasmCompiledModule> compiled_module(instance->compiled_module(),
+ isolate);
+ const WasmCode* ret =
+ LazyCompileFunction(isolate, compiled_module, callee_func_index);
DCHECK_NOT_NULL(ret);
int patched = 0;
- if (last_callee->kind() == WasmCode::kWasmToWasmWrapper) {
- // We can finish it all here by compiling the target wasm function and
- // patching the wasm_to_wasm caller.
- WasmPatchWasmToWasmWrapper(isolate, last_callee, ret);
- ++patched;
- } else {
- Handle<WasmCompiledModule> compiled_module(instance->compiled_module(),
- isolate);
+ {
DisallowHeapAllocation no_gc;
- // Now patch the code object with all functions which are now compiled. This
- // will pick up any other compiled functions, not only {ret}.
- size_t idx = 0;
+ // Now patch the code in {wasm_caller} with all functions which are now
+ // compiled. This will pick up any other compiled functions, not only {ret}.
+ size_t pos = 0;
for (RelocIterator
it(wasm_caller->instructions(), wasm_caller->reloc_info(),
wasm_caller->constant_pool(),
RelocInfo::ModeMask(RelocInfo::WASM_CALL));
- !it.done(); it.next(), ++idx) {
- auto& info = non_compiled_functions[idx];
- if (info.IsNothing()) continue;
- uint32_t lookup = info.ToChecked();
+ !it.done(); it.next(), ++pos) {
+ auto callee_index = direct_callees[pos];
+ if (callee_index < 0) continue; // callee already compiled.
const WasmCode* callee_compiled =
- compiled_module->GetNativeModule()->GetCode(lookup);
+ compiled_module->GetNativeModule()->GetCode(callee_index);
if (callee_compiled->kind() != WasmCode::kFunction) continue;
DCHECK_EQ(WasmCode::kLazyStub,
isolate->wasm_engine()
@@ -1237,249 +718,408 @@ const wasm::WasmCode* LazyCompilationOrchestrator::CompileDirectCall(
callee_compiled->instructions().start());
++patched;
}
- DCHECK_EQ(non_compiled_functions.size(), idx);
+ DCHECK_EQ(direct_callees.size(), pos);
}
DCHECK_LT(0, patched);
- TRACE_LAZY("Patched %d location(s) in the caller.\n", patched);
+ TRACE_LAZY("Patched %d calls(s) in %p.\n", patched, wasm_caller);
USE(patched);
return ret;
}
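{LazyCompileDirectCall} picks the callee to compile by remembering the last not-yet-compiled call site whose offset precedes the caller's return address. A minimal standalone version of that selection step (the {CallSite} record and the sample offsets are illustrative):

#include <cassert>
#include <vector>

struct CallSite {
  int offset;      // offset of the call instruction in the caller
  int func_index;  // -1 if the callee is already compiled
};

// Returns the position in {sites} of the last uncompiled callee whose call
// precedes {ret_offset}, i.e. the call that triggered lazy compilation.
int FindTriggeringCall(const std::vector<CallSite>& sites, int ret_offset) {
  int callee_pos = -1;
  for (size_t pos = 0; pos < sites.size(); ++pos) {
    if (sites[pos].func_index < 0) continue;  // already compiled
    if (sites[pos].offset < ret_offset) callee_pos = static_cast<int>(pos);
  }
  assert(callee_pos >= 0);
  return callee_pos;
}

int main() {
  // Caller returns to offset 40; calls at 10 and 30 target uncompiled callees.
  std::vector<CallSite> sites = {{10, 7}, {20, -1}, {30, 9}, {50, 11}};
  assert(FindTriggeringCall(sites, 40) == 2);  // the call at offset 30
  return 0;
}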
-ModuleCompiler::CodeGenerationSchedule::CodeGenerationSchedule(
- base::RandomNumberGenerator* random_number_generator, size_t max_memory)
- : random_number_generator_(random_number_generator),
- max_memory_(max_memory) {
- DCHECK_NOT_NULL(random_number_generator_);
- DCHECK_GT(max_memory_, 0);
+} // namespace
+
+Address CompileLazy(Isolate* isolate,
+ Handle<WasmInstanceObject> target_instance) {
+ HistogramTimerScope lazy_time_scope(
+ isolate->counters()->wasm_lazy_compilation_time());
+
+ //==========================================================================
+ // Begin stack walk.
+ //==========================================================================
+ StackFrameIterator it(isolate);
+
+ //==========================================================================
+ // First frame: C entry stub.
+ //==========================================================================
+ DCHECK(!it.done());
+ DCHECK_EQ(StackFrame::EXIT, it.frame()->type());
+ it.Advance();
+
+ //==========================================================================
+ // Second frame: WasmCompileLazy builtin.
+ //==========================================================================
+ DCHECK(!it.done());
+ int target_func_index = -1;
+ bool indirectly_called = false;
+ const wasm::WasmCode* lazy_stub =
+ isolate->wasm_engine()->code_manager()->LookupCode(it.frame()->pc());
+ CHECK_EQ(wasm::WasmCode::kLazyStub, lazy_stub->kind());
+ if (!lazy_stub->IsAnonymous()) {
+ // If the lazy stub is not "anonymous", then its copy encodes the target
+ // function index. Used for import and indirect calls.
+ target_func_index = lazy_stub->index();
+ indirectly_called = true;
+ }
+ it.Advance();
+
+ //==========================================================================
+ // Third frame: The calling wasm code (direct or indirect), or js-to-wasm
+ // wrapper.
+ //==========================================================================
+ DCHECK(!it.done());
+ DCHECK(it.frame()->is_js_to_wasm() || it.frame()->is_wasm_compiled());
+ Handle<Code> js_to_wasm_caller_code;
+ const WasmCode* wasm_caller_code = nullptr;
+ int32_t caller_ret_offset = -1;
+ if (it.frame()->is_js_to_wasm()) {
+ js_to_wasm_caller_code = handle(it.frame()->LookupCode(), isolate);
+ // This wasn't actually an indirect call, but a JS->wasm call.
+ indirectly_called = false;
+ } else {
+ wasm_caller_code =
+ isolate->wasm_engine()->code_manager()->LookupCode(it.frame()->pc());
+ auto offset = it.frame()->pc() - wasm_caller_code->instructions().start();
+ caller_ret_offset = static_cast<int32_t>(offset);
+ DCHECK_EQ(offset, caller_ret_offset);
+ }
+
+ //==========================================================================
+ // Begin compilation.
+ //==========================================================================
+ Handle<WasmCompiledModule> compiled_module(
+ target_instance->compiled_module());
+
+ NativeModule* native_module = compiled_module->GetNativeModule();
+ DCHECK(!native_module->lazy_compile_frozen());
+
+ NativeModuleModificationScope native_module_modification_scope(native_module);
+
+ const wasm::WasmCode* result = nullptr;
+
+ if (!js_to_wasm_caller_code.is_null()) {
+ result = LazyCompileFromJsToWasm(isolate, target_instance,
+ js_to_wasm_caller_code, target_func_index);
+ DCHECK_NOT_NULL(result);
+ DCHECK_EQ(target_func_index, result->index());
+ } else {
+ DCHECK_NOT_NULL(wasm_caller_code);
+ if (target_func_index < 0) {
+ result = LazyCompileDirectCall(isolate, target_instance, wasm_caller_code,
+ caller_ret_offset);
+ DCHECK_NOT_NULL(result);
+ } else {
+ result =
+ LazyCompileIndirectCall(isolate, target_instance, target_func_index);
+ DCHECK_NOT_NULL(result);
+ }
+ }
+
+ //==========================================================================
+ // Update import and indirect function tables in the caller.
+ //==========================================================================
+ if (indirectly_called) {
+ DCHECK_NOT_NULL(wasm_caller_code);
+ Handle<WasmInstanceObject> caller_instance(
+ WasmInstanceObject::GetOwningInstance(wasm_caller_code), isolate);
+ if (!caller_instance->has_managed_indirect_patcher()) {
+ auto patcher = Managed<IndirectPatcher>::Allocate(isolate);
+ caller_instance->set_managed_indirect_patcher(*patcher);
+ }
+ IndirectPatcher* patcher = Managed<IndirectPatcher>::cast(
+ caller_instance->managed_indirect_patcher())
+ ->get();
+ Address old_target = lazy_stub->instructions().start();
+ patcher->Patch(*caller_instance, *target_instance, target_func_index,
+ old_target, result);
+ }
+
+ return result->instructions().start();
}
-void ModuleCompiler::CodeGenerationSchedule::Schedule(
- std::unique_ptr<compiler::WasmCompilationUnit>&& item) {
- size_t cost = item->memory_cost();
- schedule_.push_back(std::move(item));
- allocated_memory_.Increment(cost);
+namespace {
+bool compile_lazy(const WasmModule* module) {
+ return FLAG_wasm_lazy_compilation ||
+ (FLAG_asm_wasm_lazy_compilation && module->is_asm_js());
}
-bool ModuleCompiler::CodeGenerationSchedule::CanAcceptWork() const {
- return (!throttle_ || allocated_memory_.Value() <= max_memory_);
+void FlushICache(const wasm::NativeModule* native_module) {
+ for (uint32_t i = 0, e = native_module->FunctionCount(); i < e; ++i) {
+ const wasm::WasmCode* code = native_module->GetCode(i);
+ if (code == nullptr) continue;
+ Assembler::FlushICache(code->instructions().start(),
+ code->instructions().size());
+ }
}
-bool ModuleCompiler::CodeGenerationSchedule::ShouldIncreaseWorkload() const {
- // Half the memory is unused again, we can increase the workload again.
- return (!throttle_ || allocated_memory_.Value() <= max_memory_ / 2);
+void FlushICache(Handle<FixedArray> functions) {
+ for (int i = 0, e = functions->length(); i < e; ++i) {
+ if (!functions->get(i)->IsCode()) continue;
+ Code* code = Code::cast(functions->get(i));
+ Assembler::FlushICache(code->raw_instruction_start(),
+ code->raw_instruction_size());
+ }
}
-std::unique_ptr<compiler::WasmCompilationUnit>
-ModuleCompiler::CodeGenerationSchedule::GetNext() {
- DCHECK(!IsEmpty());
- size_t index = GetRandomIndexInSchedule();
- auto ret = std::move(schedule_[index]);
- std::swap(schedule_[schedule_.size() - 1], schedule_[index]);
- schedule_.pop_back();
- allocated_memory_.Decrement(ret->memory_cost());
- return ret;
+byte* raw_buffer_ptr(MaybeHandle<JSArrayBuffer> buffer, int offset) {
+ return static_cast<byte*>(buffer.ToHandleChecked()->backing_store()) + offset;
}
-size_t ModuleCompiler::CodeGenerationSchedule::GetRandomIndexInSchedule() {
- double factor = random_number_generator_->NextDouble();
- size_t index = (size_t)(factor * schedule_.size());
- DCHECK_GE(index, 0);
- DCHECK_LT(index, schedule_.size());
- return index;
+void RecordStats(const Code* code, Counters* counters) {
+ counters->wasm_generated_code_size()->Increment(code->body_size());
+ counters->wasm_reloc_size()->Increment(code->relocation_info()->length());
}
-ModuleCompiler::ModuleCompiler(Isolate* isolate, WasmModule* module,
- Handle<Code> centry_stub,
- wasm::NativeModule* native_module)
- : isolate_(isolate),
- module_(module),
- async_counters_(isolate->async_counters()),
- executed_units_(
- isolate->random_number_generator(),
- (isolate->heap()->memory_allocator()->code_range()->valid()
- ? isolate->heap()->memory_allocator()->code_range()->size()
- : isolate->heap()->code_space()->Capacity()) /
- 2),
- num_background_tasks_(
- Min(static_cast<size_t>(FLAG_wasm_num_compilation_tasks),
- V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads())),
- stopped_compilation_tasks_(num_background_tasks_),
- centry_stub_(centry_stub),
- native_module_(native_module) {}
-
-// The actual runnable task that performs compilations in the background.
-void ModuleCompiler::OnBackgroundTaskStopped() {
- base::LockGuard<base::Mutex> guard(&tasks_mutex_);
- ++stopped_compilation_tasks_;
- DCHECK_LE(stopped_compilation_tasks_, num_background_tasks_);
+void RecordStats(const wasm::WasmCode* code, Counters* counters) {
+ counters->wasm_generated_code_size()->Increment(
+ static_cast<int>(code->instructions().size()));
+ counters->wasm_reloc_size()->Increment(
+ static_cast<int>(code->reloc_info().size()));
+}
+
+void RecordStats(Handle<FixedArray> functions, Counters* counters) {
+ DisallowHeapAllocation no_gc;
+ for (int i = 0; i < functions->length(); ++i) {
+ Object* val = functions->get(i);
+ if (val->IsCode()) RecordStats(Code::cast(val), counters);
+ }
+}
+
+void RecordStats(const wasm::NativeModule* native_module, Counters* counters) {
+ for (uint32_t i = 0, e = native_module->FunctionCount(); i < e; ++i) {
+ const wasm::WasmCode* code = native_module->GetCode(i);
+ if (code != nullptr) RecordStats(code, counters);
+ }
+}
+
+bool in_bounds(uint32_t offset, size_t size, size_t upper) {
+ return offset + size <= upper && offset + size >= offset;
+}
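The second comparison in {in_bounds} rejects unsigned wrap-around: if {offset + size} overflows, the sum becomes smaller than {offset}, so an out-of-range access cannot slip past the upper-bound test. A small standalone check of the same predicate (values chosen only for illustration):

#include <cassert>
#include <cstdint>
#include <limits>

bool InBounds(uint32_t offset, size_t size, size_t upper) {
  return offset + size <= upper && offset + size >= offset;
}

int main() {
  assert(InBounds(16, 16, 64));    // fits within the 64-byte bound
  assert(!InBounds(56, 16, 64));   // runs past the end
  // Wraps around: 8 + (SIZE_MAX - 4) overflows to 3, caught by the 2nd check.
  assert(!InBounds(8, std::numeric_limits<size_t>::max() - 4, 64));
  return 0;
}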
+
+using WasmInstanceMap =
+ IdentityMap<Handle<WasmInstanceObject>, FreeStoreAllocationPolicy>;
+
+double MonotonicallyIncreasingTimeInMs() {
+ return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
+ base::Time::kMillisecondsPerSecond;
+}
+
+std::unique_ptr<compiler::ModuleEnv> CreateDefaultModuleEnv(
+ Isolate* isolate, WasmModule* module) {
+ // TODO(kschimpf): Add module-specific policy handling here (see v8:7143)?
+ bool use_trap_handler = trap_handler::IsTrapHandlerEnabled();
+ return base::make_unique<compiler::ModuleEnv>(module, use_trap_handler);
+}
+
+Handle<WasmCompiledModule> NewCompiledModule(Isolate* isolate,
+ WasmModule* module,
+ Handle<FixedArray> export_wrappers,
+ compiler::ModuleEnv* env) {
+ Handle<WasmCompiledModule> compiled_module = WasmCompiledModule::New(
+ isolate, module, export_wrappers, env->use_trap_handler);
+ return compiled_module;
}
-// Run by each compilation task. The no_finisher_callback is called
-// within the result_mutex_ lock when no finishing task is running,
-// i.e. when the finisher_is_running_ flag is not set.
-bool ModuleCompiler::FetchAndExecuteCompilationUnit(
- std::function<void()> no_finisher_callback) {
+size_t GetMaxUsableMemorySize(Isolate* isolate) {
+ return isolate->heap()->memory_allocator()->code_range()->valid()
+ ? isolate->heap()->memory_allocator()->code_range()->size()
+ : isolate->heap()->code_space()->Capacity();
+}
+
+// The CompilationUnitBuilder builds compilation units and stores them in an
+// internal buffer. The buffer is moved into the working queue of the
+// CompilationState when {Commit} is called.
+class CompilationUnitBuilder {
+ public:
+ explicit CompilationUnitBuilder(NativeModule* native_module,
+ compiler::ModuleEnv* module_env,
+ Handle<Code> centry_stub)
+ : native_module_(native_module),
+ compilation_state_(native_module->compilation_state()),
+ module_env_(module_env),
+ centry_stub_(centry_stub) {}
+
+ void AddUnit(const WasmFunction* function, uint32_t buffer_offset,
+ Vector<const uint8_t> bytes, WasmName name) {
+ units_.emplace_back(new compiler::WasmCompilationUnit(
+ compilation_state_->isolate(), module_env_, native_module_,
+ wasm::FunctionBody{function->sig, buffer_offset, bytes.begin(),
+ bytes.end()},
+ name, function->func_index, centry_stub_,
+ compiler::WasmCompilationUnit::GetDefaultCompilationMode(),
+ compilation_state_->isolate()->async_counters().get()));
+ }
+
+ bool Commit() {
+ if (units_.empty()) return false;
+ compilation_state_->AddCompilationUnits(units_);
+ units_.clear();
+ return true;
+ }
+
+ void Clear() { units_.clear(); }
+
+ private:
+ NativeModule* native_module_;
+ CompilationState* compilation_state_;
+ compiler::ModuleEnv* module_env_;
+ Handle<Code> centry_stub_;
+ std::vector<std::unique_ptr<compiler::WasmCompilationUnit>> units_;
+};
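The builder buffers units locally and moves the whole batch into the shared state in one {Commit}, so the queue lock is taken per batch rather than per function. A reduced standalone sketch of that producer-side pattern ({Sink} and {UnitBuilder} are illustrative stand-ins for the CompilationState and CompilationUnitBuilder above):

#include <memory>
#include <mutex>
#include <vector>

struct Unit {};  // stands in for compiler::WasmCompilationUnit

// Any sink with a bulk-add method; in this patch that role is played by
// CompilationState::AddCompilationUnits().
struct Sink {
  std::mutex mutex;
  std::vector<std::unique_ptr<Unit>> queue;
  void AddAll(std::vector<std::unique_ptr<Unit>>& units) {
    std::lock_guard<std::mutex> guard(mutex);
    for (auto& u : units) queue.push_back(std::move(u));
  }
};

class UnitBuilder {
 public:
  explicit UnitBuilder(Sink* sink) : sink_(sink) {}

  // Buffer locally; no lock is taken here.
  void AddUnit() { units_.emplace_back(new Unit()); }

  // Move the whole batch into the sink under a single lock acquisition.
  bool Commit() {
    if (units_.empty()) return false;
    sink_->AddAll(units_);
    units_.clear();
    return true;
  }

 private:
  Sink* sink_;
  std::vector<std::unique_ptr<Unit>> units_;
};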
+
+// Run by each compilation task and by the main thread (i.e. in both
+// foreground and background threads). Fetches the next compilation unit from
+// the {CompilationState} and executes it; the finished unit is then handed
+// back to the {CompilationState} for finishing. Returns false if no unit was
+// available.
+bool FetchAndExecuteCompilationUnit(CompilationState* compilation_state) {
DisallowHeapAllocation no_allocation;
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
DisallowCodeDependencyChange no_dependency_change;
- std::unique_ptr<compiler::WasmCompilationUnit> unit;
- {
- base::LockGuard<base::Mutex> guard(&compilation_units_mutex_);
- if (compilation_units_.empty()) return false;
- unit = std::move(compilation_units_.back());
- compilation_units_.pop_back();
- }
+ std::unique_ptr<compiler::WasmCompilationUnit> unit =
+ compilation_state->GetNextCompilationUnit();
+ if (unit == nullptr) return false;
+
unit->ExecuteCompilation();
- {
- base::LockGuard<base::Mutex> guard(&result_mutex_);
- executed_units_.Schedule(std::move(unit));
- if (no_finisher_callback != nullptr && !finisher_is_running_) {
- no_finisher_callback();
- // We set the flag here so that not more than one finisher is started.
- finisher_is_running_ = true;
- }
- }
+ compilation_state->ScheduleUnitForFinishing(unit);
+
return true;
}
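
A compressed sketch of the step performed here, using invented types rather than the real CompilationState: one unit is claimed from the shared to-do queue (nullptr signalling that the queue is drained), its parallel phase runs without touching shared state, and the result is queued for a finisher to pick up.

#include <deque>
#include <memory>
#include <mutex>

struct Unit {
  void ExecuteCompilation() { /* parallel phase of one function */ }
};

class State {  // stand-in for the CompilationState
 public:
  std::unique_ptr<Unit> GetNextCompilationUnit() {
    std::lock_guard<std::mutex> guard(mutex_);
    if (todo_.empty()) return nullptr;  // nullptr means "no work left"
    std::unique_ptr<Unit> unit = std::move(todo_.front());
    todo_.pop_front();
    return unit;
  }

  void ScheduleUnitForFinishing(std::unique_ptr<Unit> unit) {
    std::lock_guard<std::mutex> guard(mutex_);
    executed_.push_back(std::move(unit));  // a finisher picks this up later
  }

 private:
  std::mutex mutex_;
  std::deque<std::unique_ptr<Unit>> todo_;
  std::deque<std::unique_ptr<Unit>> executed_;
};

// Returns false once there is nothing left to fetch, like the helper above.
bool FetchAndExecuteOne(State* state) {
  std::unique_ptr<Unit> unit = state->GetNextCompilationUnit();
  if (unit == nullptr) return false;
  unit->ExecuteCompilation();
  state->ScheduleUnitForFinishing(std::move(unit));
  return true;
}

int main() {
  State state;
  while (FetchAndExecuteOne(&state)) {
  }
  return 0;
}
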
-size_t ModuleCompiler::InitializeCompilationUnits(
- const std::vector<WasmFunction>& functions,
- const ModuleWireBytes& wire_bytes, compiler::ModuleEnv* module_env) {
+size_t GetNumFunctionsToCompile(const std::vector<WasmFunction>& functions,
+ compiler::ModuleEnv* module_env) {
+ // TODO(kimanh): Remove FLAG_skip_compiling_wasm_funcs; it was previously
+ // used for debugging and is no longer guaranteed to work.
uint32_t start = module_env->module->num_imported_functions +
FLAG_skip_compiling_wasm_funcs;
uint32_t num_funcs = static_cast<uint32_t>(functions.size());
uint32_t funcs_to_compile = start > num_funcs ? 0 : num_funcs - start;
- CompilationUnitBuilder builder(this);
+ return funcs_to_compile;
+}
+
+void InitializeCompilationUnits(const std::vector<WasmFunction>& functions,
+ const ModuleWireBytes& wire_bytes,
+ compiler::ModuleEnv* module_env,
+ Handle<Code> centry_stub,
+ NativeModule* native_module) {
+ uint32_t start = module_env->module->num_imported_functions +
+ FLAG_skip_compiling_wasm_funcs;
+ uint32_t num_funcs = static_cast<uint32_t>(functions.size());
+
+ CompilationUnitBuilder builder(native_module, module_env, centry_stub);
for (uint32_t i = start; i < num_funcs; ++i) {
const WasmFunction* func = &functions[i];
uint32_t buffer_offset = func->code.offset();
Vector<const uint8_t> bytes(wire_bytes.start() + func->code.offset(),
func->code.end_offset() - func->code.offset());
- WasmName name = wire_bytes.GetName(func);
- DCHECK_IMPLIES(FLAG_wasm_jit_to_native, native_module_ != nullptr);
- builder.AddUnit(module_env, native_module_, func, buffer_offset, bytes,
- name);
- }
- builder.Commit();
- return funcs_to_compile;
-}
-
-void ModuleCompiler::RestartCompilationTasks() {
- v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
- std::shared_ptr<v8::TaskRunner> task_runner =
- V8::GetCurrentPlatform()->GetBackgroundTaskRunner(v8_isolate);
- base::LockGuard<base::Mutex> guard(&tasks_mutex_);
- for (; stopped_compilation_tasks_ > 0; --stopped_compilation_tasks_) {
- task_runner->PostTask(base::make_unique<CompilationTask>(this));
+ WasmName name = wire_bytes.GetName(func, module_env->module);
+ DCHECK_NOT_NULL(native_module);
+ builder.AddUnit(func, buffer_offset, bytes, name);
}
+ builder.Commit();
}
-size_t ModuleCompiler::FinishCompilationUnits(
- std::vector<Handle<Code>>& results, ErrorThrower* thrower) {
- size_t finished = 0;
+void FinishCompilationUnits(CompilationState* compilation_state,
+ ErrorThrower* thrower) {
while (true) {
- int func_index = -1;
- WasmCodeWrapper result = FinishCompilationUnit(thrower, &func_index);
- if (func_index < 0) break;
- ++finished;
- DCHECK_IMPLIES(result.is_null(), thrower->error());
- if (result.is_null()) break;
- if (result.IsCodeObject()) {
- results[func_index] = result.GetCode();
- }
- }
- bool do_restart;
- {
- base::LockGuard<base::Mutex> guard(&compilation_units_mutex_);
- do_restart = !compilation_units_.empty();
- }
- if (do_restart) RestartCompilationTasks();
- return finished;
-}
+ if (compilation_state->failed()) break;
+ std::unique_ptr<compiler::WasmCompilationUnit> unit =
+ compilation_state->GetNextExecutedUnit();
+ if (unit == nullptr) break;
+ wasm::WasmCode* result = unit->FinishCompilation(thrower);
-void ModuleCompiler::SetFinisherIsRunning(bool value) {
- base::LockGuard<base::Mutex> guard(&result_mutex_);
- finisher_is_running_ = value;
-}
-
-WasmCodeWrapper ModuleCompiler::FinishCompilationUnit(ErrorThrower* thrower,
- int* func_index) {
- std::unique_ptr<compiler::WasmCompilationUnit> unit;
- {
- base::LockGuard<base::Mutex> guard(&result_mutex_);
- if (executed_units_.IsEmpty()) return {};
- unit = executed_units_.GetNext();
+ // Update the compilation state.
+ compilation_state->OnFinishedUnit(NotifyCompilationCallback::kNoNotify);
+ DCHECK_IMPLIES(result == nullptr, thrower->error());
+ if (result == nullptr) break;
+ }
+ if (!compilation_state->failed()) {
+ compilation_state->RestartBackgroundTasks();
}
- *func_index = unit->func_index();
- return unit->FinishCompilation(thrower);
}
-void ModuleCompiler::CompileInParallel(const ModuleWireBytes& wire_bytes,
- compiler::ModuleEnv* module_env,
- std::vector<Handle<Code>>& results,
- ErrorThrower* thrower) {
+void CompileInParallel(Isolate* isolate, NativeModule* native_module,
+ const ModuleWireBytes& wire_bytes,
+ compiler::ModuleEnv* module_env,
+ Handle<Code> centry_stub, ErrorThrower* thrower) {
const WasmModule* module = module_env->module;
// Data structures for the parallel compilation.
//-----------------------------------------------------------------------
// For parallel compilation:
// 1) The main thread allocates a compilation unit for each wasm function
- // and stores them in the vector {compilation_units}.
- // 2) The main thread spawns {CompilationTask} instances which run on
+ // and stores them in the vector {compilation_units} within the
+ // {compilation_state}. By adding units to the {compilation_state}, new
+ // {BackgroundCompileTask} instances are spawned which run on
// the background threads.
- // 3.a) The background threads and the main thread pick one compilation
+ // 2.a) The background threads and the main thread pick one compilation
// unit at a time and execute the parallel phase of the compilation
// unit. After finishing the execution of the parallel phase, the
// result is enqueued in {executed_units}.
- // 3.b) If {executed_units} contains a compilation unit, the main thread
+ // 2.b) If {executed_units} contains a compilation unit, the main thread
// dequeues it and finishes the compilation.
- // 4) After the parallel phase of all compilation units has started, the
- // main thread waits for all {CompilationTask} instances to finish.
- // 5) The main thread finishes the compilation.
+ // 3) After the parallel phase of all compilation units has started, the
+ // main thread waits for all {BackgroundCompileTask} instances to finish.
+ // 4) The main thread finishes the compilation.
// Turn on the {CanonicalHandleScope} so that the background threads can
// use the node cache.
- CanonicalHandleScope canonical(isolate_);
+ CanonicalHandleScope canonical(isolate);
- // 1) The main thread allocates a compilation unit for each wasm function
- // and stores them in the vector {compilation_units}.
- InitializeCompilationUnits(module->functions, wire_bytes, module_env);
- executed_units_.EnableThrottling();
-
- // 2) The main thread spawns {CompilationTask} instances which run on
- // the background threads.
- RestartCompilationTasks();
+ CompilationState* compilation_state = native_module->compilation_state();
+ // Make sure that no foreground task is spawned for finishing
+ // the compilation units. This foreground thread will be
+ // responsible for finishing compilation.
+ compilation_state->SetFinisherIsRunning(true);
+ size_t functions_count =
+ GetNumFunctionsToCompile(module->functions, module_env);
+ compilation_state->SetNumberOfFunctionsToCompile(functions_count);
- // 3.a) The background threads and the main thread pick one compilation
+ // 1) The main thread allocates a compilation unit for each wasm function
+ // and stores them in the vector {compilation_units} within the
+ // {compilation_state}. By adding units to the {compilation_state}, new
+ // {BackgroundCompileTask} instances are spawned which run on
+ // background threads.
+ InitializeCompilationUnits(module->functions, wire_bytes, module_env,
+ centry_stub, native_module);
+
+ // 2.a) The background threads and the main thread pick one compilation
// unit at a time and execute the parallel phase of the compilation
// unit. After finishing the execution of the parallel phase, the
// result is enqueued in {executed_units}.
// The foreground task bypasses waiting on memory threshold, because
// its results will immediately be converted to code (below).
- while (FetchAndExecuteCompilationUnit()) {
- // 3.b) If {executed_units} contains a compilation unit, the main thread
+ while (FetchAndExecuteCompilationUnit(compilation_state)) {
+ // 2.b) If {executed_units} contains a compilation unit, the main thread
// dequeues it and finishes the compilation unit. Compilation units
// are finished concurrently to the background threads to save
// memory.
- FinishCompilationUnits(results, thrower);
+ FinishCompilationUnits(compilation_state, thrower);
+
+ if (compilation_state->failed()) break;
+ }
+
+ // 3) After the parallel phase of all compilation units has started, the
+ // main thread waits for all {BackgroundCompileTask} instances to finish -
+ // which happens once they all realize there's no next work item to
+ // process. If compilation already failed, all background tasks have
+ // already been canceled in {FinishCompilationUnits}, and there are
+ // no units to finish.
+ if (!compilation_state->failed()) {
+ compilation_state->CancelAndWait();
+
+ // 4) Finish all compilation units which have been executed while we waited.
+ FinishCompilationUnits(compilation_state, thrower);
}
- // 4) After the parallel phase of all compilation units has started, the
- // main thread waits for all {CompilationTask} instances to finish - which
- // happens once they all realize there's no next work item to process.
- background_task_manager_.CancelAndWait();
- // Finish all compilation units which have been executed while we waited.
- FinishCompilationUnits(results, thrower);
}
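
The numbered steps above describe a producer/consumer pipeline: the main thread seeds a shared work queue, background tasks drain it, and the main thread doubles as the finisher so intermediate results are turned into code promptly. A compressed, standalone sketch of that shape (hypothetical names, plain std::thread instead of V8's task runner) could look like this.

#include <deque>
#include <mutex>
#include <thread>
#include <vector>

struct Unit { int index; };  // stand-in for one function's compilation job

struct Pipeline {
  std::mutex mutex;
  std::deque<Unit> todo;      // 1) filled by the main thread
  std::deque<Unit> executed;  // 2a) filled by workers and the main thread
  int finished = 0;           // 2b)/4) consumed on the main thread only

  bool FetchAndExecuteOne() {
    Unit unit{0};
    {
      std::lock_guard<std::mutex> guard(mutex);
      if (todo.empty()) return false;
      unit = todo.front();
      todo.pop_front();
    }
    // ... the parallel compile phase of `unit` would run here ...
    std::lock_guard<std::mutex> guard(mutex);
    executed.push_back(unit);
    return true;
  }

  void FinishExecutedUnits() {
    while (true) {
      std::lock_guard<std::mutex> guard(mutex);
      if (executed.empty()) return;
      executed.pop_front();
      ++finished;  // stands in for turning the result into code
    }
  }
};

int main() {
  Pipeline p;
  for (int i = 0; i < 64; ++i) p.todo.push_back(Unit{i});  // step 1

  std::vector<std::thread> workers;
  for (int i = 0; i < 4; ++i)
    workers.emplace_back([&p] { while (p.FetchAndExecuteOne()) {} });  // 2a

  // 2a/2b: the main thread also executes units and finishes results early.
  while (p.FetchAndExecuteOne()) p.FinishExecutedUnits();

  for (auto& worker : workers) worker.join();  // 3) wait for background tasks
  p.FinishExecutedUnits();                     // 4) finish the stragglers
  return p.finished == 64 ? 0 : 1;
}

Finishing on the main thread while workers are still producing keeps the executed-units queue short, which is the memory-saving point the real code makes in step 2.b.
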
-void ModuleCompiler::CompileSequentially(const ModuleWireBytes& wire_bytes,
- compiler::ModuleEnv* module_env,
- std::vector<Handle<Code>>& results,
- ErrorThrower* thrower) {
+void CompileSequentially(Isolate* isolate, NativeModule* native_module,
+ const ModuleWireBytes& wire_bytes,
+ compiler::ModuleEnv* module_env,
+ ErrorThrower* thrower) {
DCHECK(!thrower->error());
const WasmModule* module = module_env->module;
@@ -1489,23 +1129,20 @@ void ModuleCompiler::CompileSequentially(const ModuleWireBytes& wire_bytes,
if (func.imported) continue; // Imports are compiled at instantiation time.
// Compile the function.
- WasmCodeWrapper code = compiler::WasmCompilationUnit::CompileWasmFunction(
- native_module_, thrower, isolate_, wire_bytes, module_env, &func);
- if (code.is_null()) {
- TruncatedUserString<> name(wire_bytes.GetName(&func));
+ wasm::WasmCode* code = compiler::WasmCompilationUnit::CompileWasmFunction(
+ native_module, thrower, isolate, wire_bytes, module_env, &func);
+ if (code == nullptr) {
+ TruncatedUserString<> name(wire_bytes.GetName(&func, module));
thrower->CompileError("Compilation of #%d:%.*s failed.", i, name.length(),
name.start());
break;
}
- if (code.IsCodeObject()) {
- results[i] = code.GetCode();
- }
}
}
-void ModuleCompiler::ValidateSequentially(const ModuleWireBytes& wire_bytes,
- compiler::ModuleEnv* module_env,
- ErrorThrower* thrower) {
+void ValidateSequentially(Isolate* isolate, const ModuleWireBytes& wire_bytes,
+ compiler::ModuleEnv* module_env,
+ ErrorThrower* thrower) {
DCHECK(!thrower->error());
const WasmModule* module = module_env->module;
@@ -1517,9 +1154,10 @@ void ModuleCompiler::ValidateSequentially(const ModuleWireBytes& wire_bytes,
FunctionBody body{func.sig, func.code.offset(), base + func.code.offset(),
base + func.code.end_offset()};
DecodeResult result = VerifyWasmCodeWithStats(
- isolate_->allocator(), module, body, module->is_wasm(), counters());
+ isolate->allocator(), module, body, module->is_wasm(),
+ isolate->async_counters().get());
if (result.failed()) {
- TruncatedUserString<> name(wire_bytes.GetName(&func));
+ TruncatedUserString<> name(wire_bytes.GetName(&func, module));
thrower->CompileError("Compiling function #%d:%.*s failed: %s @+%u", i,
name.length(), name.start(),
result.error_msg().c_str(), result.error_offset());
@@ -1528,390 +1166,35 @@ void ModuleCompiler::ValidateSequentially(const ModuleWireBytes& wire_bytes,
}
}
-MaybeHandle<WasmModuleObject> CompileToModuleObject(
+MaybeHandle<WasmModuleObject> CompileToModuleObjectInternal(
Isolate* isolate, ErrorThrower* thrower, std::unique_ptr<WasmModule> module,
const ModuleWireBytes& wire_bytes, Handle<Script> asm_js_script,
Vector<const byte> asm_js_offset_table_bytes) {
+ WasmModule* wasm_module = module.get();
Handle<Code> centry_stub = CEntryStub(isolate, 1).GetCode();
- // TODO(mtrofin): the wasm::NativeModule parameter to the ModuleCompiler
- // constructor is null here, and initialized in CompileToModuleObjectInternal.
- // This is a point-in-time, until we remove the FLAG_wasm_jit_to_native flag,
- // and stop needing a FixedArray for code for the non-native case. Otherwise,
- // we end up moving quite a bit of initialization logic here that is also
- // needed in CompileToModuleObjectInternal, complicating the change.
- ModuleCompiler compiler(isolate, module.get(), centry_stub, nullptr);
- return compiler.CompileToModuleObjectInternal(thrower, std::move(module),
- wire_bytes, asm_js_script,
- asm_js_offset_table_bytes);
-}
-
-namespace {
-bool compile_lazy(const WasmModule* module) {
- return FLAG_wasm_lazy_compilation ||
- (FLAG_asm_wasm_lazy_compilation && module->is_asm_js());
-}
-
-void FlushICache(const wasm::NativeModule* native_module) {
- for (uint32_t i = 0, e = native_module->FunctionCount(); i < e; ++i) {
- const wasm::WasmCode* code = native_module->GetCode(i);
- if (code == nullptr) continue;
- Assembler::FlushICache(code->instructions().start(),
- code->instructions().size());
- }
-}
-
-void FlushICache(Handle<FixedArray> functions) {
- for (int i = 0, e = functions->length(); i < e; ++i) {
- if (!functions->get(i)->IsCode()) continue;
- Code* code = Code::cast(functions->get(i));
- Assembler::FlushICache(code->instruction_start(), code->instruction_size());
- }
-}
-
-byte* raw_buffer_ptr(MaybeHandle<JSArrayBuffer> buffer, int offset) {
- return static_cast<byte*>(buffer.ToHandleChecked()->backing_store()) + offset;
-}
-
-void RecordStats(const Code* code, Counters* counters) {
- counters->wasm_generated_code_size()->Increment(code->body_size());
- counters->wasm_reloc_size()->Increment(code->relocation_info()->length());
-}
-
-void RecordStats(const wasm::WasmCode* code, Counters* counters) {
- counters->wasm_generated_code_size()->Increment(
- static_cast<int>(code->instructions().size()));
- counters->wasm_reloc_size()->Increment(
- static_cast<int>(code->reloc_info().size()));
-}
-
-void RecordStats(WasmCodeWrapper wrapper, Counters* counters) {
- if (wrapper.IsCodeObject()) {
- RecordStats(*wrapper.GetCode(), counters);
- } else {
- RecordStats(wrapper.GetWasmCode(), counters);
- }
-}
-
-void RecordStats(Handle<FixedArray> functions, Counters* counters) {
- DisallowHeapAllocation no_gc;
- for (int i = 0; i < functions->length(); ++i) {
- Object* val = functions->get(i);
- if (val->IsCode()) RecordStats(Code::cast(val), counters);
- }
-}
-
-void RecordStats(const wasm::NativeModule* native_module, Counters* counters) {
- for (uint32_t i = 0, e = native_module->FunctionCount(); i < e; ++i) {
- const wasm::WasmCode* code = native_module->GetCode(i);
- if (code != nullptr) RecordStats(code, counters);
- }
-}
-
-// Ensure that the code object in <code_table> at offset <func_index> has
-// deoptimization data attached. This is needed for lazy compile stubs which are
-// called from JS_TO_WASM functions or via exported function tables. The deopt
-// data is used to determine which function this lazy compile stub belongs to.
-// TODO(mtrofin): remove the instance and code_table members once we remove the
-// FLAG_wasm_jit_to_native
-WasmCodeWrapper EnsureExportedLazyDeoptData(Isolate* isolate,
- Handle<WasmInstanceObject> instance,
- Handle<FixedArray> code_table,
- wasm::NativeModule* native_module,
- uint32_t func_index) {
- if (!FLAG_wasm_jit_to_native) {
- Handle<Code> code(Code::cast(code_table->get(func_index)), isolate);
- if (code->builtin_index() != Builtins::kWasmCompileLazy) {
- // No special deopt data needed for compiled functions, and imported
- // functions, which map to Illegal at this point (they get compiled at
- // instantiation time).
- DCHECK(code->kind() == Code::WASM_FUNCTION ||
- code->kind() == Code::WASM_TO_JS_FUNCTION ||
- code->kind() == Code::WASM_TO_WASM_FUNCTION ||
- code->builtin_index() == Builtins::kIllegal);
- return WasmCodeWrapper(code);
- }
-
- // deopt_data:
- // #0: weak instance
- // #1: func_index
- // might be extended later for table exports (see
- // EnsureTableExportLazyDeoptData).
- Handle<FixedArray> deopt_data(code->deoptimization_data());
- DCHECK_EQ(0, deopt_data->length() % 2);
- if (deopt_data->length() == 0) {
- code = isolate->factory()->CopyCode(code);
- code_table->set(func_index, *code);
- AttachWasmFunctionInfo(isolate, code, instance, func_index);
- }
-#ifdef DEBUG
- auto func_info = GetWasmFunctionInfo(isolate, code);
- DCHECK_IMPLIES(!instance.is_null(),
- *func_info.instance.ToHandleChecked() == *instance);
- DCHECK_EQ(func_index, func_info.func_index);
-#endif
- return WasmCodeWrapper(code);
- } else {
- wasm::WasmCode* code = native_module->GetCode(func_index);
- // {code} will be nullptr when exporting imports.
- if (code == nullptr || code->kind() != wasm::WasmCode::kLazyStub ||
- !code->IsAnonymous()) {
- return WasmCodeWrapper(code);
- }
- // Clone the lazy builtin into the native module.
- return WasmCodeWrapper(
- native_module->CloneLazyBuiltinInto(code, func_index));
- }
-}
-
-// Ensure that the code object in <code_table> at offset <func_index> has
-// deoptimization data attached. This is needed for lazy compile stubs which are
-// called from JS_TO_WASM functions or via exported function tables. The deopt
-// data is used to determine which function this lazy compile stub belongs to.
-// TODO(mtrofin): remove the instance and code_table members once we remove the
-// FLAG_wasm_jit_to_native
-WasmCodeWrapper EnsureTableExportLazyDeoptData(
- Isolate* isolate, Handle<WasmInstanceObject> instance,
- Handle<FixedArray> code_table, wasm::NativeModule* native_module,
- uint32_t func_index, Handle<FixedArray> export_table, int export_index,
- std::unordered_map<uint32_t, uint32_t>* num_table_exports) {
- if (!FLAG_wasm_jit_to_native) {
- Handle<Code> code =
- EnsureExportedLazyDeoptData(isolate, instance, code_table,
- native_module, func_index)
- .GetCode();
- if (code->builtin_index() != Builtins::kWasmCompileLazy)
- return WasmCodeWrapper(code);
-
- // TODO(6792): No longer needed once WebAssembly code is off heap.
- CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
-
- // deopt_data:
- // #0: weak instance
- // #1: func_index
- // [#2: export table
- // #3: export table index]
- // [#4: export table
- // #5: export table index]
- // ...
- // num_table_exports counts down and determines the index for the new
- // export table entry.
- auto table_export_entry = num_table_exports->find(func_index);
- DCHECK(table_export_entry != num_table_exports->end());
- DCHECK_LT(0, table_export_entry->second);
- uint32_t this_idx = 2 * table_export_entry->second;
- --table_export_entry->second;
- Handle<FixedArray> deopt_data(code->deoptimization_data());
- DCHECK_EQ(0, deopt_data->length() % 2);
- if (deopt_data->length() == 2) {
- // Then only the "header" (#0 and #1) exists. Extend for the export table
- // entries (make space for this_idx + 2 elements).
- deopt_data = isolate->factory()->CopyFixedArrayAndGrow(deopt_data,
- this_idx, TENURED);
- code->set_deoptimization_data(*deopt_data);
- }
- DCHECK_LE(this_idx + 2, deopt_data->length());
- DCHECK(deopt_data->get(this_idx)->IsUndefined(isolate));
- DCHECK(deopt_data->get(this_idx + 1)->IsUndefined(isolate));
- deopt_data->set(this_idx, *export_table);
- deopt_data->set(this_idx + 1, Smi::FromInt(export_index));
- return WasmCodeWrapper(code);
- } else {
- const wasm::WasmCode* code =
- EnsureExportedLazyDeoptData(isolate, instance, code_table,
- native_module, func_index)
- .GetWasmCode();
- if (code == nullptr || code->kind() != wasm::WasmCode::kLazyStub)
- return WasmCodeWrapper(code);
-
- // deopt_data:
- // [#0: export table
- // #1: export table index]
- // [#2: export table
- // #3: export table index]
- // ...
- // num_table_exports counts down and determines the index for the new
- // export table entry.
- auto table_export_entry = num_table_exports->find(func_index);
- DCHECK(table_export_entry != num_table_exports->end());
- DCHECK_LT(0, table_export_entry->second);
- --table_export_entry->second;
- uint32_t this_idx = 2 * table_export_entry->second;
- int int_func_index = static_cast<int>(func_index);
- Object* deopt_entry =
- native_module->compiled_module()->lazy_compile_data()->get(
- int_func_index);
- FixedArray* deopt_data = nullptr;
- if (!deopt_entry->IsFixedArray()) {
- // we count indices down, so we enter here first for the
- // largest index.
- deopt_data = *isolate->factory()->NewFixedArray(this_idx + 2, TENURED);
- native_module->compiled_module()->lazy_compile_data()->set(int_func_index,
- deopt_data);
- } else {
- deopt_data = FixedArray::cast(deopt_entry);
- DCHECK_LE(this_idx + 2, deopt_data->length());
- }
- DCHECK(deopt_data->get(this_idx)->IsUndefined(isolate));
- DCHECK(deopt_data->get(this_idx + 1)->IsUndefined(isolate));
- deopt_data->set(this_idx, *export_table);
- deopt_data->set(this_idx + 1, Smi::FromInt(export_index));
- return WasmCodeWrapper(code);
- }
-}
-
-bool in_bounds(uint32_t offset, uint32_t size, uint32_t upper) {
- return offset + size <= upper && offset + size >= offset;
-}
-
-using WasmInstanceMap =
- IdentityMap<Handle<WasmInstanceObject>, FreeStoreAllocationPolicy>;
-
-WasmCodeWrapper MakeWasmToWasmWrapper(
- Isolate* isolate, Handle<WasmExportedFunction> imported_function,
- FunctionSig* expected_sig, FunctionSig** sig,
- WasmInstanceMap* imported_instances, Handle<WasmInstanceObject> instance,
- uint32_t index) {
- // TODO(wasm): cache WASM-to-WASM wrappers by signature and clone+patch.
- Handle<WasmInstanceObject> imported_instance(imported_function->instance(),
- isolate);
- imported_instances->Set(imported_instance, imported_instance);
- WasmContext* new_wasm_context = imported_instance->wasm_context()->get();
- Address new_wasm_context_address =
- reinterpret_cast<Address>(new_wasm_context);
- *sig = imported_instance->module()
- ->functions[imported_function->function_index()]
- .sig;
- if (expected_sig && !expected_sig->Equals(*sig)) return {};
-
- if (!FLAG_wasm_jit_to_native) {
- Handle<Code> wrapper_code = compiler::CompileWasmToWasmWrapper(
- isolate, imported_function->GetWasmCode(), *sig,
- new_wasm_context_address);
- // Set the deoptimization data for the WasmToWasm wrapper. This is
- // needed by the interpreter to find the imported instance for
- // a cross-instance call.
- AttachWasmFunctionInfo(isolate, wrapper_code, imported_instance,
- imported_function->function_index());
- return WasmCodeWrapper(wrapper_code);
- } else {
- Handle<Code> code = compiler::CompileWasmToWasmWrapper(
- isolate, imported_function->GetWasmCode(), *sig,
- new_wasm_context_address);
- return WasmCodeWrapper(
- instance->compiled_module()->GetNativeModule()->AddCodeCopy(
- code, wasm::WasmCode::kWasmToWasmWrapper, index));
- }
-}
-
-WasmCodeWrapper UnwrapExportOrCompileImportWrapper(
- Isolate* isolate, FunctionSig* sig, Handle<JSReceiver> target,
- uint32_t import_index, ModuleOrigin origin,
- WasmInstanceMap* imported_instances, Handle<FixedArray> js_imports_table,
- Handle<WasmInstanceObject> instance) {
- if (WasmExportedFunction::IsWasmExportedFunction(*target)) {
- FunctionSig* unused = nullptr;
- return MakeWasmToWasmWrapper(
- isolate, Handle<WasmExportedFunction>::cast(target), sig, &unused,
- imported_instances, instance, import_index);
- }
- // No wasm function or being debugged. Compile a new wrapper for the new
- // signature.
- if (FLAG_wasm_jit_to_native) {
- Handle<Code> temp_code = compiler::CompileWasmToJSWrapper(
- isolate, target, sig, import_index, origin,
- instance->compiled_module()->use_trap_handler(), js_imports_table);
- return WasmCodeWrapper(
- instance->compiled_module()->GetNativeModule()->AddCodeCopy(
- temp_code, wasm::WasmCode::kWasmToJsWrapper, import_index));
- } else {
- return WasmCodeWrapper(compiler::CompileWasmToJSWrapper(
- isolate, target, sig, import_index, origin,
- instance->compiled_module()->use_trap_handler(), js_imports_table));
- }
-}
-
-double MonotonicallyIncreasingTimeInMs() {
- return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
- base::Time::kMillisecondsPerSecond;
-}
-
-void FunctionTableFinalizer(const v8::WeakCallbackInfo<void>& data) {
- GlobalHandles::Destroy(reinterpret_cast<Object**>(
- reinterpret_cast<JSObject**>(data.GetParameter())));
-}
-
-std::unique_ptr<compiler::ModuleEnv> CreateDefaultModuleEnv(
- Isolate* isolate, WasmModule* module, Handle<Code> illegal_builtin) {
- std::vector<GlobalHandleAddress> function_tables;
-
- for (size_t i = module->function_tables.size(); i > 0; --i) {
- Handle<Object> func_table =
- isolate->global_handles()->Create(isolate->heap()->undefined_value());
- GlobalHandles::MakeWeak(func_table.location(), func_table.location(),
- &FunctionTableFinalizer,
- v8::WeakCallbackType::kFinalizer);
- function_tables.push_back(func_table.address());
- }
-
- // TODO(kschimpf): Add module-specific policy handling here (see v8:7143)?
- bool use_trap_handler = trap_handler::IsTrapHandlerEnabled();
- return base::make_unique<compiler::ModuleEnv>(
- module, function_tables, std::vector<Handle<Code>>{}, illegal_builtin,
- use_trap_handler);
-}
-
-// TODO(mtrofin): remove code_table when we don't need FLAG_wasm_jit_to_native
-Handle<WasmCompiledModule> NewCompiledModule(Isolate* isolate,
- WasmModule* module,
- Handle<FixedArray> code_table,
- Handle<FixedArray> export_wrappers,
- compiler::ModuleEnv* env) {
- Handle<WasmCompiledModule> compiled_module =
- WasmCompiledModule::New(isolate, module, code_table, export_wrappers,
- env->function_tables, env->use_trap_handler);
- return compiled_module;
-}
-
-template <typename T>
-void ReopenHandles(Isolate* isolate, const std::vector<Handle<T>>& vec) {
- auto& mut = const_cast<std::vector<Handle<T>>&>(vec);
- for (size_t i = 0; i < mut.size(); i++) {
- mut[i] = Handle<T>(*mut[i], isolate);
- }
-}
-
-} // namespace
-
-MaybeHandle<WasmModuleObject> ModuleCompiler::CompileToModuleObjectInternal(
- ErrorThrower* thrower, std::unique_ptr<WasmModule> module,
- const ModuleWireBytes& wire_bytes, Handle<Script> asm_js_script,
- Vector<const byte> asm_js_offset_table_bytes) {
TimedHistogramScope wasm_compile_module_time_scope(
- module_->is_wasm() ? counters()->wasm_compile_wasm_module_time()
- : counters()->wasm_compile_asm_module_time());
+ wasm_module->is_wasm()
+ ? isolate->async_counters()->wasm_compile_wasm_module_time()
+ : isolate->async_counters()->wasm_compile_asm_module_time());
// TODO(6792): No longer needed once WebAssembly code is off heap. Use
// base::Optional to be able to close the scope before notifying the debugger.
base::Optional<CodeSpaceMemoryModificationScope> modification_scope(
- base::in_place_t(), isolate_->heap());
- // The {module} parameter is passed in to transfer ownership of the WasmModule
- // to this function. The WasmModule itself existed already as an instance
- // variable of the ModuleCompiler. We check here that the parameter and the
- // instance variable actually point to the same object.
- DCHECK_EQ(module.get(), module_);
+ base::in_place_t(), isolate->heap());
+
// Check whether lazy compilation is enabled for this module.
- bool lazy_compile = compile_lazy(module_);
+ bool lazy_compile = compile_lazy(wasm_module);
- Factory* factory = isolate_->factory();
+ Factory* factory = isolate->factory();
// Create heap objects for script, module bytes and asm.js offset table to
// be stored in the shared module data.
Handle<Script> script;
Handle<ByteArray> asm_js_offset_table;
if (asm_js_script.is_null()) {
- script = CreateWasmScript(isolate_, wire_bytes);
+ script = CreateWasmScript(isolate, wire_bytes);
} else {
script = asm_js_script;
asm_js_offset_table =
- isolate_->factory()->NewByteArray(asm_js_offset_table_bytes.length());
+ isolate->factory()->NewByteArray(asm_js_offset_table_bytes.length());
asm_js_offset_table->copy_in(0, asm_js_offset_table_bytes.start(),
asm_js_offset_table_bytes.length());
}
@@ -1927,7 +1210,7 @@ MaybeHandle<WasmModuleObject> ModuleCompiler::CompileToModuleObjectInternal(
// The {module_wrapper} will take ownership of the {WasmModule} object,
// and it will be destroyed when the GC reclaims the wrapper object.
Handle<WasmModuleWrapper> module_wrapper =
- WasmModuleWrapper::From(isolate_, module.release());
+ WasmModuleWrapper::From(isolate, module.release());
// Create the shared module data.
// TODO(clemensh): For the same module (same bytes / same hash), we should
@@ -1935,97 +1218,66 @@ MaybeHandle<WasmModuleObject> ModuleCompiler::CompileToModuleObjectInternal(
// breakpoints on a (potentially empty) subset of the instances.
Handle<WasmSharedModuleData> shared = WasmSharedModuleData::New(
- isolate_, module_wrapper, Handle<SeqOneByteString>::cast(module_bytes),
+ isolate, module_wrapper, Handle<SeqOneByteString>::cast(module_bytes),
script, asm_js_offset_table);
- if (lazy_compile) WasmSharedModuleData::PrepareForLazyCompilation(shared);
-
- Handle<Code> init_builtin = lazy_compile
- ? BUILTIN_CODE(isolate_, WasmCompileLazy)
- : BUILTIN_CODE(isolate_, Illegal);
-
- // TODO(mtrofin): remove code_table and code_table_size when we don't
- // need FLAG_wasm_jit_to_native anymore. Keep export_wrappers.
- int code_table_size = static_cast<int>(module_->functions.size());
- int export_wrappers_size = static_cast<int>(module_->num_exported_functions);
- Handle<FixedArray> code_table =
- factory->NewFixedArray(static_cast<int>(code_table_size), TENURED);
+
+ int export_wrappers_size =
+ static_cast<int>(wasm_module->num_exported_functions);
Handle<FixedArray> export_wrappers =
factory->NewFixedArray(static_cast<int>(export_wrappers_size), TENURED);
- // Initialize the code table.
- for (int i = 0, e = code_table->length(); i < e; ++i) {
- code_table->set(i, *init_builtin);
- }
-
+ Handle<Code> init_builtin = BUILTIN_CODE(isolate, Illegal);
for (int i = 0, e = export_wrappers->length(); i < e; ++i) {
export_wrappers->set(i, *init_builtin);
}
- auto env = CreateDefaultModuleEnv(isolate_, module_, init_builtin);
+ auto env = CreateDefaultModuleEnv(isolate, wasm_module);
// Create the compiled module object and populate with compiled functions
// and information needed at instantiation time. This object needs to be
// serializable. Instantiation may occur off a deserialized version of this
// object.
- Handle<WasmCompiledModule> compiled_module = NewCompiledModule(
- isolate_, shared->module(), code_table, export_wrappers, env.get());
- native_module_ = compiled_module->GetNativeModule();
- compiled_module->OnWasmModuleDecodingComplete(shared);
- if (lazy_compile && FLAG_wasm_jit_to_native) {
- Handle<FixedArray> lazy_compile_data = isolate_->factory()->NewFixedArray(
- static_cast<int>(module_->functions.size()), TENURED);
- compiled_module->set_lazy_compile_data(*lazy_compile_data);
- }
+ Handle<WasmCompiledModule> compiled_module =
+ NewCompiledModule(isolate, shared->module(), export_wrappers, env.get());
+ NativeModule* native_module = compiled_module->GetNativeModule();
+ compiled_module->set_shared(*shared);
+ if (lazy_compile) {
+ if (wasm_module->is_wasm()) {
+ // Validate wasm modules for lazy compilation. Don't validate asm.js
+ // modules; they are valid by construction (otherwise a CHECK will fail
+ // during lazy compilation).
+ // TODO(clemensh): According to the spec, we can actually skip validation
+ // at module creation time, and return a function that always traps at
+ // (lazy) compilation time.
+ ValidateSequentially(isolate, wire_bytes, env.get(), thrower);
+ if (thrower->error()) return {};
+ }
- if (!lazy_compile) {
+ native_module->SetLazyBuiltin(BUILTIN_CODE(isolate, WasmCompileLazy));
+ } else {
size_t funcs_to_compile =
- module_->functions.size() - module_->num_imported_functions;
+ wasm_module->functions.size() - wasm_module->num_imported_functions;
bool compile_parallel =
!FLAG_trace_wasm_decoder && FLAG_wasm_num_compilation_tasks > 0 &&
funcs_to_compile > 1 &&
- V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads() > 0;
- // Avoid a race condition by collecting results into a second vector.
- std::vector<Handle<Code>> results(
- FLAG_wasm_jit_to_native ? 0 : env->module->functions.size());
+ V8::GetCurrentPlatform()->NumberOfWorkerThreads() > 0;
if (compile_parallel) {
- CompileInParallel(wire_bytes, env.get(), results, thrower);
+ CompileInParallel(isolate, native_module, wire_bytes, env.get(),
+ centry_stub, thrower);
} else {
- CompileSequentially(wire_bytes, env.get(), results, thrower);
+ CompileSequentially(isolate, native_module, wire_bytes, env.get(),
+ thrower);
}
if (thrower->error()) return {};
- if (!FLAG_wasm_jit_to_native) {
- // At this point, compilation has completed. Update the code table.
- for (size_t i =
- module_->num_imported_functions + FLAG_skip_compiling_wasm_funcs;
- i < results.size(); ++i) {
- Code* code = *results[i];
- code_table->set(static_cast<int>(i), code);
- RecordStats(code, counters());
- }
- } else {
- RecordStats(native_module_, counters());
- }
- } else {
- if (module_->is_wasm()) {
- // Validate wasm modules for lazy compilation. Don't validate asm.js
- // modules, they are valid by construction (otherwise a CHECK will fail
- // during lazy compilation).
- // TODO(clemensh): According to the spec, we can actually skip validation
- // at module creation time, and return a function that always traps at
- // (lazy) compilation time.
- ValidateSequentially(wire_bytes, env.get(), thrower);
- }
- if (FLAG_wasm_jit_to_native) {
- native_module_->SetLazyBuiltin(BUILTIN_CODE(isolate_, WasmCompileLazy));
- }
+ RecordStats(native_module, isolate->async_counters().get());
}
- if (thrower->error()) return {};
// Compile JS->wasm wrappers for exported functions.
- CompileJsToWasmWrappers(isolate_, compiled_module, counters());
+ CompileJsToWasmWrappers(isolate, compiled_module,
+ isolate->async_counters().get());
Handle<WasmModuleObject> result =
- WasmModuleObject::New(isolate_, compiled_module);
+ WasmModuleObject::New(isolate, compiled_module);
// If we created a wasm script, finish it now and make it public to the
// debugger.
@@ -2034,12 +1286,120 @@ MaybeHandle<WasmModuleObject> ModuleCompiler::CompileToModuleObjectInternal(
// debugger.
modification_scope.reset();
script->set_wasm_compiled_module(*compiled_module);
- isolate_->debug()->OnAfterCompile(script);
+ isolate->debug()->OnAfterCompile(script);
}
return result;
}
+// The runnable task that finishes compilation in the foreground (e.g. by
+// updating the NativeModule, the code table, etc.).
+class FinishCompileTask : public CancelableTask {
+ public:
+ explicit FinishCompileTask(CompilationState* compilation_state,
+ CancelableTaskManager* task_manager)
+ : CancelableTask(task_manager), compilation_state_(compilation_state) {}
+
+ void RunInternal() override {
+ Isolate* isolate = compilation_state_->isolate();
+ HandleScope scope(isolate);
+ SaveContext saved_context(isolate);
+ isolate->set_context(nullptr);
+
+ TRACE_COMPILE("(4a) Finishing compilation units...\n");
+ if (compilation_state_->failed()) {
+ compilation_state_->SetFinisherIsRunning(false);
+ return;
+ }
+
+ // We execute for 1 ms and then reschedule the task, same as the GC.
+ double deadline = MonotonicallyIncreasingTimeInMs() + 1.0;
+ while (true) {
+ compilation_state_->RestartBackgroundTasks();
+
+ std::unique_ptr<compiler::WasmCompilationUnit> unit =
+ compilation_state_->GetNextExecutedUnit();
+
+ if (unit == nullptr) {
+ // It might happen that a background task just scheduled a unit to be
+ // finished, but did not start a finisher task since the flag was still
+ // set. Check for this case, and continue if there is more work.
+ compilation_state_->SetFinisherIsRunning(false);
+ if (compilation_state_->HasCompilationUnitToFinish() &&
+ compilation_state_->SetFinisherIsRunning(true)) {
+ continue;
+ }
+ break;
+ }
+
+ ErrorThrower thrower(compilation_state_->isolate(), "AsyncCompile");
+ wasm::WasmCode* result = unit->FinishCompilation(&thrower);
+
+ if (thrower.error()) {
+ DCHECK_NULL(result);
+ USE(result);
+ SaveContext saved_context(isolate);
+ isolate->set_context(
+ unit->native_module()->compiled_module()->native_context());
+ Handle<Object> error = thrower.Reify();
+ compilation_state_->OnError(error, NotifyCompilationCallback::kNotify);
+ compilation_state_->SetFinisherIsRunning(false);
+ break;
+ }
+
+ // Update the compilation state, and possibly notify
+ // threads waiting for events.
+ compilation_state_->OnFinishedUnit(NotifyCompilationCallback::kNotify);
+
+ if (deadline < MonotonicallyIncreasingTimeInMs()) {
+ // We reached the deadline. We reschedule this task and return
+ // immediately. Since we rescheduled this task already, we do not set
+ // the FinisherIsRunning flag to false.
+ compilation_state_->ScheduleFinisherTask();
+ return;
+ }
+ }
+ }
+
+ private:
+ CompilationState* compilation_state_;
+};
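
A small, self-contained sketch (invented helpers, not the V8 task runner) of the time-slicing idea behind the 1 ms deadline above: the finisher works until its budget is used up, reposts itself, and therefore never blocks the foreground thread for more than one slice.

#include <chrono>
#include <deque>
#include <functional>
#include <thread>

using Clock = std::chrono::steady_clock;

// A toy foreground task queue standing in for the platform's task runner.
std::deque<std::function<void()>> g_foreground_tasks;

// Finish pending units for at most `budget`, then repost the remainder,
// mirroring the 1 ms deadline used in the finisher task above.
void FinishWithBudget(int* pending, std::chrono::milliseconds budget) {
  const Clock::time_point deadline = Clock::now() + budget;
  while (*pending > 0) {
    --*pending;  // stands in for finishing one executed compilation unit
    std::this_thread::sleep_for(std::chrono::microseconds(200));
    if (Clock::now() > deadline) {
      g_foreground_tasks.push_back(
          [pending, budget] { FinishWithBudget(pending, budget); });
      return;  // yield; other foreground work can run before the next slice
    }
  }
}

int main() {
  int pending = 50;
  g_foreground_tasks.push_back(
      [&pending] { FinishWithBudget(&pending, std::chrono::milliseconds(1)); });
  while (!g_foreground_tasks.empty()) {
    std::function<void()> task = g_foreground_tasks.front();
    g_foreground_tasks.pop_front();
    task();  // the real event loop would interleave other tasks here
  }
  return pending == 0 ? 0 : 1;
}
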
+
+// The runnable task that performs compilations in the background.
+class BackgroundCompileTask : public CancelableTask {
+ public:
+ explicit BackgroundCompileTask(CompilationState* compilation_state,
+ CancelableTaskManager* task_manager)
+ : CancelableTask(task_manager), compilation_state_(compilation_state) {}
+
+ void RunInternal() override {
+ TRACE_COMPILE("(3b) Compiling...\n");
+ // The number of currently running background tasks is reduced either in
+ // {StopBackgroundCompilationTaskForThrottling} or in
+ // {OnBackgroundTaskStopped}.
+ while (!compilation_state_->StopBackgroundCompilationTaskForThrottling()) {
+ if (compilation_state_->failed() ||
+ !FetchAndExecuteCompilationUnit(compilation_state_)) {
+ compilation_state_->OnBackgroundTaskStopped();
+ break;
+ }
+ }
+ }
+
+ private:
+ CompilationState* compilation_state_;
+};
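
A rough sketch, with invented names, of the throttled worker loop above: each background task keeps pulling work until either the shared state asks it to pause (throttling) or there is nothing left to do, and only in the latter case does it report that it stopped, so the count of live tasks stays accurate for later restarts.

#include <atomic>

// Hypothetical compilation state exposing only the hooks the loop needs.
struct ThrottledState {
  // True when enough results are queued up and this task should pause itself.
  bool StopBackgroundTaskForThrottling() { return throttle.load(); }
  bool failed() const { return compile_failed.load(); }
  // Claims and "compiles" one unit; false once the work queue is empty.
  bool FetchAndExecuteOne() { return remaining.fetch_sub(1) > 0; }
  // Keeps the bookkeeping of live tasks accurate so they can be restarted.
  void OnBackgroundTaskStopped() { running_tasks.fetch_sub(1); }

  std::atomic<bool> throttle{false};
  std::atomic<bool> compile_failed{false};
  std::atomic<int> remaining{100};
  std::atomic<int> running_tasks{1};
};

// Mirrors the worker loop above: stop either because of throttling or because
// the work (or the whole compilation) is done.
void BackgroundWorker(ThrottledState* state) {
  while (!state->StopBackgroundTaskForThrottling()) {
    if (state->failed() || !state->FetchAndExecuteOne()) {
      state->OnBackgroundTaskStopped();
      break;
    }
  }
}

int main() {
  ThrottledState state;
  BackgroundWorker(&state);  // single-threaded demo run
  return state.remaining.load() <= 0 ? 0 : 1;
}
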
+} // namespace
+
+MaybeHandle<WasmModuleObject> CompileToModuleObject(
+ Isolate* isolate, ErrorThrower* thrower, std::unique_ptr<WasmModule> module,
+ const ModuleWireBytes& wire_bytes, Handle<Script> asm_js_script,
+ Vector<const byte> asm_js_offset_table_bytes) {
+ return CompileToModuleObjectInternal(isolate, thrower, std::move(module),
+ wire_bytes, asm_js_script,
+ asm_js_offset_table_bytes);
+}
+
InstanceBuilder::InstanceBuilder(Isolate* isolate, ErrorThrower* thrower,
Handle<WasmModuleObject> module_object,
MaybeHandle<JSReceiver> ffi,
@@ -2080,111 +1440,41 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
//--------------------------------------------------------------------------
// Reuse the compiled module (if no owner), otherwise clone.
//--------------------------------------------------------------------------
- // TODO(mtrofin): remove code_table
- // when FLAG_wasm_jit_to_native is not needed
- Handle<FixedArray> code_table;
- Handle<FixedArray> wrapper_table;
- MaybeHandle<WasmInstanceObject> owner;
- // native_module is the one we're building now, old_module
- // is the one we clone from. They point to the same place if
- // we don't need to clone.
+ Handle<FixedArray> export_wrappers;
wasm::NativeModule* native_module = nullptr;
- wasm::NativeModule* old_module = nullptr;
+ // Root the old instance, if any, in case later allocation causes GC,
+ // to prevent the finalizer from running for the old instance.
+ MaybeHandle<WasmInstanceObject> old_instance;
TRACE("Starting new module instantiation\n");
+ Handle<WasmCompiledModule> original =
+ handle(module_object_->compiled_module());
{
- // Root the owner, if any, before doing any allocations, which
- // may trigger GC.
- // Both owner and original template need to be in sync. Even
- // after we lose the original template handle, the code
- // objects we copied from it have data relative to the
- // instance - such as globals addresses.
- Handle<WasmCompiledModule> original;
- {
- DisallowHeapAllocation no_gc;
- original = handle(module_object_->compiled_module());
- if (original->has_weak_owning_instance()) {
- owner = handle(WasmInstanceObject::cast(
- original->weak_owning_instance()->value()));
- }
- }
- DCHECK(!original.is_null());
- if (original->has_weak_owning_instance()) {
+ if (original->has_instance()) {
+ old_instance = handle(original->owning_instance());
// Clone, but don't insert yet the clone in the instances chain.
- // We do that last. Since we are holding on to the owner instance,
+ // We do that last. Since we are holding on to the old instance,
// the owner + original state used for cloning and patching
// won't be mutated by possible finalizer runs.
- DCHECK(!owner.is_null());
- if (FLAG_wasm_jit_to_native) {
- TRACE("Cloning from %zu\n", original->GetNativeModule()->instance_id);
- compiled_module_ = WasmCompiledModule::Clone(isolate_, original);
- native_module = compiled_module_->GetNativeModule();
- wrapper_table = handle(compiled_module_->export_wrappers(), isolate_);
- } else {
- TRACE("Cloning from %d\n", original->instance_id());
- compiled_module_ = WasmCompiledModule::Clone(isolate_, original);
- code_table = handle(compiled_module_->code_table(), isolate_);
- wrapper_table = handle(compiled_module_->export_wrappers(), isolate_);
- // Avoid creating too many handles in the outer scope.
- HandleScope scope(isolate_);
-
- // Clone the code for wasm functions and exports.
- for (int i = 0; i < code_table->length(); ++i) {
- Handle<Code> orig_code(Code::cast(code_table->get(i)), isolate_);
- switch (orig_code->kind()) {
- case Code::WASM_TO_JS_FUNCTION:
- case Code::WASM_TO_WASM_FUNCTION:
- // Imports will be overwritten with newly compiled wrappers.
- break;
- case Code::BUILTIN:
- DCHECK_EQ(Builtins::kWasmCompileLazy, orig_code->builtin_index());
- // If this code object has deoptimization data, then we need a
- // unique copy to attach updated deoptimization data.
- if (orig_code->deoptimization_data()->length() > 0) {
- Handle<Code> code = factory->CopyCode(orig_code);
- AttachWasmFunctionInfo(isolate_, code,
- Handle<WasmInstanceObject>(), i);
- code_table->set(i, *code);
- }
- break;
- case Code::WASM_FUNCTION: {
- Handle<Code> code = factory->CopyCode(orig_code);
- AttachWasmFunctionInfo(isolate_, code,
- Handle<WasmInstanceObject>(), i);
- code_table->set(i, *code);
- break;
- }
- default:
- UNREACHABLE();
- }
- }
- }
- for (int i = 0; i < wrapper_table->length(); ++i) {
- Handle<Code> orig_code(Code::cast(wrapper_table->get(i)), isolate_);
+ TRACE("Cloning from %zu\n", original->GetNativeModule()->instance_id);
+ compiled_module_ = WasmCompiledModule::Clone(isolate_, original);
+ native_module = compiled_module_->GetNativeModule();
+ export_wrappers = handle(compiled_module_->export_wrappers(), isolate_);
+ for (int i = 0; i < export_wrappers->length(); ++i) {
+ Handle<Code> orig_code(Code::cast(export_wrappers->get(i)), isolate_);
DCHECK_EQ(orig_code->kind(), Code::JS_TO_WASM_FUNCTION);
Handle<Code> code = factory->CopyCode(orig_code);
- wrapper_table->set(i, *code);
+ export_wrappers->set(i, *code);
}
- if (FLAG_wasm_jit_to_native) {
- RecordStats(native_module, counters());
- } else {
- RecordStats(code_table, counters());
- }
- RecordStats(wrapper_table, counters());
+ RecordStats(native_module, counters());
+ RecordStats(export_wrappers, counters());
} else {
- // There was no owner, so we can reuse the original.
+ // No instance owned the original compiled module.
compiled_module_ = original;
- wrapper_table = handle(compiled_module_->export_wrappers(), isolate_);
- if (FLAG_wasm_jit_to_native) {
- old_module = compiled_module_->GetNativeModule();
- native_module = old_module;
- TRACE("Reusing existing instance %zu\n",
- compiled_module_->GetNativeModule()->instance_id);
- } else {
- code_table = handle(compiled_module_->code_table(), isolate_);
- TRACE("Reusing existing instance %d\n",
- compiled_module_->instance_id());
- }
+ export_wrappers = handle(compiled_module_->export_wrappers(), isolate_);
+ native_module = compiled_module_->GetNativeModule();
+ TRACE("Reusing existing instance %zu\n",
+ compiled_module_->GetNativeModule()->instance_id);
}
Handle<WeakCell> weak_native_context =
isolate_->factory()->NewWeakCell(isolate_->native_context());
@@ -2199,29 +1489,31 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
//--------------------------------------------------------------------------
// Create the WebAssembly.Instance object.
//--------------------------------------------------------------------------
- Zone instantiation_zone(isolate_->allocator(), ZONE_NAME);
- CodeSpecialization code_specialization(isolate_, &instantiation_zone);
+ CodeSpecialization code_specialization;
Handle<WasmInstanceObject> instance =
WasmInstanceObject::New(isolate_, compiled_module_);
+ Handle<WeakCell> weak_instance = factory->NewWeakCell(instance);
+ Handle<WeakCell> old_weak_instance(original->weak_owning_instance(),
+ isolate_);
+ code_specialization.UpdateInstanceReferences(old_weak_instance,
+ weak_instance);
+ js_to_wasm_cache_.SetWeakInstance(weak_instance);
//--------------------------------------------------------------------------
// Set up the globals for the new instance.
//--------------------------------------------------------------------------
- WasmContext* wasm_context = instance->wasm_context()->get();
MaybeHandle<JSArrayBuffer> old_globals;
uint32_t globals_size = module_->globals_size;
if (globals_size > 0) {
- const bool enable_guard_regions = false;
- Handle<JSArrayBuffer> global_buffer =
- NewArrayBuffer(isolate_, globals_size, enable_guard_regions);
- globals_ = global_buffer;
- if (globals_.is_null()) {
+ constexpr bool enable_guard_regions = false;
+ if (!NewArrayBuffer(isolate_, globals_size, enable_guard_regions)
+ .ToHandle(&globals_)) {
thrower_->RangeError("Out of memory: wasm globals");
return {};
}
- wasm_context->globals_start =
- reinterpret_cast<byte*>(global_buffer->backing_store());
- instance->set_globals_buffer(*global_buffer);
+ instance->set_globals_start(
+ reinterpret_cast<byte*>(globals_->backing_store()));
+ instance->set_globals_buffer(*globals_);
}
//--------------------------------------------------------------------------
@@ -2236,7 +1528,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
//--------------------------------------------------------------------------
// Process the imports for the module.
//--------------------------------------------------------------------------
- int num_imported_functions = ProcessImports(code_table, instance);
+ int num_imported_functions = ProcessImports(instance);
if (num_imported_functions < 0) return {};
//--------------------------------------------------------------------------
@@ -2264,9 +1556,13 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
Handle<JSArrayBuffer> memory = memory_.ToHandleChecked();
memory->set_is_neuterable(false);
- DCHECK_IMPLIES(use_trap_handler(),
- module_->is_asm_js() || memory->has_guard_region());
- } else if (initial_pages > 0) {
+ DCHECK_IMPLIES(use_trap_handler(), module_->is_asm_js() ||
+ memory->is_wasm_memory() ||
+ memory->backing_store() == nullptr);
+ } else if (initial_pages > 0 || use_trap_handler()) {
+ // We need to unconditionally create a guard region if using trap handlers,
+ // even when the size is zero, to prevent null-dereference issues
+ // (e.g. https://crbug.com/769637).
// Allocate memory if the initial size is more than 0 pages.
memory_ = AllocateMemory(initial_pages);
if (memory_.is_null()) return {}; // failed to allocate memory
@@ -2289,12 +1585,12 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
WasmMemoryObject::AddInstance(isolate_, memory_object, instance);
if (!memory_.is_null()) {
- // Double-check the {memory} array buffer matches the context.
+ // Double-check the {memory} array buffer matches the instance.
Handle<JSArrayBuffer> memory = memory_.ToHandleChecked();
uint32_t mem_size = 0;
CHECK(memory->byte_length()->ToUint32(&mem_size));
- CHECK_EQ(wasm_context->mem_size, mem_size);
- CHECK_EQ(wasm_context->mem_start, memory->backing_store());
+ CHECK_EQ(instance->memory_size(), mem_size);
+ CHECK_EQ(instance->memory_start(), memory->backing_store());
}
}
@@ -2304,11 +1600,8 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
for (WasmTableInit& table_init : module_->table_inits) {
DCHECK(table_init.table_index < table_instances_.size());
uint32_t base = EvalUint32InitExpr(table_init.offset);
- uint32_t table_size =
- table_instances_[table_init.table_index].function_table->length() /
- compiler::kFunctionTableEntrySize;
- if (!in_bounds(base, static_cast<uint32_t>(table_init.entries.size()),
- table_size)) {
+ size_t table_size = table_instances_[table_init.table_index].table_size;
+ if (!in_bounds(base, table_init.entries.size(), table_size)) {
thrower_->LinkError("table initializer is out of bounds");
return {};
}
@@ -2319,47 +1612,12 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
//--------------------------------------------------------------------------
for (WasmDataSegment& seg : module_->data_segments) {
uint32_t base = EvalUint32InitExpr(seg.dest_addr);
- if (!in_bounds(base, seg.source.length(), wasm_context->mem_size)) {
+ if (!in_bounds(base, seg.source.length(), instance->memory_size())) {
thrower_->LinkError("data segment is out of bounds");
return {};
}
}
- // Set the WasmContext address in wrappers.
- // TODO(wasm): the wasm context should only appear as a constant in wrappers;
- // this code specialization is applied to the whole instance.
- Address wasm_context_address = reinterpret_cast<Address>(wasm_context);
- code_specialization.RelocateWasmContextReferences(wasm_context_address);
- js_to_wasm_cache_.SetContextAddress(wasm_context_address);
-
- if (!FLAG_wasm_jit_to_native) {
- //--------------------------------------------------------------------------
- // Set up the runtime support for the new instance.
- //--------------------------------------------------------------------------
- Handle<WeakCell> weak_link = factory->NewWeakCell(instance);
-
- for (int i = num_imported_functions + FLAG_skip_compiling_wasm_funcs,
- num_functions = static_cast<int>(module_->functions.size());
- i < num_functions; ++i) {
- Handle<Code> code = handle(Code::cast(code_table->get(i)), isolate_);
- if (code->kind() == Code::WASM_FUNCTION) {
- AttachWasmFunctionInfo(isolate_, code, weak_link, i);
- continue;
- }
- DCHECK_EQ(Builtins::kWasmCompileLazy, code->builtin_index());
- int deopt_len = code->deoptimization_data()->length();
- if (deopt_len == 0) continue;
- DCHECK_LE(2, deopt_len);
- DCHECK_EQ(i, Smi::ToInt(code->deoptimization_data()->get(1)));
- code->deoptimization_data()->set(0, *weak_link);
- // Entries [2, deopt_len) encode information about table exports of this
- // function. This is rebuilt in {LoadTableSegments}, so reset it here.
- for (int i = 2; i < deopt_len; ++i) {
- code->deoptimization_data()->set_undefined(isolate_, i);
- }
- }
- }
-
//--------------------------------------------------------------------------
// Set up the exports object for the new instance.
//--------------------------------------------------------------------------
@@ -2370,50 +1628,41 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
// Initialize the indirect function tables.
//--------------------------------------------------------------------------
if (function_table_count > 0) {
- LoadTableSegments(code_table, instance);
+ LoadTableSegments(instance);
}
//--------------------------------------------------------------------------
// Initialize the memory by loading data segments.
//--------------------------------------------------------------------------
if (module_->data_segments.size() > 0) {
- LoadDataSegments(wasm_context);
+ LoadDataSegments(instance);
}
// Patch all code with the relocations registered in code_specialization.
- code_specialization.RelocateDirectCalls(instance);
- code_specialization.ApplyToWholeInstance(*instance, SKIP_ICACHE_FLUSH);
+ code_specialization.RelocateDirectCalls(native_module);
+ code_specialization.ApplyToWholeModule(native_module, SKIP_ICACHE_FLUSH);
- if (FLAG_wasm_jit_to_native) {
- FlushICache(native_module);
- } else {
- FlushICache(code_table);
- }
- FlushICache(wrapper_table);
+ FlushICache(native_module);
+ FlushICache(export_wrappers);
//--------------------------------------------------------------------------
// Unpack and notify signal handler of protected instructions.
//--------------------------------------------------------------------------
if (use_trap_handler()) {
- if (FLAG_wasm_jit_to_native) {
- UnpackAndRegisterProtectedInstructions(isolate_, native_module);
- } else {
- UnpackAndRegisterProtectedInstructionsGC(isolate_, code_table);
- }
+ native_module->UnpackAndRegisterProtectedInstructions();
}
//--------------------------------------------------------------------------
// Insert the compiled module into the weak list of compiled modules.
//--------------------------------------------------------------------------
{
- Handle<WeakCell> link_to_owning_instance = factory->NewWeakCell(instance);
- if (!owner.is_null()) {
+ if (!old_instance.is_null()) {
// Publish the new instance to the instances chain.
DisallowHeapAllocation no_gc;
compiled_module_->InsertInChain(*module_object_);
}
module_object_->set_compiled_module(*compiled_module_);
- compiled_module_->set_weak_owning_instance(*link_to_owning_instance);
+ compiled_module_->set_weak_owning_instance(*weak_instance);
WasmInstanceObject::InstallFinalizer(isolate_, instance);
}
@@ -2443,8 +1692,8 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
//--------------------------------------------------------------------------
if (module_->start_function_index >= 0) {
int start_index = module_->start_function_index;
- WasmCodeWrapper start_code = EnsureExportedLazyDeoptData(
- isolate_, instance, code_table, native_module, start_index);
+ wasm::WasmCode* start_code =
+ native_module->GetIndirectlyCallableCode(start_index);
FunctionSig* sig = module_->functions[start_index].sig;
Handle<Code> wrapper_code = js_to_wasm_cache_.CloneOrCompileJSToWasmWrapper(
isolate_, module_, start_code, start_index,
@@ -2456,12 +1705,8 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
}
DCHECK(!isolate_->has_pending_exception());
- if (FLAG_wasm_jit_to_native) {
- TRACE("Successfully built instance %zu\n",
- compiled_module_->GetNativeModule()->instance_id);
- } else {
- TRACE("Finishing instance %d\n", compiled_module_->instance_id());
- }
+ TRACE("Successfully built instance %zu\n",
+ compiled_module_->GetNativeModule()->instance_id);
TRACE_CHAIN(module_object_->compiled_module());
return instance;
}
@@ -2567,7 +1812,7 @@ uint32_t InstanceBuilder::EvalUint32InitExpr(const WasmInitExpr& expr) {
}
// Load data segments into the memory.
-void InstanceBuilder::LoadDataSegments(WasmContext* wasm_context) {
+void InstanceBuilder::LoadDataSegments(Handle<WasmInstanceObject> instance) {
Handle<SeqOneByteString> module_bytes(
compiled_module_->shared()->module_bytes(), isolate_);
for (const WasmDataSegment& segment : module_->data_segments) {
@@ -2575,8 +1820,8 @@ void InstanceBuilder::LoadDataSegments(WasmContext* wasm_context) {
// Segments of size == 0 are just nops.
if (source_size == 0) continue;
uint32_t dest_offset = EvalUint32InitExpr(segment.dest_addr);
- DCHECK(in_bounds(dest_offset, source_size, wasm_context->mem_size));
- byte* dest = wasm_context->mem_start + dest_offset;
+ DCHECK(in_bounds(dest_offset, source_size, instance->memory_size()));
+ byte* dest = instance->memory_start() + dest_offset;
const byte* src = reinterpret_cast<const byte*>(
module_bytes->GetCharsAddress() + segment.source.offset());
memcpy(dest, src, source_size);
@@ -2648,40 +1893,12 @@ void InstanceBuilder::SanitizeImports() {
}
}
-Handle<FixedArray> InstanceBuilder::SetupWasmToJSImportsTable(
- Handle<WasmInstanceObject> instance) {
- // The js_imports_table is set up so that index 0 has isolate->native_context
- // and for every index, 3*index+1 has the JSReceiver, 3*index+2 has function's
- // global proxy and 3*index+3 has function's context. Hence, the fixed array's
- // size is 3*import_table.size+1.
- int size = static_cast<int>(module_->import_table.size());
- CHECK_LE(size, (kMaxInt - 1) / 3);
- Handle<FixedArray> func_table =
- isolate_->factory()->NewFixedArray(3 * size + 1, TENURED);
- Handle<FixedArray> js_imports_table =
- isolate_->global_handles()->Create(*func_table);
- GlobalHandles::MakeWeak(
- reinterpret_cast<Object**>(js_imports_table.location()),
- js_imports_table.location(), &FunctionTableFinalizer,
- v8::WeakCallbackType::kFinalizer);
- instance->set_js_imports_table(*func_table);
- js_imports_table->set(0, *isolate_->native_context());
- return js_imports_table;
-}
-
// Process the imports, including functions, tables, globals, and memory, in
// order, loading them from the {ffi_} object. Returns the number of imported
// functions.
-int InstanceBuilder::ProcessImports(Handle<FixedArray> code_table,
- Handle<WasmInstanceObject> instance) {
- using compiler::kFunctionTableSignatureOffset;
- using compiler::kFunctionTableCodeOffset;
- using compiler::kFunctionTableEntrySize;
+int InstanceBuilder::ProcessImports(Handle<WasmInstanceObject> instance) {
int num_imported_functions = 0;
int num_imported_tables = 0;
- Handle<FixedArray> js_imports_table = SetupWasmToJSImportsTable(instance);
- WasmInstanceMap imported_wasm_instances(isolate_->heap());
- SetOfNativeModuleModificationScopes set_of_native_module_scopes;
DCHECK_EQ(module_->import_table.size(), sanitized_imports_.size());
for (int index = 0; index < static_cast<int>(module_->import_table.size());
@@ -2691,6 +1908,8 @@ int InstanceBuilder::ProcessImports(Handle<FixedArray> code_table,
Handle<String> module_name = sanitized_imports_[index].module_name;
Handle<String> import_name = sanitized_imports_[index].import_name;
Handle<Object> value = sanitized_imports_[index].value;
+ NativeModule* native_module =
+ instance->compiled_module()->GetNativeModule();
switch (import.kind) {
case kExternalFunction: {
@@ -2700,20 +1919,44 @@ int InstanceBuilder::ProcessImports(Handle<FixedArray> code_table,
module_name, import_name);
return -1;
}
- WasmCodeWrapper import_code = UnwrapExportOrCompileImportWrapper(
- isolate_, module_->functions[import.index].sig,
- Handle<JSReceiver>::cast(value), num_imported_functions,
- module_->origin(), &imported_wasm_instances, js_imports_table,
- instance);
- if (import_code.is_null()) {
- ReportLinkError("imported function does not match the expected type",
- index, module_name, import_name);
- return -1;
- }
- if (!FLAG_wasm_jit_to_native) {
- code_table->set(num_imported_functions, *import_code.GetCode());
+ uint32_t func_index = import.index;
+ DCHECK_EQ(num_imported_functions, func_index);
+ FunctionSig* expected_sig = module_->functions[func_index].sig;
+ if (WasmExportedFunction::IsWasmExportedFunction(*value)) {
+ // The imported function is a WASM function from another instance.
+ Handle<WasmExportedFunction> imported_function(
+ WasmExportedFunction::cast(*value), isolate_);
+ Handle<WasmInstanceObject> imported_instance(
+ imported_function->instance(), isolate_);
+ FunctionSig* imported_sig =
+ imported_instance->module()
+ ->functions[imported_function->function_index()]
+ .sig;
+ if (!imported_sig->Equals(expected_sig)) {
+ ReportLinkError(
+ "imported function does not match the expected type", index,
+ module_name, import_name);
+ return -1;
+ }
+ // The import reference is the instance object itself.
+ auto wasm_code = imported_function->GetWasmCode();
+ ImportedFunctionEntry(*instance, func_index)
+ .set(*imported_instance, wasm_code);
+ native_module->SetCode(func_index, wasm_code);
+ } else {
+ // The imported function is a callable.
+ Handle<JSReceiver> js_receiver(JSReceiver::cast(*value), isolate_);
+ Handle<Code> wrapper_code = compiler::CompileWasmToJSWrapper(
+ isolate_, js_receiver, expected_sig, func_index,
+ module_->origin(),
+ instance->compiled_module()->use_trap_handler());
+ RecordStats(*wrapper_code, counters());
+
+ WasmCode* wasm_code = native_module->AddCodeCopy(
+ wrapper_code, wasm::WasmCode::kWasmToJsWrapper, func_index);
+ ImportedFunctionEntry(*instance, func_index)
+ .set(*js_receiver, wasm_code);
}
- RecordStats(import_code, counters());
num_imported_functions++;
break;
}
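The rewritten import handling above takes one of two routes: an import that is already a wasm exported function is linked directly after a signature check, while any other callable gets a compiled wasm-to-JS wrapper. A hedged sketch of just that decision, with placeholder types rather than the V8 classes:

#include <string>

// Placeholder description of an import value; not a V8 class.
struct FakeImport {
  bool is_wasm_exported_function = false;
  std::string signature;  // e.g. "i32(i32,i32)"
};

enum class LinkKind { kDirectWasmCall, kWasmToJsWrapper, kLinkError };

// Direct link for wasm-to-wasm imports (after the signature check), a
// compiled wrapper for arbitrary JS callables.
LinkKind LinkImportedFunction(const FakeImport& import,
                              const std::string& expected_signature) {
  if (import.is_wasm_exported_function) {
    if (import.signature != expected_signature) return LinkKind::kLinkError;
    return LinkKind::kDirectWasmCall;
  }
  return LinkKind::kWasmToJsWrapper;
}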
@@ -2723,19 +1966,20 @@ int InstanceBuilder::ProcessImports(Handle<FixedArray> code_table,
module_name, import_name);
return -1;
}
- WasmIndirectFunctionTable& table =
- module_->function_tables[num_imported_tables];
- TableInstance& table_instance = table_instances_[num_imported_tables];
+ uint32_t table_num = import.index;
+ DCHECK_EQ(table_num, num_imported_tables);
+ WasmIndirectFunctionTable& table = module_->function_tables[table_num];
+ TableInstance& table_instance = table_instances_[table_num];
table_instance.table_object = Handle<WasmTableObject>::cast(value);
instance->set_table_object(*table_instance.table_object);
table_instance.js_wrappers = Handle<FixedArray>(
table_instance.table_object->functions(), isolate_);
- int imported_cur_size = table_instance.js_wrappers->length();
- if (imported_cur_size < static_cast<int>(table.initial_size)) {
+ int imported_table_size = table_instance.js_wrappers->length();
+ if (imported_table_size < static_cast<int>(table.initial_size)) {
thrower_->LinkError(
"table import %d is smaller than initial %d, got %u", index,
- table.initial_size, imported_cur_size);
+ table.initial_size, imported_table_size);
return -1;
}
@@ -2757,23 +2001,15 @@ int InstanceBuilder::ProcessImports(Handle<FixedArray> code_table,
}
}
- // Allocate a new dispatch table, containing <smi(sig), code> pairs.
- CHECK_GE(kMaxInt / kFunctionTableEntrySize, imported_cur_size);
- int table_size = kFunctionTableEntrySize * imported_cur_size;
- table_instance.function_table =
- isolate_->factory()->NewFixedArray(table_size);
- for (int i = kFunctionTableSignatureOffset; i < table_size;
- i += kFunctionTableEntrySize) {
- table_instance.function_table->set(i, Smi::FromInt(kInvalidSigIndex));
- }
- WasmContext* wasm_context = nullptr;
- if (WASM_CONTEXT_TABLES) {
- wasm_context = instance->wasm_context()->get();
- EnsureWasmContextTable(wasm_context, imported_cur_size);
+ // Allocate a new dispatch table.
+ if (!instance->has_indirect_function_table()) {
+ WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
+ instance, imported_table_size);
+ table_instances_[table_num].table_size = imported_table_size;
}
// Initialize the dispatch table with the (foreign) JS functions
// that are already in the table.
- for (int i = 0; i < imported_cur_size; ++i) {
+ for (int i = 0; i < imported_table_size; ++i) {
Handle<Object> val(table_instance.js_wrappers->get(i), isolate_);
// TODO(mtrofin): this is the same logic as WasmTableObject::Set:
// insert in the local table a wrapper from the other module, and add
@@ -2788,32 +2024,16 @@ int InstanceBuilder::ProcessImports(Handle<FixedArray> code_table,
// id, then the signature does not appear at all in this module,
// so putting {-1} in the table will cause checks to always fail.
auto target = Handle<WasmExportedFunction>::cast(val);
- if (!WASM_CONTEXT_TABLES) {
- FunctionSig* sig = nullptr;
- Handle<Code> code =
- MakeWasmToWasmWrapper(isolate_, target, nullptr, &sig,
- &imported_wasm_instances, instance, 0)
- .GetCode();
- int sig_index = module_->signature_map.Find(sig);
- table_instance.function_table->set(
- compiler::FunctionTableSigOffset(i), Smi::FromInt(sig_index));
- table_instance.function_table->set(
- compiler::FunctionTableCodeOffset(i), *code);
- } else {
- Handle<WasmInstanceObject> imported_instance =
- handle(target->instance());
- const wasm::WasmCode* exported_code =
- target->GetWasmCode().GetWasmCode();
- FunctionSig* sig = imported_instance->module()
- ->functions[exported_code->index()]
- .sig;
- auto& entry = wasm_context->table[i];
- entry.context = imported_instance->wasm_context()->get();
- entry.sig_id = module_->signature_map.Find(sig);
- entry.target = exported_code->instructions().start();
- }
+ Handle<WasmInstanceObject> imported_instance =
+ handle(target->instance());
+ const wasm::WasmCode* exported_code = target->GetWasmCode();
+ FunctionSig* sig = imported_instance->module()
+ ->functions[exported_code->index()]
+ .sig;
+ IndirectFunctionTableEntry(*instance, i)
+ .set(module_->signature_map.Find(sig), *imported_instance,
+ exported_code);
}
-
num_imported_tables++;
break;
}
@@ -2902,18 +2122,6 @@ int InstanceBuilder::ProcessImports(Handle<FixedArray> code_table,
}
}
- if (!imported_wasm_instances.empty()) {
- WasmInstanceMap::IteratableScope iteratable_scope(&imported_wasm_instances);
- Handle<FixedArray> instances_array = isolate_->factory()->NewFixedArray(
- imported_wasm_instances.size(), TENURED);
- instance->set_directly_called_instances(*instances_array);
- int index = 0;
- for (auto it = iteratable_scope.begin(), end = iteratable_scope.end();
- it != end; ++it, ++index) {
- instances_array->set(index, ***it);
- }
- }
-
return num_imported_functions;
}
@@ -2970,11 +2178,12 @@ Handle<JSArrayBuffer> InstanceBuilder::AllocateMemory(uint32_t num_pages) {
const bool enable_guard_regions = use_trap_handler();
const bool is_shared_memory =
module_->has_shared_memory && i::FLAG_experimental_wasm_threads;
- Handle<JSArrayBuffer> mem_buffer = NewArrayBuffer(
- isolate_, num_pages * kWasmPageSize, enable_guard_regions,
- is_shared_memory ? i::SharedFlag::kShared : i::SharedFlag::kNotShared);
-
- if (mem_buffer.is_null()) {
+ i::SharedFlag shared_flag =
+ is_shared_memory ? i::SharedFlag::kShared : i::SharedFlag::kNotShared;
+ Handle<JSArrayBuffer> mem_buffer;
+ if (!NewArrayBuffer(isolate_, num_pages * kWasmPageSize, enable_guard_regions,
+ shared_flag)
+ .ToHandle(&mem_buffer)) {
thrower_->RangeError("Out of memory: wasm memory");
}
return mem_buffer;
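The allocation path now returns a MaybeHandle, so the caller must unwrap it with ToHandle() and report a RangeError on failure. A small sketch of the same "maybe" unwrapping, using std::optional as an illustrative stand-in (not the V8 type):

#include <cstddef>
#include <cstdint>
#include <optional>
#include <vector>

using Buffer = std::vector<uint8_t>;

// Stand-in for an allocation that may fail; returns std::nullopt on failure.
std::optional<Buffer> TryNewBuffer(size_t byte_length) {
  if (byte_length > (size_t{1} << 31)) return std::nullopt;  // pretend OOM
  return Buffer(byte_length);
}

// Mirrors the ToHandle() unwrapping above: false means "report a RangeError".
bool AllocateMemoryPages(uint32_t num_pages, Buffer* out) {
  constexpr size_t kPageSize = 64 * 1024;  // wasm page size
  std::optional<Buffer> maybe = TryNewBuffer(size_t{num_pages} * kPageSize);
  if (!maybe.has_value()) return false;
  *out = std::move(*maybe);
  return true;
}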
@@ -2996,8 +2205,8 @@ bool InstanceBuilder::NeedsWrappers() const {
void InstanceBuilder::ProcessExports(
Handle<WasmInstanceObject> instance,
Handle<WasmCompiledModule> compiled_module) {
- Handle<FixedArray> wrapper_table(compiled_module->export_wrappers(),
- isolate_);
+ Handle<FixedArray> export_wrappers(compiled_module->export_wrappers(),
+ isolate_);
if (NeedsWrappers()) {
// Fill the table to cache the exported JSFunction wrappers.
js_wrappers_.insert(js_wrappers_.begin(), module_->functions.size(),
@@ -3039,22 +2248,8 @@ void InstanceBuilder::ProcessExports(
desc.set_enumerable(true);
desc.set_configurable(module_->is_asm_js());
- // Store weak references to all exported functions.
- Handle<FixedArray> weak_exported_functions;
- if (compiled_module->has_weak_exported_functions()) {
- weak_exported_functions =
- handle(compiled_module->weak_exported_functions(), isolate_);
- } else {
- int export_count = 0;
- for (WasmExport& exp : module_->export_table) {
- if (exp.kind == kExternalFunction) ++export_count;
- }
- weak_exported_functions = isolate_->factory()->NewFixedArray(export_count);
- compiled_module->set_weak_exported_functions(*weak_exported_functions);
- }
-
// Process each export in the export table.
- int export_index = 0; // Index into {weak_exported_functions}.
+ int export_index = 0; // Index into {export_wrappers}.
for (WasmExport& exp : module_->export_table) {
Handle<String> name =
WasmSharedModuleData::ExtractUtf8StringFromModuleBytes(
@@ -3076,14 +2271,17 @@ void InstanceBuilder::ProcessExports(
if (js_function.is_null()) {
// Wrap the exported code as a JSFunction.
Handle<Code> export_code =
- wrapper_table->GetValueChecked<Code>(isolate_, export_index);
+ export_wrappers->GetValueChecked<Code>(isolate_, export_index);
MaybeHandle<String> func_name;
if (module_->is_asm_js()) {
// For modules arising from asm.js, honor the names section.
+ WireBytesRef func_name_ref =
+ module_->LookupName(compiled_module_->shared()->module_bytes(),
+ function.func_index);
func_name =
WasmSharedModuleData::ExtractUtf8StringFromModuleBytes(
isolate_, handle(compiled_module_->shared(), isolate_),
- function.name)
+ func_name_ref)
.ToHandleChecked();
}
js_function = WasmExportedFunction::New(
@@ -3092,10 +2290,6 @@ void InstanceBuilder::ProcessExports(
js_wrappers_[exp.index] = js_function;
}
desc.set_value(js_function);
- Handle<WeakCell> weak_export =
- isolate_->factory()->NewWeakCell(js_function);
- DCHECK_GT(weak_exported_functions->length(), export_index);
- weak_exported_functions->set(export_index, *weak_export);
export_index++;
break;
}
@@ -3160,7 +2354,7 @@ void InstanceBuilder::ProcessExports(
return;
}
}
- DCHECK_EQ(export_index, weak_exported_functions->length());
+ DCHECK_EQ(export_index, export_wrappers->length());
if (module_->is_wasm()) {
v8::Maybe<bool> success =
@@ -3173,148 +2367,31 @@ void InstanceBuilder::ProcessExports(
void InstanceBuilder::InitializeTables(
Handle<WasmInstanceObject> instance,
CodeSpecialization* code_specialization) {
- size_t function_table_count = module_->function_tables.size();
-
- Handle<FixedArray> old_function_tables_gc =
- FLAG_wasm_jit_to_native
- ? Handle<FixedArray>::null()
- : handle(compiled_module_->function_tables(), isolate_);
-
- // function_table_count is 0 or 1, so we just create these objects even if not
- // needed for native wasm.
- // TODO(mtrofin): remove the {..}_gc variables when we don't need
- // FLAG_wasm_jit_to_native
- Handle<FixedArray> new_function_tables_gc =
- isolate_->factory()->NewFixedArray(static_cast<int>(function_table_count),
- TENURED);
-
- // These go on the instance.
- Handle<FixedArray> rooted_function_tables =
- isolate_->factory()->NewFixedArray(static_cast<int>(function_table_count),
- TENURED);
-
- instance->set_function_tables(*rooted_function_tables);
-
- if (!FLAG_wasm_jit_to_native) {
- DCHECK_EQ(old_function_tables_gc->length(),
- new_function_tables_gc->length());
- }
- for (size_t index = 0; index < function_table_count; ++index) {
+ size_t table_count = module_->function_tables.size();
+ for (size_t index = 0; index < table_count; ++index) {
WasmIndirectFunctionTable& table = module_->function_tables[index];
TableInstance& table_instance = table_instances_[index];
- // The table holds <smi(sig), code> pairs.
- CHECK_GE(kMaxInt / compiler::kFunctionTableEntrySize, table.initial_size);
- int num_table_entries = static_cast<int>(table.initial_size);
- int table_size = compiler::kFunctionTableEntrySize * num_table_entries;
-
- if (WASM_CONTEXT_TABLES) {
- WasmContext* wasm_context = instance->wasm_context()->get();
- EnsureWasmContextTable(wasm_context, num_table_entries);
- }
-
- if (table_instance.function_table.is_null()) {
- // Create a new dispatch table if necessary.
- table_instance.function_table =
- isolate_->factory()->NewFixedArray(table_size);
- for (int i = compiler::kFunctionTableSignatureOffset; i < table_size;
- i += compiler::kFunctionTableEntrySize) {
- // Fill the table with invalid signature indexes so that
- // uninitialized entries will always fail the signature check.
- table_instance.function_table->set(i, Smi::FromInt(kInvalidSigIndex));
- }
- } else {
- // Table is imported, patch table bounds check
- int existing_table_size = table_instance.function_table->length();
- DCHECK_EQ(0, existing_table_size % compiler::kFunctionTableEntrySize);
- int existing_num_table_entries =
- existing_table_size / compiler::kFunctionTableEntrySize;
- DCHECK_LE(num_table_entries, existing_num_table_entries);
- code_specialization->PatchTableSize(num_table_entries,
- existing_num_table_entries);
- }
- int int_index = static_cast<int>(index);
- Handle<FixedArray> global_func_table =
- isolate_->global_handles()->Create(*table_instance.function_table);
- // Make the handles weak. The table objects are rooted on the instance, as
- // they belong to it. We need the global handles in order to have stable
- // pointers to embed in the instance's specialization (wasm compiled code).
- // The order of finalization doesn't matter, in that the instance finalizer
- // may be called before each table's finalizer, or vice-versa.
- // This is because values used for embedding are only interesting should we
- // {Reset} a specialization, in which case they are interesting as values,
- // they are not dereferenced.
- GlobalHandles::MakeWeak(
- reinterpret_cast<Object**>(global_func_table.location()),
- global_func_table.location(), &FunctionTableFinalizer,
- v8::WeakCallbackType::kFinalizer);
-
- rooted_function_tables->set(int_index, *global_func_table);
-
- GlobalHandleAddress new_func_table_addr = global_func_table.address();
- GlobalHandleAddress old_func_table_addr;
- if (!WASM_CONTEXT_TABLES) {
- WasmCompiledModule::SetTableValue(isolate_, new_function_tables_gc,
- int_index, new_func_table_addr);
-
- old_func_table_addr =
- WasmCompiledModule::GetTableValue(*old_function_tables_gc, int_index);
- code_specialization->RelocatePointer(old_func_table_addr,
- new_func_table_addr);
+ if (!instance->has_indirect_function_table()) {
+ WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
+ instance, table.initial_size);
+ table_instance.table_size = table.initial_size;
}
}
-
- if (!WASM_CONTEXT_TABLES) {
- compiled_module_->set_function_tables(*new_function_tables_gc);
- }
}
-void InstanceBuilder::LoadTableSegments(Handle<FixedArray> code_table,
- Handle<WasmInstanceObject> instance) {
- wasm::NativeModule* native_module = compiled_module_->GetNativeModule();
+void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
+ NativeModule* native_module = compiled_module_->GetNativeModule();
int function_table_count = static_cast<int>(module_->function_tables.size());
for (int index = 0; index < function_table_count; ++index) {
TableInstance& table_instance = table_instances_[index];
- // Count the number of table exports for each function (needed for lazy
- // compilation).
- std::unordered_map<uint32_t, uint32_t> num_table_exports;
- if (compile_lazy(module_)) {
- for (auto& table_init : module_->table_inits) {
- for (uint32_t func_index : table_init.entries) {
- if (!FLAG_wasm_jit_to_native) {
- Code* code =
- Code::cast(code_table->get(static_cast<int>(func_index)));
- // Only increase the counter for lazy compile builtins (it's not
- // needed otherwise).
- if (code->builtin_index() != Builtins::kWasmCompileLazy) {
- DCHECK(code->kind() == Code::WASM_FUNCTION ||
- code->kind() == Code::WASM_TO_JS_FUNCTION);
- continue;
- }
- } else {
- const wasm::WasmCode* code = native_module->GetCode(func_index);
- // Only increase the counter for lazy compile builtins (it's not
- // needed otherwise).
- if (code->kind() != wasm::WasmCode::kLazyStub) {
- DCHECK(code->kind() == wasm::WasmCode::kFunction ||
- code->kind() == wasm::WasmCode::kWasmToJsWrapper);
- continue;
- }
- }
- ++num_table_exports[func_index];
- }
- }
- }
-
// TODO(titzer): this does redundant work if there are multiple tables,
// since initializations are not sorted by table index.
for (auto& table_init : module_->table_inits) {
uint32_t base = EvalUint32InitExpr(table_init.offset);
uint32_t num_entries = static_cast<uint32_t>(table_init.entries.size());
- DCHECK(in_bounds(base, num_entries,
- table_instance.function_table->length() /
- compiler::kFunctionTableEntrySize));
+ DCHECK(in_bounds(base, num_entries, table_instance.table_size));
for (uint32_t i = 0; i < num_entries; ++i) {
uint32_t func_index = table_init.entries[i];
WasmFunction* function = &module_->functions[func_index];
@@ -3322,30 +2399,18 @@ void InstanceBuilder::LoadTableSegments(Handle<FixedArray> code_table,
// Update the local dispatch table first.
uint32_t sig_id = module_->signature_ids[function->sig_index];
- table_instance.function_table->set(
- compiler::FunctionTableSigOffset(table_index),
- Smi::FromInt(sig_id));
- WasmCodeWrapper wasm_code = EnsureTableExportLazyDeoptData(
- isolate_, instance, code_table, native_module, func_index,
- table_instance.function_table, table_index, &num_table_exports);
- Handle<Object> value_to_update_with;
- if (!wasm_code.IsCodeObject()) {
- Handle<Foreign> as_foreign = isolate_->factory()->NewForeign(
- wasm_code.GetWasmCode()->instructions().start(), TENURED);
- value_to_update_with = as_foreign;
+ wasm::WasmCode* wasm_code =
+ native_module->GetIndirectlyCallableCode(func_index);
+
+ if (func_index < module_->num_imported_functions) {
+ // Imported functions have the target instance put into the IFT.
+ WasmInstanceObject* target_instance =
+ ImportedFunctionEntry(*instance, func_index).instance();
+ IndirectFunctionTableEntry(*instance, table_index)
+ .set(sig_id, target_instance, wasm_code);
} else {
- value_to_update_with = wasm_code.GetCode();
- }
- table_instance.function_table->set(
- compiler::FunctionTableCodeOffset(table_index),
- *value_to_update_with);
-
- if (WASM_CONTEXT_TABLES) {
- WasmContext* wasm_context = instance->wasm_context()->get();
- auto& entry = wasm_context->table[table_index];
- entry.sig_id = sig_id;
- entry.context = wasm_context;
- entry.target = wasm_code.instructions().start();
+ IndirectFunctionTableEntry(*instance, table_index)
+ .set(sig_id, *instance, wasm_code);
}
if (!table_instance.table_object.is_null()) {
@@ -3363,10 +2428,12 @@ void InstanceBuilder::LoadTableSegments(Handle<FixedArray> code_table,
MaybeHandle<String> func_name;
if (module_->is_asm_js()) {
// For modules arising from asm.js, honor the names section.
+ WireBytesRef func_name_ref = module_->LookupName(
+ compiled_module_->shared()->module_bytes(), func_index);
func_name =
WasmSharedModuleData::ExtractUtf8StringFromModuleBytes(
isolate_, handle(compiled_module_->shared(), isolate_),
- function->name)
+ func_name_ref)
.ToHandleChecked();
}
Handle<WasmExportedFunction> js_function =
@@ -3381,27 +2448,18 @@ void InstanceBuilder::LoadTableSegments(Handle<FixedArray> code_table,
// UpdateDispatchTables() should update this instance as well.
WasmTableObject::UpdateDispatchTables(
isolate_, table_instance.table_object, table_index, function->sig,
- instance, wasm_code, func_index);
+ instance, wasm_code);
}
}
}
-#ifdef DEBUG
- // Check that the count of table exports was accurate. The entries are
- // decremented on each export, so all should be zero now.
- for (auto e : num_table_exports) {
- DCHECK_EQ(0, e.second);
- }
-#endif
-
// TODO(titzer): we add the new dispatch table at the end to avoid
// redundant work and also because the new instance is not yet fully
// initialized.
if (!table_instance.table_object.is_null()) {
// Add the new dispatch table to the WebAssembly.Table object.
WasmTableObject::AddDispatchTable(isolate_, table_instance.table_object,
- instance, index,
- table_instance.function_table);
+ instance, index);
}
}
}
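The indirect-function-table entries written above pair a canonical signature id with an instance and a code pointer, and call_indirect only transfers control after the signature ids match. A compact sketch of that check with hypothetical stand-in types:

#include <cstdint>
#include <vector>

// Hypothetical stand-in for one indirect-function-table entry.
struct FakeEntry {
  int32_t sig_id = -1;         // -1 ("invalid") never matches a caller
  void* instance = nullptr;    // whose state the target runs against
  void (*target)() = nullptr;  // entry point of the callee
};

// Returns false (where the real code would trap) on an out-of-bounds index
// or a signature mismatch; only then is control transferred.
bool CallIndirect(const std::vector<FakeEntry>& table, uint32_t index,
                  int32_t expected_sig_id) {
  if (index >= table.size()) return false;
  const FakeEntry& entry = table[index];
  if (entry.sig_id != expected_sig_id || entry.target == nullptr) return false;
  entry.target();
  return true;
}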
@@ -3417,7 +2475,7 @@ AsyncCompileJob::AsyncCompileJob(Isolate* isolate,
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
v8::Platform* platform = V8::GetCurrentPlatform();
foreground_task_runner_ = platform->GetForegroundTaskRunner(v8_isolate);
- background_task_runner_ = platform->GetBackgroundTaskRunner(v8_isolate);
+ background_task_runner_ = platform->GetWorkerThreadsTaskRunner(v8_isolate);
// The handles for the context and promise must be deferred.
DeferredHandleScope deferred(isolate);
context_ = Handle<Context>(*context);
@@ -3431,6 +2489,9 @@ void AsyncCompileJob::Start() {
void AsyncCompileJob::Abort() {
background_task_manager_.CancelAndWait();
+ if (!compiled_module_.is_null()) {
+ compiled_module_->GetNativeModule()->compilation_state()->Abort();
+ }
if (num_pending_foreground_tasks_ == 0) {
// No task is pending, we can just remove the AsyncCompileJob.
isolate_->wasm_engine()->compilation_manager()->RemoveJob(this);
@@ -3475,8 +2536,7 @@ class AsyncStreamingProcessor final : public StreamingProcessor {
ModuleDecoder decoder_;
AsyncCompileJob* job_;
- std::unique_ptr<ModuleCompiler::CompilationUnitBuilder>
- compilation_unit_builder_;
+ std::unique_ptr<CompilationUnitBuilder> compilation_unit_builder_;
uint32_t next_function_ = 0;
};
@@ -3492,13 +2552,13 @@ AsyncCompileJob::~AsyncCompileJob() {
for (auto d : deferred_handles_) delete d;
}
-void AsyncCompileJob::AsyncCompileFailed(ErrorThrower& thrower) {
+void AsyncCompileJob::AsyncCompileFailed(Handle<Object> error_reason) {
if (stream_) stream_->NotifyError();
// {job} keeps the {this} pointer alive.
std::shared_ptr<AsyncCompileJob> job =
isolate_->wasm_engine()->compilation_manager()->RemoveJob(this);
MaybeHandle<Object> promise_result =
- JSPromise::Reject(module_promise_, thrower.Reify());
+ JSPromise::Reject(module_promise_, error_reason);
CHECK_EQ(promise_result.is_null(), isolate_->has_pending_exception());
}
@@ -3515,7 +2575,7 @@ void AsyncCompileJob::AsyncCompileSucceeded(Handle<Object> result) {
// task) and schedule the next step(s), if any.
class AsyncCompileJob::CompileStep {
public:
- explicit CompileStep(size_t num_background_tasks = 0)
+ explicit CompileStep(int num_background_tasks = 0)
: num_background_tasks_(num_background_tasks) {}
virtual ~CompileStep() {}
@@ -3536,10 +2596,10 @@ class AsyncCompileJob::CompileStep {
virtual void RunInForeground() { UNREACHABLE(); }
virtual void RunInBackground() { UNREACHABLE(); }
- size_t NumberOfBackgroundTasks() { return num_background_tasks_; }
+ int NumberOfBackgroundTasks() { return num_background_tasks_; }
AsyncCompileJob* job_ = nullptr;
- const size_t num_background_tasks_;
+ const int num_background_tasks_;
};
class AsyncCompileJob::CompileTask : public CancelableTask {
@@ -3575,24 +2635,19 @@ void AsyncCompileJob::DoSync(Args&&... args) {
}
void AsyncCompileJob::StartBackgroundTask() {
- background_task_runner_->PostTask(
- base::make_unique<CompileTask>(this, false));
-}
-
-void AsyncCompileJob::RestartBackgroundTasks() {
- size_t num_restarts = stopped_tasks_.Value();
- stopped_tasks_.Decrement(num_restarts);
-
- for (size_t i = 0; i < num_restarts; ++i) {
- StartBackgroundTask();
- }
+ // If --wasm-num-compilation-tasks=0 is passed, only spawn foreground
+ // tasks. This is used to make timing deterministic.
+ v8::TaskRunner* task_runner = FLAG_wasm_num_compilation_tasks > 0
+ ? background_task_runner_.get()
+ : foreground_task_runner_.get();
+ task_runner->PostTask(base::make_unique<CompileTask>(this, false));
}
template <typename Step, typename... Args>
void AsyncCompileJob::DoAsync(Args&&... args) {
NextStep<Step>(std::forward<Args>(args)...);
- size_t end = step_->NumberOfBackgroundTasks();
- for (size_t i = 0; i < end; ++i) {
+ int end = step_->NumberOfBackgroundTasks();
+ for (int i = 0; i < end; ++i) {
StartBackgroundTask();
}
}
@@ -3646,7 +2701,7 @@ class AsyncCompileJob::DecodeFail : public CompileStep {
ErrorThrower thrower(job_->isolate_, "AsyncCompile");
thrower.CompileFailed("Wasm decoding failed", result_);
// {job_} is deleted in AsyncCompileFailed, therefore the {return}.
- return job_->AsyncCompileFailed(thrower);
+ return job_->AsyncCompileFailed(thrower.Reify());
}
};
@@ -3664,47 +2719,21 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
void RunInForeground() override {
TRACE_COMPILE("(2) Prepare and start compile...\n");
+
+ // Make sure all compilation tasks stopped running. Decoding (async step)
+ // is done.
+ job_->background_task_manager_.CancelAndWait();
+
Isolate* isolate = job_->isolate_;
- Factory* factory = isolate->factory();
-
- Handle<Code> illegal_builtin = BUILTIN_CODE(isolate, Illegal);
- if (!FLAG_wasm_jit_to_native) {
- // The {code_table} array contains import wrappers and functions (which
- // are both included in {functions.size()}.
- // The results of compilation will be written into it.
- // Initialize {code_table_} with the illegal builtin. All call sites
- // will be patched at instantiation.
- int code_table_size = static_cast<int>(module_->functions.size());
- job_->code_table_ = factory->NewFixedArray(code_table_size, TENURED);
-
- for (int i = 0, e = module_->num_imported_functions; i < e; ++i) {
- job_->code_table_->set(i, *illegal_builtin);
- }
- } else {
- // Just makes it easier to deal with code that wants code_table, while
- // we have FLAG_wasm_jit_to_native around.
- job_->code_table_ = factory->NewFixedArray(0, TENURED);
- }
- job_->module_env_ =
- CreateDefaultModuleEnv(isolate, module_, illegal_builtin);
+ job_->module_env_ = CreateDefaultModuleEnv(isolate, module_);
- // Transfer ownership of the {WasmModule} to the {ModuleCompiler}, but
- // keep a pointer.
Handle<Code> centry_stub = CEntryStub(isolate, 1).GetCode();
{
// Now reopen the handles in a deferred scope in order to use
// them in the concurrent steps.
DeferredHandleScope deferred(isolate);
-
- centry_stub = Handle<Code>(*centry_stub, isolate);
- job_->code_table_ = Handle<FixedArray>(*job_->code_table_, isolate);
- compiler::ModuleEnv* env = job_->module_env_.get();
- ReopenHandles(isolate, env->function_code);
- Handle<Code>* mut =
- const_cast<Handle<Code>*>(&env->default_function_code);
- *mut = Handle<Code>(**mut, isolate);
-
+ job_->centry_stub_ = Handle<Code>(*centry_stub, isolate);
job_->deferred_handles_.push_back(deferred.Detach());
}
@@ -3717,14 +2746,8 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
Handle<FixedArray> export_wrappers =
job_->isolate_->factory()->NewFixedArray(export_wrapper_size, TENURED);
- job_->compiled_module_ =
- NewCompiledModule(job_->isolate_, module_, job_->code_table_,
- export_wrappers, job_->module_env_.get());
-
- job_->compiler_.reset(
- new ModuleCompiler(isolate, module_, centry_stub,
- job_->compiled_module_->GetNativeModule()));
- job_->compiler_->EnableThrottling();
+ job_->compiled_module_ = NewCompiledModule(
+ job_->isolate_, module_, export_wrappers, job_->module_env_.get());
{
DeferredHandleScope deferred(job_->isolate_);
@@ -3740,129 +2763,64 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
return;
}
- // Start asynchronous compilation tasks.
- size_t num_background_tasks =
- Max(static_cast<size_t>(1),
- Min(num_functions,
- Min(static_cast<size_t>(FLAG_wasm_num_compilation_tasks),
- V8::GetCurrentPlatform()
- ->NumberOfAvailableBackgroundThreads())));
+ CompilationState* compilation_state =
+ job_->compiled_module_->GetNativeModule()->compilation_state();
+ {
+ // Instance field {job_} cannot be captured by copy, therefore
+ // we need to add a local helper variable {job}. We want to
+ // capture the {job} pointer by copy, as it otherwise is dependent
+ // on the current step we are in.
+ AsyncCompileJob* job = job_;
+ compilation_state->AddCallback(
+ [job](CompilationEvent event, Handle<Object> error) {
+ switch (event) {
+ case CompilationEvent::kFinishedBaselineCompilation:
+ if (job->DecrementAndCheckFinisherCount()) {
+ job->DoSync<FinishCompile>();
+ }
+ return;
+ case CompilationEvent::kFailedCompilation:
+ DeferredHandleScope deferred(job->isolate());
+ error = handle(*error, job->isolate());
+ job->deferred_handles_.push_back(deferred.Detach());
+ job->DoSync<CompileFailed>(error);
+ return;
+ }
+ UNREACHABLE();
+ });
+ }
if (start_compilation_) {
// TODO(ahaas): Try to remove the {start_compilation_} check when
// streaming decoding is done in the background. If
// InitializeCompilationUnits always returns 0 for streaming compilation,
// then DoAsync would do the same as NextStep already.
- job_->outstanding_units_ = job_->compiler_->InitializeCompilationUnits(
- module_->functions, job_->wire_bytes_, job_->module_env_.get());
- job_->DoAsync<ExecuteAndFinishCompilationUnits>(num_background_tasks);
- } else {
- job_->stopped_tasks_ = num_background_tasks;
- job_->NextStep<ExecuteAndFinishCompilationUnits>(num_background_tasks);
+ size_t functions_count =
+ GetNumFunctionsToCompile(module_->functions, job_->module_env_.get());
+ compilation_state->SetNumberOfFunctionsToCompile(functions_count);
+ // Add compilation units and kick off compilation.
+ InitializeCompilationUnits(module_->functions, job_->wire_bytes_,
+ job_->module_env_.get(), job_->centry_stub_,
+ job_->compiled_module_->GetNativeModule());
}
}
};
//==========================================================================
-// Step 3 (async x K tasks): Execute compilation units.
+// Step 4b (sync): Compilation failed. Reject Promise.
//==========================================================================
-class AsyncCompileJob::ExecuteAndFinishCompilationUnits : public CompileStep {
+class AsyncCompileJob::CompileFailed : public CompileStep {
public:
- explicit ExecuteAndFinishCompilationUnits(size_t num_compile_tasks)
- : CompileStep(num_compile_tasks) {}
-
- void RunInBackground() override {
- std::function<void()> StartFinishCompilationUnit = [this]() {
- if (!failed_) job_->StartForegroundTask();
- };
-
- TRACE_COMPILE("(3) Compiling...\n");
- while (job_->compiler_->CanAcceptWork()) {
- if (failed_) break;
- DisallowHandleAllocation no_handle;
- DisallowHeapAllocation no_allocation;
- if (!job_->compiler_->FetchAndExecuteCompilationUnit(
- StartFinishCompilationUnit)) {
- break;
- }
- }
- job_->stopped_tasks_.Increment(1);
- }
+ explicit CompileFailed(Handle<Object> error_reason)
+ : error_reason_(error_reason) {}
void RunInForeground() override {
- // TODO(6792): No longer needed once WebAssembly code is off heap.
- // Use base::Optional to be able to close the scope before we resolve or
- // reject the promise.
- base::Optional<CodeSpaceMemoryModificationScope> modification_scope(
- base::in_place_t(), job_->isolate_->heap());
- TRACE_COMPILE("(4a) Finishing compilation units...\n");
- if (failed_) {
- // The job failed already, no need to do more work.
- job_->compiler_->SetFinisherIsRunning(false);
- return;
- }
- ErrorThrower thrower(job_->isolate_, "AsyncCompile");
-
- // We execute for 1 ms and then reschedule the task, same as the GC.
- double deadline = MonotonicallyIncreasingTimeInMs() + 1.0;
-
- while (true) {
- if (job_->compiler_->ShouldIncreaseWorkload()) {
- job_->RestartBackgroundTasks();
- }
-
- int func_index = -1;
-
- WasmCodeWrapper result =
- job_->compiler_->FinishCompilationUnit(&thrower, &func_index);
-
- if (thrower.error()) {
- // An error was detected, we stop compiling and wait for the
- // background tasks to finish.
- failed_ = true;
- break;
- } else if (result.is_null()) {
- // The working queue was empty, we break the loop. If new work units
- // are enqueued, the background task will start this
- // FinishCompilationUnits task again.
- break;
- } else {
- DCHECK_LE(0, func_index);
- if (result.IsCodeObject()) {
- job_->code_table_->set(func_index, *result.GetCode());
- }
- --job_->outstanding_units_;
- }
-
- if (deadline < MonotonicallyIncreasingTimeInMs()) {
- // We reached the deadline. We reschedule this task and return
- // immediately. Since we rescheduled this task already, we do not set
- // the FinisherIsRunning flat to false.
- job_->StartForegroundTask();
- return;
- }
- }
- // This task finishes without being rescheduled. Therefore we set the
- // FinisherIsRunning flag to false.
- job_->compiler_->SetFinisherIsRunning(false);
- if (thrower.error()) {
- // Make sure all compilation tasks stopped running.
- job_->background_task_manager_.CancelAndWait();
-
- // Close the CodeSpaceMemoryModificationScope before we reject the promise
- // in AsyncCompileFailed. Promise::Reject calls directly into JavaScript.
- modification_scope.reset();
- return job_->AsyncCompileFailed(thrower);
- }
- if (job_->outstanding_units_ == 0) {
- // Make sure all compilation tasks stopped running.
- job_->background_task_manager_.CancelAndWait();
- if (job_->DecrementAndCheckFinisherCount()) job_->DoSync<FinishCompile>();
- }
+ TRACE_COMPILE("(4b) Compilation Failed...\n");
+ return job_->AsyncCompileFailed(error_reason_);
}
private:
- std::atomic<bool> failed_{false};
+ Handle<Object> error_reason_;
};
//==========================================================================
@@ -3871,17 +2829,7 @@ class AsyncCompileJob::ExecuteAndFinishCompilationUnits : public CompileStep {
class AsyncCompileJob::FinishCompile : public CompileStep {
void RunInForeground() override {
TRACE_COMPILE("(5b) Finish compile...\n");
- if (FLAG_wasm_jit_to_native) {
- RecordStats(job_->compiled_module_->GetNativeModule(), job_->counters());
- } else {
- // At this point, compilation has completed. Update the code table.
- for (int i = FLAG_skip_compiling_wasm_funcs,
- e = job_->code_table_->length();
- i < e; ++i) {
- Object* val = job_->code_table_->get(i);
- if (val->IsCode()) RecordStats(Code::cast(val), job_->counters());
- }
- }
+ RecordStats(job_->compiled_module_->GetNativeModule(), job_->counters());
// Create heap objects for script and module bytes to be stored in the
// shared module data. Asm.js is not compiled asynchronously.
@@ -3914,7 +2862,7 @@ class AsyncCompileJob::FinishCompile : public CompileStep {
WasmSharedModuleData::New(job_->isolate_, module_wrapper,
Handle<SeqOneByteString>::cast(module_bytes),
script, asm_js_offset_table);
- job_->compiled_module_->OnWasmModuleDecodingComplete(shared);
+ job_->compiled_module_->set_shared(*shared);
script->set_wasm_compiled_module(*job_->compiled_module_);
// Finish the wasm script now and make it public to the debugger.
@@ -3976,16 +2924,15 @@ void AsyncStreamingProcessor::FinishAsyncCompileJobWithError(ResultBase error) {
ModuleResult result(nullptr);
result.MoveErrorFrom(error);
- // Check if there is already a ModuleCompiler, in which case we have to clean
- // it up as well.
- if (job_->compiler_) {
- // If {IsFinisherRunning} is true, then there is already a foreground task
- // in the task queue to execute the DecodeFail step. We do not have to start
- // a new task ourselves with DoSync.
- if (job_->compiler_->IsFinisherRunning()) {
- job_->NextStep<AsyncCompileJob::DecodeFail>(std::move(result));
- } else {
+ // Check if there is already a CompiledModule, in which case we have to clean
+ // up the CompilationState as well.
+ if (!job_->compiled_module_.is_null()) {
+ job_->compiled_module_->GetNativeModule()->compilation_state()->Abort();
+
+ if (job_->num_pending_foreground_tasks_ == 0) {
job_->DoSync<AsyncCompileJob::DecodeFail>(std::move(result));
+ } else {
+ job_->NextStep<AsyncCompileJob::DecodeFail>(std::move(result));
}
// Clear the {compilation_unit_builder_} if it exists. This is needed
@@ -4016,17 +2963,22 @@ bool AsyncStreamingProcessor::ProcessSection(SectionCode section_code,
uint32_t offset) {
TRACE_STREAMING("Process section %d ...\n", section_code);
if (compilation_unit_builder_) {
- // We reached a section after the code section, we do not need the the
+ // We reached a section after the code section, we do not need the
// compilation_unit_builder_ anymore.
CommitCompilationUnits();
compilation_unit_builder_.reset();
}
if (section_code == SectionCode::kUnknownSectionCode) {
- // No need to decode unknown sections, even the names section. If decoding
- // of the unknown section fails, compilation should succeed anyways, and
- // even decoding the names section is unnecessary because the result comes
- // too late for streaming compilation.
- return true;
+ Decoder decoder(bytes, offset);
+ section_code = ModuleDecoder::IdentifyUnknownSection(
+ decoder, bytes.start() + bytes.length());
+ if (section_code == SectionCode::kUnknownSectionCode) {
+ // Skip unknown sections that we do not know how to handle.
+ return true;
+ }
+ // Remove the unknown section tag from the payload bytes.
+ offset += decoder.position();
+ bytes = bytes.SubVector(decoder.position(), bytes.size());
}
constexpr bool verify_functions = false;
decoder_.DecodeSection(section_code, bytes, offset, verify_functions);
@@ -4057,12 +3009,15 @@ bool AsyncStreamingProcessor::ProcessCodeSectionHeader(size_t functions_count,
constexpr bool on_foreground = true;
job_->step_->Run(on_foreground);
- job_->outstanding_units_ = functions_count;
+ NativeModule* native_module = job_->compiled_module_->GetNativeModule();
+ native_module->compilation_state()->SetNumberOfFunctionsToCompile(
+ functions_count);
+
// Set outstanding_finishers_ to 2, because both the AsyncCompileJob and the
// AsyncStreamingProcessor have to finish.
job_->outstanding_finishers_.SetValue(2);
- compilation_unit_builder_.reset(
- new ModuleCompiler::CompilationUnitBuilder(job_->compiler_.get()));
+ compilation_unit_builder_.reset(new CompilationUnitBuilder(
+ native_module, job_->module_env_.get(), job_->centry_stub_));
return true;
}
@@ -4078,9 +3033,7 @@ bool AsyncStreamingProcessor::ProcessFunctionBody(Vector<const uint8_t> bytes,
uint32_t index = next_function_ + decoder_.module()->num_imported_functions;
const WasmFunction* func = &decoder_.module()->functions[index];
WasmName name = {nullptr, 0};
- compilation_unit_builder_->AddUnit(
- job_->module_env_.get(), job_->compiled_module_->GetNativeModule(),
- func, offset, bytes, name);
+ compilation_unit_builder_->AddUnit(func, offset, bytes, name);
}
++next_function_;
// This method always succeeds. The return value is necessary to comply with
@@ -4090,10 +3043,7 @@ bool AsyncStreamingProcessor::ProcessFunctionBody(Vector<const uint8_t> bytes,
void AsyncStreamingProcessor::CommitCompilationUnits() {
DCHECK(compilation_unit_builder_);
- if (compilation_unit_builder_->Commit()) {
- // Only restart background tasks when compilation units were committed.
- job_->RestartBackgroundTasks();
- }
+ compilation_unit_builder_->Commit();
}
void AsyncStreamingProcessor::OnFinishedChunk() {
@@ -4112,7 +3062,7 @@ void AsyncStreamingProcessor::OnFinishedStream(std::unique_ptr<uint8_t[]> bytes,
DCHECK(result.ok());
job_->module_ = std::move(result.val);
if (job_->DecrementAndCheckFinisherCount()) {
- if (!job_->compiler_) {
+ if (job_->compiled_module_.is_null()) {
// We are processing a WebAssembly module without code section. We need to
// prepare compilation first before we can finish it.
// {PrepareAndStartCompile} will call {FinishCompile} by itself if there
@@ -4136,20 +3086,248 @@ void AsyncStreamingProcessor::OnAbort() {
job_->Abort();
}
+CompilationState::CodeGenerationSchedule::CodeGenerationSchedule(
+ base::RandomNumberGenerator* random_number_generator, size_t max_memory)
+ : random_number_generator_(random_number_generator),
+ max_memory_(max_memory) {
+ DCHECK_NOT_NULL(random_number_generator_);
+ DCHECK_GT(max_memory_, 0);
+}
+
+void CompilationState::CodeGenerationSchedule::Schedule(
+ std::unique_ptr<compiler::WasmCompilationUnit> item) {
+ size_t cost = item->memory_cost();
+ schedule_.push_back(std::move(item));
+ allocated_memory_ += cost;
+}
+
+bool CompilationState::CodeGenerationSchedule::CanAcceptWork() const {
+ return allocated_memory_ <= max_memory_;
+}
+
+bool CompilationState::CodeGenerationSchedule::ShouldIncreaseWorkload() const {
+ // Half the memory is unused again, we can increase the workload again.
+ return allocated_memory_ <= max_memory_ / 2;
+}
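CanAcceptWork() and ShouldIncreaseWorkload() together form a simple hysteresis on the finisher's memory budget: work is accepted up to the full budget, but throttled background tasks are only restarted once usage drops to half of it. A tiny sketch of that accounting (names are illustrative):

#include <cstddef>

class MemoryBudget {
 public:
  explicit MemoryBudget(size_t max_bytes) : max_bytes_(max_bytes) {}
  void Add(size_t cost) { used_ += cost; }
  void Remove(size_t cost) { used_ -= cost; }
  bool CanAcceptWork() const { return used_ <= max_bytes_; }
  // Only ramp back up after usage has fallen to half the budget.
  bool ShouldIncreaseWorkload() const { return used_ <= max_bytes_ / 2; }

 private:
  size_t max_bytes_;
  size_t used_ = 0;
};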
+
+std::unique_ptr<compiler::WasmCompilationUnit>
+CompilationState::CodeGenerationSchedule::GetNext() {
+ DCHECK(!IsEmpty());
+ size_t index = GetRandomIndexInSchedule();
+ auto ret = std::move(schedule_[index]);
+ std::swap(schedule_[schedule_.size() - 1], schedule_[index]);
+ schedule_.pop_back();
+ allocated_memory_ -= ret->memory_cost();
+ return ret;
+}
+
+size_t CompilationState::CodeGenerationSchedule::GetRandomIndexInSchedule() {
+ double factor = random_number_generator_->NextDouble();
+ size_t index = (size_t)(factor * schedule_.size());
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, schedule_.size());
+ return index;
+}
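GetNext() above removes a randomly chosen unit in O(1) by swapping it with the last element and popping the back. A standalone sketch of that swap-and-pop idiom, using std::mt19937 in place of V8's random number generator:

#include <cassert>
#include <cstddef>
#include <random>
#include <utility>
#include <vector>

// Removes and returns a uniformly random element in O(1); the order of the
// remaining elements is not preserved, which is fine for a work schedule.
template <typename T>
T TakeRandom(std::vector<T>* items, std::mt19937* rng) {
  assert(!items->empty());
  std::uniform_int_distribution<size_t> dist(0, items->size() - 1);
  size_t index = dist(*rng);
  std::swap((*items)[index], items->back());
  T taken = std::move(items->back());
  items->pop_back();
  return taken;
}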
+
+void CompilationStateDeleter::operator()(
+ CompilationState* compilation_state) const {
+ delete compilation_state;
+}
+
+std::unique_ptr<CompilationState, CompilationStateDeleter> NewCompilationState(
+ Isolate* isolate) {
+ return std::unique_ptr<CompilationState, CompilationStateDeleter>(
+ new CompilationState(isolate));
+}
+
+CompilationState::CompilationState(internal::Isolate* isolate)
+ : isolate_(isolate),
+ executed_units_(isolate->random_number_generator(),
+ GetMaxUsableMemorySize(isolate) / 2),
+ max_background_tasks_(std::max(
+ 1, std::min(FLAG_wasm_num_compilation_tasks,
+ V8::GetCurrentPlatform()->NumberOfWorkerThreads()))) {
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
+ v8::Platform* platform = V8::GetCurrentPlatform();
+ foreground_task_runner_ = platform->GetForegroundTaskRunner(v8_isolate);
+ background_task_runner_ = platform->GetWorkerThreadsTaskRunner(v8_isolate);
+
+ // Register task manager for clean shutdown in case of an isolate shutdown.
+ isolate_->wasm_engine()->Register(&background_task_manager_);
+}
+
+CompilationState::~CompilationState() {
+ CancelAndWait();
+ foreground_task_manager_.CancelAndWait();
+}
+
+void CompilationState::SetNumberOfFunctionsToCompile(size_t num_functions) {
+ DCHECK(!failed());
+ outstanding_units_ = num_functions;
+}
+
+void CompilationState::AddCallback(
+ std::function<void(CompilationEvent, Handle<Object>)> callback) {
+ callbacks_.push_back(callback);
+}
+
+void CompilationState::AddCompilationUnits(
+ std::vector<std::unique_ptr<compiler::WasmCompilationUnit>>& units) {
+ {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ compilation_units_.insert(compilation_units_.end(),
+ std::make_move_iterator(units.begin()),
+ std::make_move_iterator(units.end()));
+ }
+ RestartBackgroundTasks(units.size());
+}
+
+std::unique_ptr<compiler::WasmCompilationUnit>
+CompilationState::GetNextCompilationUnit() {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ if (!compilation_units_.empty()) {
+ std::unique_ptr<compiler::WasmCompilationUnit> unit =
+ std::move(compilation_units_.back());
+ compilation_units_.pop_back();
+ return unit;
+ }
+
+ return std::unique_ptr<compiler::WasmCompilationUnit>();
+}
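GetNextCompilationUnit() and GetNextExecutedUnit() are both lock-guarded pops that return an empty unique_ptr when no work is available. A sketch of that shape with std::mutex; WorkItem is a placeholder type, not a V8 class:

#include <memory>
#include <mutex>
#include <utility>
#include <vector>

struct WorkItem { int function_index = 0; };  // placeholder payload

class WorkQueue {
 public:
  void Push(std::unique_ptr<WorkItem> item) {
    std::lock_guard<std::mutex> guard(mutex_);
    items_.push_back(std::move(item));
  }
  // An empty unique_ptr signals "no work", exactly like the methods above.
  std::unique_ptr<WorkItem> Pop() {
    std::lock_guard<std::mutex> guard(mutex_);
    if (items_.empty()) return nullptr;
    std::unique_ptr<WorkItem> item = std::move(items_.back());
    items_.pop_back();
    return item;
  }

 private:
  std::mutex mutex_;
  std::vector<std::unique_ptr<WorkItem>> items_;
};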
+
+std::unique_ptr<compiler::WasmCompilationUnit>
+CompilationState::GetNextExecutedUnit() {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ if (!executed_units_.IsEmpty()) {
+ return executed_units_.GetNext();
+ }
+
+ return std::unique_ptr<compiler::WasmCompilationUnit>();
+}
+
+bool CompilationState::HasCompilationUnitToFinish() {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ return !executed_units_.IsEmpty();
+}
+
+void CompilationState::OnError(Handle<Object> error,
+ NotifyCompilationCallback notify) {
+ Abort();
+ if (notify == NotifyCompilationCallback::kNotify) {
+ NotifyOnEvent(CompilationEvent::kFailedCompilation, error);
+ }
+}
+
+void CompilationState::OnFinishedUnit(NotifyCompilationCallback notify) {
+ DCHECK_GT(outstanding_units_, 0);
+ --outstanding_units_;
+
+ if (outstanding_units_ == 0) {
+ CancelAndWait();
+ if (notify == NotifyCompilationCallback::kNotify) {
+ NotifyOnEvent(CompilationEvent::kFinishedBaselineCompilation,
+ Handle<Object>::null());
+ }
+ }
+}
+
+void CompilationState::ScheduleUnitForFinishing(
+ std::unique_ptr<compiler::WasmCompilationUnit>& unit) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ executed_units_.Schedule(std::move(unit));
+
+ if (!finisher_is_running_ && !failed_) {
+ ScheduleFinisherTask();
+ // We set the flag here so that not more than one finisher is started.
+ finisher_is_running_ = true;
+ }
+}
+
+void CompilationState::CancelAndWait() {
+ background_task_manager_.CancelAndWait();
+ isolate_->wasm_engine()->Unregister(&background_task_manager_);
+}
+
+void CompilationState::OnBackgroundTaskStopped() {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ DCHECK_LE(1, num_background_tasks_);
+ --num_background_tasks_;
+}
+
+void CompilationState::RestartBackgroundTasks(size_t max) {
+ size_t num_restart = max;
+ {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ if (!executed_units_.ShouldIncreaseWorkload()) return;
+ DCHECK_LE(num_background_tasks_, max_background_tasks_);
+ if (num_background_tasks_ == max_background_tasks_) return;
+ num_restart = std::min(
+ num_restart, std::min(compilation_units_.size(),
+ max_background_tasks_ - num_background_tasks_));
+ num_background_tasks_ += num_restart;
+ }
+
+ // If --wasm-num-compilation-tasks=0 is passed, only spawn foreground
+ // tasks. This is used to make timing deterministic.
+ v8::TaskRunner* task_runner = FLAG_wasm_num_compilation_tasks > 0
+ ? background_task_runner_.get()
+ : foreground_task_runner_.get();
+ for (; num_restart > 0; --num_restart) {
+ task_runner->PostTask(base::make_unique<BackgroundCompileTask>(
+ this, &background_task_manager_));
+ }
+}
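RestartBackgroundTasks() clamps the number of newly posted tasks by both the pending compilation units and the free slots under max_background_tasks_. A sketch of just that clamping arithmetic (names are illustrative):

#include <algorithm>
#include <cstddef>

// How many new background tasks to post: never more than requested, than the
// queued compilation units, or than the remaining slots under the task limit.
size_t TasksToRestart(size_t requested, size_t pending_units,
                      size_t running_tasks, size_t max_tasks) {
  if (running_tasks >= max_tasks) return 0;
  return std::min(requested,
                  std::min(pending_units, max_tasks - running_tasks));
}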
+
+bool CompilationState::SetFinisherIsRunning(bool value) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ if (finisher_is_running_ == value) return false;
+ finisher_is_running_ = value;
+ return true;
+}
+
+void CompilationState::ScheduleFinisherTask() {
+ foreground_task_runner_->PostTask(
+ base::make_unique<FinishCompileTask>(this, &foreground_task_manager_));
+}
+
+bool CompilationState::StopBackgroundCompilationTaskForThrottling() {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ DCHECK_LE(1, num_background_tasks_);
+ if (executed_units_.CanAcceptWork()) return false;
+ --num_background_tasks_;
+ return true;
+}
+
+void CompilationState::Abort() {
+ {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ failed_ = true;
+ }
+ CancelAndWait();
+}
+
+void CompilationState::NotifyOnEvent(CompilationEvent event,
+ Handle<Object> error) {
+ for (auto& callback_function : callbacks_) {
+ callback_function(event, error);
+ }
+}
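AddCallback()/NotifyOnEvent() above amount to a plain observer list: the async job registers a lambda, and the compilation state fires every registered callback when an event occurs. A compact sketch of that pattern, with a stand-in event enum and payload type:

#include <functional>
#include <string>
#include <utility>
#include <vector>

enum class Event { kFinishedBaselineCompilation, kFailedCompilation };

class EventHub {
 public:
  void AddCallback(std::function<void(Event, const std::string&)> cb) {
    callbacks_.push_back(std::move(cb));
  }
  // Invokes every registered callback, in registration order.
  void Notify(Event event, const std::string& detail) {
    for (auto& cb : callbacks_) cb(event, detail);
  }

 private:
  std::vector<std::function<void(Event, const std::string&)>> callbacks_;
};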
+
void CompileJsToWasmWrappers(Isolate* isolate,
Handle<WasmCompiledModule> compiled_module,
Counters* counters) {
JSToWasmWrapperCache js_to_wasm_cache;
+ Handle<WeakCell> weak_instance(compiled_module->weak_owning_instance(),
+ isolate);
+ js_to_wasm_cache.SetWeakInstance(weak_instance);
int wrapper_index = 0;
Handle<FixedArray> export_wrappers(compiled_module->export_wrappers(),
isolate);
- Handle<FixedArray> code_table(compiled_module->code_table(), isolate);
NativeModule* native_module = compiled_module->GetNativeModule();
for (auto exp : compiled_module->shared()->module()->export_table) {
if (exp.kind != kExternalFunction) continue;
- WasmCodeWrapper wasm_code =
- EnsureExportedLazyDeoptData(isolate, Handle<WasmInstanceObject>::null(),
- code_table, native_module, exp.index);
+ wasm::WasmCode* wasm_code =
+ native_module->GetIndirectlyCallableCode(exp.index);
Handle<Code> wrapper_code = js_to_wasm_cache.CloneOrCompileJSToWasmWrapper(
isolate, compiled_module->shared()->module(), wasm_code, exp.index,
compiled_module->use_trap_handler());
@@ -4193,7 +3371,6 @@ Handle<Script> CreateWasmScript(Isolate* isolate,
} // namespace internal
} // namespace v8
-#undef WasmPatchWasmToWasmWrapper
#undef TRACE
#undef TRACE_CHAIN
#undef TRACE_COMPILE
diff --git a/deps/v8/src/wasm/module-compiler.h b/deps/v8/src/wasm/module-compiler.h
index b41ca28cea..917d9b2ef0 100644
--- a/deps/v8/src/wasm/module-compiler.h
+++ b/deps/v8/src/wasm/module-compiler.h
@@ -22,6 +22,16 @@ namespace wasm {
class ModuleCompiler;
class WasmCode;
+class CompilationState;
+
+struct CompilationStateDeleter {
+ void operator()(CompilationState* compilation_state) const;
+};
+
+// This wrapper for creating a CompilationState exists in order to avoid
+// having the CompilationState in the header file.
+std::unique_ptr<CompilationState, CompilationStateDeleter> NewCompilationState(
+ Isolate* isolate);
MaybeHandle<WasmModuleObject> CompileToModuleObject(
Isolate* isolate, ErrorThrower* thrower, std::unique_ptr<WasmModule> module,
@@ -50,50 +60,7 @@ V8_EXPORT_PRIVATE Handle<Script> CreateWasmScript(
// an error occurred. In the latter case, a pending exception has been set,
// which will be triggered when returning from the runtime function, i.e. the
// Illegal builtin will never be called.
-Address CompileLazy(Isolate* isolate);
-Handle<Code> CompileLazyOnGCHeap(Isolate* isolate);
-
-// This class orchestrates the lazy compilation of wasm functions. It is
-// triggered by the WasmCompileLazy builtin.
-// It contains the logic for compiling and specializing wasm functions, and
-// patching the calling wasm code.
-// Once we support concurrent lazy compilation, this class will contain the
-// logic to actually orchestrate parallel execution of wasm compilation jobs.
-// TODO(clemensh): Implement concurrent lazy compilation.
-class LazyCompilationOrchestrator {
- const WasmCode* CompileFunction(Isolate*, Handle<WasmInstanceObject>,
- int func_index);
-
- public:
- Handle<Code> CompileLazyOnGCHeap(Isolate*, Handle<WasmInstanceObject>,
- Handle<Code> caller, int call_offset,
- int exported_func_index, bool patch_caller);
- const wasm::WasmCode* CompileFromJsToWasm(Isolate*,
- Handle<WasmInstanceObject>,
- Handle<Code> caller,
- uint32_t exported_func_index);
- const wasm::WasmCode* CompileDirectCall(Isolate*, Handle<WasmInstanceObject>,
- Maybe<uint32_t>,
- const WasmCode* caller,
- int call_offset);
- const wasm::WasmCode* CompileIndirectCall(Isolate*,
- Handle<WasmInstanceObject>,
- uint32_t func_index);
-
-#ifdef DEBUG
- // Call this method in tests to disallow any further lazy compilation; then
- // call into the wasm instance again to verify that no lazy compilation is
- // triggered.
- void FreezeLazyCompilationForTesting() { frozen_ = true; }
- bool IsFrozenForTesting() const { return frozen_; }
-
- private:
- bool frozen_;
-#else
- void FreezeLazyCompilationForTesting() {}
- bool IsFrozenForTesting() { return false; }
-#endif
-};
+Address CompileLazy(Isolate* isolate, Handle<WasmInstanceObject> instance);
// Encapsulates all the state and steps of an asynchronous compilation.
// An asynchronous compile job consists of a number of tasks that are executed
@@ -124,7 +91,7 @@ class AsyncCompileJob {
class DecodeModule;
class DecodeFail;
class PrepareAndStartCompile;
- class ExecuteAndFinishCompilationUnits;
+ class CompileFailed;
class WaitForBackgroundTasks;
class FinishCompilationUnits;
class FinishCompile;
@@ -137,7 +104,7 @@ class AsyncCompileJob {
}
Counters* counters() const { return async_counters().get(); }
- void AsyncCompileFailed(ErrorThrower& thrower);
+ void AsyncCompileFailed(Handle<Object> error_reason);
void AsyncCompileSucceeded(Handle<Object> result);
@@ -145,8 +112,6 @@ class AsyncCompileJob {
void StartBackgroundTask();
- void RestartBackgroundTasks();
-
// Switches to the compilation step {Step} and starts a foreground task to
// execute it.
template <typename Step, typename... Args>
@@ -172,22 +137,19 @@ class AsyncCompileJob {
ModuleWireBytes wire_bytes_;
Handle<Context> context_;
Handle<JSPromise> module_promise_;
- std::unique_ptr<ModuleCompiler> compiler_;
std::unique_ptr<compiler::ModuleEnv> module_env_;
std::unique_ptr<WasmModule> module_;
std::vector<DeferredHandles*> deferred_handles_;
Handle<WasmModuleObject> module_object_;
Handle<WasmCompiledModule> compiled_module_;
- Handle<FixedArray> code_table_;
- size_t outstanding_units_ = 0;
+
std::unique_ptr<CompileStep> step_;
CancelableTaskManager background_task_manager_;
+ Handle<Code> centry_stub_;
std::shared_ptr<v8::TaskRunner> foreground_task_runner_;
std::shared_ptr<v8::TaskRunner> background_task_runner_;
- // The number of background tasks which stopped executing within a step.
- base::AtomicNumber<size_t> stopped_tasks_{0};
// For async compilation the AsyncCompileJob is the only finisher. For
// streaming compilation also the AsyncStreamingProcessor has to finish before
@@ -210,7 +172,6 @@ class AsyncCompileJob {
// StreamingDecoder.
std::shared_ptr<StreamingDecoder> stream_;
};
-
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index 109b2fc230..3bf22f1047 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -91,6 +91,12 @@ const char* SectionName(SectionCode code) {
namespace {
+bool validate_utf8(Decoder* decoder, WireBytesRef string) {
+ return unibrow::Utf8::ValidateEncoding(
+ decoder->start() + decoder->GetBufferRelativeOffset(string.offset()),
+ string.length());
+}
+
ValueType TypeOf(const WasmModule* module, const WasmInitExpr& expr) {
switch (expr.kind) {
case WasmInitExpr::kNone:
@@ -107,6 +113,8 @@ ValueType TypeOf(const WasmModule* module, const WasmInitExpr& expr) {
return kWasmF32;
case WasmInitExpr::kF64Const:
return kWasmF64;
+ case WasmInitExpr::kAnyRefConst:
+ return kWasmAnyRef;
default:
UNREACHABLE();
}
@@ -212,25 +220,11 @@ class WasmSectionIterator {
if (section_code == kUnknownSectionCode) {
// Check for the known "name" section.
- WireBytesRef string =
- wasm::consume_string(decoder_, true, "section name");
- if (decoder_.failed() || decoder_.pc() > section_end_) {
- section_code_ = kUnknownSectionCode;
- return;
- }
- const byte* section_name_start =
- decoder_.start() + decoder_.GetBufferRelativeOffset(string.offset());
+ section_code =
+ ModuleDecoder::IdentifyUnknownSection(decoder_, section_end_);
+ // As a side effect, the above function will forward the decoder to after
+ // the identifier string.
payload_start_ = decoder_.pc();
-
- TRACE(" +%d section name : \"%.*s\"\n",
- static_cast<int>(section_name_start - decoder_.start()),
- string.length() < 20 ? string.length() : 20, section_name_start);
-
- if (string.length() == num_chars(kNameString) &&
- strncmp(reinterpret_cast<const char*>(section_name_start),
- kNameString, num_chars(kNameString)) == 0) {
- section_code = kNameSectionCode;
- }
} else if (!IsValidSectionCode(section_code)) {
decoder_.errorf(decoder_.pc(), "unknown section code #0x%02x",
section_code);
@@ -468,7 +462,6 @@ class ModuleDecoderImpl : public Decoder {
module_->functions.push_back({nullptr, // sig
import->index, // func_index
0, // sig_index
- {0, 0}, // name_offset
{0, 0}, // code
true, // imported
false}); // exported
@@ -535,7 +528,6 @@ class ModuleDecoderImpl : public Decoder {
module_->functions.push_back({nullptr, // sig
func_index, // func_index
0, // sig_index
- {0, 0}, // name
{0, 0}, // code
false, // imported
false}); // exported
@@ -794,35 +786,13 @@ class ModuleDecoderImpl : public Decoder {
uint32_t name_payload_len = inner.consume_u32v("name payload length");
if (!inner.checkAvailable(name_payload_len)) break;
- // Decode function names, ignore the rest.
- // Local names will be decoded when needed.
- switch (name_type) {
- case NameSectionKindCode::kModule: {
- WireBytesRef name = wasm::consume_string(inner, false, "module name");
- if (inner.ok() && validate_utf8(&inner, name)) module_->name = name;
- break;
- }
- case NameSectionKindCode::kFunction: {
- uint32_t functions_count = inner.consume_u32v("functions count");
-
- for (; inner.ok() && functions_count > 0; --functions_count) {
- uint32_t function_index = inner.consume_u32v("function index");
- WireBytesRef name =
- wasm::consume_string(inner, false, "function name");
-
- // Be lenient with errors in the name section: Ignore illegal
- // or out-of-order indexes and non-UTF8 names. You can even assign
- // to the same function multiple times (last valid one wins).
- if (inner.ok() && function_index < module_->functions.size() &&
- validate_utf8(&inner, name)) {
- module_->functions[function_index].name = name;
- }
- }
- break;
- }
- default:
- inner.consume_bytes(name_payload_len, "name subsection payload");
- break;
+ // Decode module name, ignore the rest.
+ // Function and local names will be decoded when needed.
+ if (name_type == NameSectionKindCode::kModule) {
+ WireBytesRef name = wasm::consume_string(inner, false, "module name");
+ if (inner.ok() && validate_utf8(&inner, name)) module_->name = name;
+ } else {
+ inner.consume_bytes(name_payload_len, "name subsection payload");
}
}
// Skip the whole names section in the outer decoder.
@@ -895,7 +865,6 @@ class ModuleDecoderImpl : public Decoder {
std::unique_ptr<WasmFunction> function) {
pc_ = start_;
function->sig = consume_sig(zone);
- function->name = {0, 0};
function->code = {off(pc_), static_cast<uint32_t>(end_ - pc_)};
if (ok())
@@ -978,30 +947,26 @@ class ModuleDecoderImpl : public Decoder {
global->mutability = consume_mutability();
const byte* pos = pc();
global->init = consume_init_expr(module, kWasmStmt);
- switch (global->init.kind) {
- case WasmInitExpr::kGlobalIndex: {
- uint32_t other_index = global->init.val.global_index;
- if (other_index >= index) {
- errorf(pos,
- "invalid global index in init expression, "
- "index %u, other_index %u",
- index, other_index);
- } else if (module->globals[other_index].type != global->type) {
- errorf(pos,
- "type mismatch in global initialization "
- "(from global #%u), expected %s, got %s",
- other_index, WasmOpcodes::TypeName(global->type),
- WasmOpcodes::TypeName(module->globals[other_index].type));
- }
- break;
+ if (global->init.kind == WasmInitExpr::kGlobalIndex) {
+ uint32_t other_index = global->init.val.global_index;
+ if (other_index >= index) {
+ errorf(pos,
+ "invalid global index in init expression, "
+ "index %u, other_index %u",
+ index, other_index);
+ } else if (module->globals[other_index].type != global->type) {
+ errorf(pos,
+ "type mismatch in global initialization "
+ "(from global #%u), expected %s, got %s",
+ other_index, WasmOpcodes::TypeName(global->type),
+ WasmOpcodes::TypeName(module->globals[other_index].type));
+ }
+ } else {
+ if (global->type != TypeOf(module, global->init)) {
+ errorf(pos, "type error in global initialization, expected %s, got %s",
+ WasmOpcodes::TypeName(global->type),
+ WasmOpcodes::TypeName(TypeOf(module, global->init)));
}
- default:
- if (global->type != TypeOf(module, global->init)) {
- errorf(pos,
- "type error in global initialization, expected %s, got %s",
- WasmOpcodes::TypeName(global->type),
- WasmOpcodes::TypeName(TypeOf(module, global->init)));
- }
}
}
@@ -1039,7 +1004,8 @@ class ModuleDecoderImpl : public Decoder {
void VerifyFunctionBody(AccountingAllocator* allocator, uint32_t func_num,
const ModuleWireBytes& wire_bytes,
const WasmModule* module, WasmFunction* function) {
- WasmFunctionName func_name(function, wire_bytes.GetNameOrNull(function));
+ WasmFunctionName func_name(function,
+ wire_bytes.GetNameOrNull(function, module));
if (FLAG_trace_wasm_decoder || FLAG_trace_wasm_decode_time) {
OFStream os(stdout);
os << "Verifying wasm function " << func_name << std::endl;
@@ -1067,12 +1033,6 @@ class ModuleDecoderImpl : public Decoder {
return wasm::consume_string(*this, validate_utf8, name);
}
- bool validate_utf8(Decoder* decoder, WireBytesRef string) {
- return unibrow::Utf8::ValidateEncoding(
- decoder->start() + decoder->GetBufferRelativeOffset(string.offset()),
- string.length());
- }
-
uint32_t consume_sig_index(WasmModule* module, FunctionSig** sig) {
const byte* pos = pc_;
uint32_t sig_index = consume_u32v("signature index");
@@ -1243,6 +1203,14 @@ class ModuleDecoderImpl : public Decoder {
len = operand.length;
break;
}
+ case kExprRefNull: {
+ if (FLAG_experimental_wasm_anyref) {
+ expr.kind = WasmInitExpr::kAnyRefConst;
+ len = 0;
+ break;
+ }
+ V8_FALLTHROUGH;
+ }
default: {
error("invalid opcode in initialization expression");
expr.kind = WasmInitExpr::kNone;
@@ -1282,10 +1250,14 @@ class ModuleDecoderImpl : public Decoder {
case kLocalF64:
return kWasmF64;
default:
- if (IsWasm() && FLAG_experimental_wasm_simd) {
+ if (IsWasm()) {
switch (t) {
case kLocalS128:
- return kWasmS128;
+ if (FLAG_experimental_wasm_simd) return kWasmS128;
+ break;
+ case kLocalAnyRef:
+ if (FLAG_experimental_wasm_anyref) return kWasmAnyRef;
+ break;
default:
break;
}
@@ -1416,6 +1388,27 @@ ModuleResult ModuleDecoder::FinishDecoding(bool verify_functions) {
return impl_->FinishDecoding(verify_functions);
}
+SectionCode ModuleDecoder::IdentifyUnknownSection(Decoder& decoder,
+ const byte* end) {
+ WireBytesRef string = wasm::consume_string(decoder, true, "section name");
+ if (decoder.failed() || decoder.pc() > end) {
+ return kUnknownSectionCode;
+ }
+ const byte* section_name_start =
+ decoder.start() + decoder.GetBufferRelativeOffset(string.offset());
+
+ TRACE(" +%d section name : \"%.*s\"\n",
+ static_cast<int>(section_name_start - decoder.start()),
+ string.length() < 20 ? string.length() : 20, section_name_start);
+
+ if (string.length() == num_chars(kNameString) &&
+ strncmp(reinterpret_cast<const char*>(section_name_start), kNameString,
+ num_chars(kNameString)) == 0) {
+ return kNameSectionCode;
+ }
+ return kUnknownSectionCode;
+}
+
bool ModuleDecoder::ok() { return impl_->ok(); }
ModuleResult SyncDecodeWasmModule(Isolate* isolate, const byte* module_start,
@@ -1574,13 +1567,10 @@ std::vector<CustomSectionOffset> DecodeCustomSections(const byte* start,
return result;
}
-void DecodeLocalNames(const byte* module_start, const byte* module_end,
- LocalNames* result) {
- DCHECK_NOT_NULL(result);
- DCHECK(result->names.empty());
+namespace {
+bool FindSection(Decoder& decoder, SectionCode section_code) {
static constexpr int kModuleHeaderSize = 8;
- Decoder decoder(module_start, module_end);
decoder.consume_bytes(kModuleHeaderSize, "module header");
WasmSectionIterator section_iter(decoder);
@@ -1589,10 +1579,57 @@ void DecodeLocalNames(const byte* module_start, const byte* module_end,
section_iter.section_code() != kNameSectionCode) {
section_iter.advance(true);
}
- if (!section_iter.more()) return;
+ if (!section_iter.more()) return false;
// Reset the decoder to not read beyond the name section end.
decoder.Reset(section_iter.payload(), decoder.pc_offset());
+ return true;
+}
+
+} // namespace
+
+void DecodeFunctionNames(const byte* module_start, const byte* module_end,
+ std::unordered_map<uint32_t, WireBytesRef>* names) {
+ DCHECK_NOT_NULL(names);
+ DCHECK(names->empty());
+
+ Decoder decoder(module_start, module_end);
+ if (!FindSection(decoder, kNameSectionCode)) return;
+
+ while (decoder.ok() && decoder.more()) {
+ uint8_t name_type = decoder.consume_u8("name type");
+ if (name_type & 0x80) break; // no varuint7
+
+ uint32_t name_payload_len = decoder.consume_u32v("name payload length");
+ if (!decoder.checkAvailable(name_payload_len)) break;
+
+ if (name_type != NameSectionKindCode::kFunction) {
+ decoder.consume_bytes(name_payload_len, "name subsection payload");
+ continue;
+ }
+ uint32_t functions_count = decoder.consume_u32v("functions count");
+
+ for (; decoder.ok() && functions_count > 0; --functions_count) {
+ uint32_t function_index = decoder.consume_u32v("function index");
+ WireBytesRef name = wasm::consume_string(decoder, false, "function name");
+
+ // Be lenient with errors in the name section: Ignore non-UTF8 names. You
+ // can even assign to the same function multiple times (last valid one
+ // wins).
+ if (decoder.ok() && validate_utf8(&decoder, name)) {
+ names->insert(std::make_pair(function_index, name));
+ }
+ }
+ }
+}
+
+void DecodeLocalNames(const byte* module_start, const byte* module_end,
+ LocalNames* result) {
+ DCHECK_NOT_NULL(result);
+ DCHECK(result->names.empty());
+
+ Decoder decoder(module_start, module_end);
+ if (!FindSection(decoder, kNameSectionCode)) return;
while (decoder.ok() && decoder.more()) {
uint8_t name_type = decoder.consume_u8("name type");
diff --git a/deps/v8/src/wasm/module-decoder.h b/deps/v8/src/wasm/module-decoder.h
index f98a5ed66d..d7ce2c679a 100644
--- a/deps/v8/src/wasm/module-decoder.h
+++ b/deps/v8/src/wasm/module-decoder.h
@@ -107,6 +107,12 @@ V8_EXPORT_PRIVATE std::vector<CustomSectionOffset> DecodeCustomSections(
AsmJsOffsetsResult DecodeAsmJsOffsets(const byte* module_start,
const byte* module_end);
+// Decode the function names from the name section.
+// Returns the result as an unordered map. Only names with valid utf8 encoding
+// are stored and conflicts are resolved by choosing the last name read.
+void DecodeFunctionNames(const byte* module_start, const byte* module_end,
+ std::unordered_map<uint32_t, WireBytesRef>* names);
+
// Decode the local names assignment from the name section.
// Stores the result in the given {LocalNames} structure. The result will be
// empty if no name section is present. On encountering an error in the name
@@ -140,6 +146,16 @@ class ModuleDecoder {
bool ok();
+ // Translates the unknown section that the decoder is pointing to into an
+ // extended SectionCode if the unknown section is known to the decoder.
+ // Currently this only handles the name section.
+ // The decoder is expected to point after the section length and just before
+ // the identifier string of the unknown section.
+ // If a SectionCode other than kUnknownSectionCode is returned, the decoder
+ // will point right after the identifier string. Otherwise, the position is
+ // undefined.
+ static SectionCode IdentifyUnknownSection(Decoder& decoder, const byte* end);
+
private:
std::unique_ptr<ModuleDecoderImpl> impl_;
};
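
The new DecodeFunctionNames entry point above fills an index-to-name map from the name section, keeping only valid UTF-8 entries (last valid assignment wins). A minimal usage sketch, assuming only the raw wire bytes are at hand; the printf-style dump is illustrative, not part of the API:

    // Hedged sketch: decode function names and print the name of function 0.
    std::unordered_map<uint32_t, wasm::WireBytesRef> names;
    wasm::DecodeFunctionNames(module_start, module_end, &names);
    auto it = names.find(0);
    if (it != names.end()) {
      wasm::WireBytesRef ref = it->second;
      // WireBytesRef only stores offset/length; the bytes themselves live in
      // [module_start, module_end) and must outlive this lookup.
      printf("%.*s\n", static_cast<int>(ref.length()),
             reinterpret_cast<const char*>(module_start + ref.offset()));
    }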
diff --git a/deps/v8/src/wasm/wasm-code-manager.cc b/deps/v8/src/wasm/wasm-code-manager.cc
index 25f61d2e12..9811395661 100644
--- a/deps/v8/src/wasm/wasm-code-manager.cc
+++ b/deps/v8/src/wasm/wasm-code-manager.cc
@@ -31,6 +31,16 @@ namespace wasm {
namespace {
+// Binary predicate to perform lookups in {NativeModule::owned_code_} with a
+// given address into a code object. Use with {std::upper_bound} for example.
+struct WasmCodeUniquePtrComparator {
+ bool operator()(Address pc, const std::unique_ptr<WasmCode>& code) const {
+ DCHECK_NOT_NULL(pc);
+ DCHECK_NOT_NULL(code);
+ return pc < code->instructions().start();
+ }
+};
+
#if V8_TARGET_ARCH_X64
#define __ masm->
constexpr bool kModuleCanAllocateMoreMemory = false;
@@ -53,25 +63,17 @@ void GenerateJumpTrampoline(MacroAssembler* masm, Address target) {
const bool kModuleCanAllocateMoreMemory = true;
#endif
-void PatchTrampolineAndStubCalls(
- const WasmCode* original_code, const WasmCode* new_code,
- const std::unordered_map<Address, Address, AddressHasher>& reverse_lookup) {
- RelocIterator orig_it(
- original_code->instructions(), original_code->reloc_info(),
- original_code->constant_pool(), RelocInfo::kCodeTargetMask);
- for (RelocIterator it(new_code->instructions(), new_code->reloc_info(),
- new_code->constant_pool(), RelocInfo::kCodeTargetMask);
- !it.done(); it.next(), orig_it.next()) {
- Address old_target = orig_it.rinfo()->target_address();
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390X
- auto found = reverse_lookup.find(old_target);
- DCHECK(found != reverse_lookup.end());
- Address new_target = found->second;
-#else
- Address new_target = old_target;
-#endif
- it.rinfo()->set_target_address(new_target, SKIP_WRITE_BARRIER,
- SKIP_ICACHE_FLUSH);
+void RelocateCode(WasmCode* code, const WasmCode* orig,
+ WasmCode::FlushICache flush_icache) {
+ intptr_t delta = code->instructions().start() - orig->instructions().start();
+ for (RelocIterator it(code->instructions(), code->reloc_info(),
+ code->constant_pool(), RelocInfo::kApplyMask);
+ !it.done(); it.next()) {
+ it.rinfo()->apply(delta);
+ }
+ if (flush_icache) {
+ Assembler::FlushICache(code->instructions().start(),
+ code->instructions().size());
}
}
@@ -187,6 +189,45 @@ bool WasmCode::HasTrapHandlerIndex() const { return trap_handler_index_ >= 0; }
void WasmCode::ResetTrapHandlerIndex() { trap_handler_index_ = -1; }
+bool WasmCode::ShouldBeLogged(Isolate* isolate) {
+ return isolate->logger()->is_logging_code_events() ||
+ isolate->is_profiling() || FLAG_print_wasm_code || FLAG_print_code;
+}
+
+void WasmCode::LogCode(Isolate* isolate) const {
+ DCHECK(ShouldBeLogged(isolate));
+ if (native_module()->compiled_module()->has_shared() && index_.IsJust()) {
+ uint32_t index = this->index();
+ Handle<WasmSharedModuleData> shared_handle(
+ native_module()->compiled_module()->shared(), isolate);
+ int name_length;
+ Handle<String> name(
+ WasmSharedModuleData::GetFunctionName(isolate, shared_handle, index));
+ auto cname =
+ name->ToCString(AllowNullsFlag::DISALLOW_NULLS,
+ RobustnessFlag::ROBUST_STRING_TRAVERSAL, &name_length);
+ PROFILE(isolate,
+ CodeCreateEvent(CodeEventListener::FUNCTION_TAG, this,
+ {cname.get(), static_cast<size_t>(name_length)}));
+
+#ifdef ENABLE_DISASSEMBLER
+ if (FLAG_print_code || FLAG_print_wasm_code) {
+ // TODO(wasm): Use proper log files, here and elsewhere.
+ OFStream os(stdout);
+ os << "--- Wasm " << (is_liftoff() ? "liftoff" : "turbofan")
+ << " code ---\n";
+ this->Disassemble(cname.get(), isolate, os);
+ os << "--- End code ---\n";
+ }
+#endif
+
+ if (!source_positions().is_empty()) {
+ LOG_CODE_EVENT(isolate, CodeLinePosInfoRecordEvent(instructions().start(),
+ source_positions()));
+ }
+ }
+}
+
void WasmCode::Print(Isolate* isolate) const {
OFStream os(stdout);
Disassemble(nullptr, isolate, os);
@@ -203,8 +244,14 @@ void WasmCode::Disassemble(const char* name, Isolate* isolate,
#ifdef ENABLE_DISASSEMBLER
- size_t instruction_size =
- std::min(constant_pool_offset_, safepoint_table_offset_);
+ size_t instruction_size = body_size;
+ if (constant_pool_offset_ && constant_pool_offset_ < instruction_size) {
+ instruction_size = constant_pool_offset_;
+ }
+ if (safepoint_table_offset_ && safepoint_table_offset_ < instruction_size) {
+ instruction_size = safepoint_table_offset_;
+ }
+ DCHECK_LT(0, instruction_size);
os << "Instructions (size = " << instruction_size << ")\n";
// TODO(mtrofin): rework the dependency on isolate and code in
// Disassembler::Decode.
@@ -212,21 +259,15 @@ void WasmCode::Disassemble(const char* name, Isolate* isolate,
instructions().start() + instruction_size, nullptr);
os << "\n";
- // Anonymous functions don't have source positions.
- if (!IsAnonymous()) {
- Object* source_positions_or_undef =
- owner_->compiled_module()->source_positions()->get(index());
- if (!source_positions_or_undef->IsUndefined(isolate)) {
- os << "Source positions:\n pc offset position\n";
- for (SourcePositionTableIterator it(
- ByteArray::cast(source_positions_or_undef));
- !it.done(); it.Advance()) {
- os << std::setw(10) << std::hex << it.code_offset() << std::dec
- << std::setw(10) << it.source_position().ScriptOffset()
- << (it.is_statement() ? " statement" : "") << "\n";
- }
- os << "\n";
+ if (!source_positions().is_empty()) {
+ os << "Source positions:\n pc offset position\n";
+ for (SourcePositionTableIterator it(source_positions()); !it.done();
+ it.Advance()) {
+ os << std::setw(10) << std::hex << it.code_offset() << std::dec
+ << std::setw(10) << it.source_position().ScriptOffset()
+ << (it.is_statement() ? " statement" : "") << "\n";
}
+ os << "\n";
}
os << "RelocInfo (size = " << reloc_size_ << ")\n";
@@ -242,8 +283,6 @@ const char* GetWasmCodeKindAsString(WasmCode::Kind kind) {
switch (kind) {
case WasmCode::kFunction:
return "wasm function";
- case WasmCode::kWasmToWasmWrapper:
- return "wasm-to-wasm";
case WasmCode::kWasmToJsWrapper:
return "wasm-to-js";
case WasmCode::kLazyStub:
@@ -271,6 +310,137 @@ WasmCode::~WasmCode() {
}
}
+// Helper class to selectively clone and patch code from a
+// {source_native_module} into a {cloning_native_module}.
+class NativeModule::CloneCodeHelper {
+ public:
+ explicit CloneCodeHelper(NativeModule* source_native_module,
+ NativeModule* cloning_native_module);
+
+ void SelectForCloning(int32_t code_index);
+
+ void CloneAndPatchCode(bool patch_stub_to_stub_calls);
+
+ void PatchTrampolineAndStubCalls(const WasmCode* original_code,
+ const WasmCode* new_code,
+ WasmCode::FlushICache flush_icache);
+
+ private:
+ void PatchStubToStubCalls();
+
+ NativeModule* source_native_module_;
+ NativeModule* cloning_native_module_;
+ std::vector<uint32_t> selection_;
+ std::unordered_map<Address, Address, AddressHasher> reverse_lookup_;
+};
+
+NativeModule::CloneCodeHelper::CloneCodeHelper(
+ NativeModule* source_native_module, NativeModule* cloning_native_module)
+ : source_native_module_(source_native_module),
+ cloning_native_module_(cloning_native_module) {
+ for (auto& pair : source_native_module_->trampolines_) {
+ Address old_dest = pair.second;
+ auto local = cloning_native_module_->trampolines_.find(pair.first);
+ DCHECK(local != cloning_native_module_->trampolines_.end());
+ Address new_dest = local->second;
+ reverse_lookup_.emplace(old_dest, new_dest);
+ }
+
+ for (auto& pair : source_native_module_->stubs_) {
+ Address old_dest = pair.second->instructions().start();
+ auto local = cloning_native_module_->stubs_.find(pair.first);
+ DCHECK(local != cloning_native_module_->stubs_.end());
+ Address new_dest = local->second->instructions().start();
+ reverse_lookup_.emplace(old_dest, new_dest);
+ }
+}
+
+void NativeModule::CloneCodeHelper::SelectForCloning(int32_t code_index) {
+ selection_.emplace_back(code_index);
+}
+
+void NativeModule::CloneCodeHelper::CloneAndPatchCode(
+ bool patch_stub_to_stub_calls) {
+ if (patch_stub_to_stub_calls) {
+ PatchStubToStubCalls();
+ }
+
+ WasmCode* anonymous_lazy_builtin = nullptr;
+ for (uint32_t index : selection_) {
+ const WasmCode* original_code = source_native_module_->GetCode(index);
+ switch (original_code->kind()) {
+ case WasmCode::kLazyStub: {
+ // Use the first anonymous lazy compile stub hit in this loop as the
+ // canonical copy for all further ones by remembering it locally via
+ // the {anonymous_lazy_builtin} variable.
+ if (!original_code->IsAnonymous()) {
+ WasmCode* new_code = cloning_native_module_->CloneCode(
+ original_code, WasmCode::kNoFlushICache);
+ PatchTrampolineAndStubCalls(original_code, new_code,
+ WasmCode::kFlushICache);
+ break;
+ }
+ if (anonymous_lazy_builtin == nullptr) {
+ WasmCode* new_code = cloning_native_module_->CloneCode(
+ original_code, WasmCode::kNoFlushICache);
+ PatchTrampolineAndStubCalls(original_code, new_code,
+ WasmCode::kFlushICache);
+ anonymous_lazy_builtin = new_code;
+ }
+ cloning_native_module_->code_table_[index] = anonymous_lazy_builtin;
+ } break;
+ case WasmCode::kFunction: {
+ WasmCode* new_code = cloning_native_module_->CloneCode(
+ original_code, WasmCode::kNoFlushICache);
+ PatchTrampolineAndStubCalls(original_code, new_code,
+ WasmCode::kFlushICache);
+ } break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+void NativeModule::CloneCodeHelper::PatchStubToStubCalls() {
+ for (auto& pair : cloning_native_module_->stubs_) {
+ WasmCode* new_stub = pair.second;
+ WasmCode* old_stub = source_native_module_->stubs_.find(pair.first)->second;
+ PatchTrampolineAndStubCalls(old_stub, new_stub, WasmCode::kFlushICache);
+ }
+}
+
+void NativeModule::CloneCodeHelper::PatchTrampolineAndStubCalls(
+ const WasmCode* original_code, const WasmCode* new_code,
+ WasmCode::FlushICache flush_icache) {
+ // Relocate everything in kApplyMask using this delta, and patch all code
+ // targets to call the new trampolines and stubs.
+ intptr_t delta =
+ new_code->instructions().start() - original_code->instructions().start();
+ int mask = RelocInfo::kApplyMask | RelocInfo::kCodeTargetMask;
+ RelocIterator orig_it(original_code->instructions(),
+ original_code->reloc_info(),
+ original_code->constant_pool(), mask);
+ for (RelocIterator it(new_code->instructions(), new_code->reloc_info(),
+ new_code->constant_pool(), mask);
+ !it.done(); it.next(), orig_it.next()) {
+ if (RelocInfo::IsCodeTarget(it.rinfo()->rmode())) {
+ Address target = orig_it.rinfo()->target_address();
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390X
+ auto found = reverse_lookup_.find(target);
+ DCHECK(found != reverse_lookup_.end());
+ target = found->second;
+#endif
+ it.rinfo()->set_target_address(target, SKIP_WRITE_BARRIER);
+ } else {
+ it.rinfo()->apply(delta);
+ }
+ }
+ if (flush_icache) {
+ Assembler::FlushICache(new_code->instructions().start(),
+ new_code->instructions().size());
+ }
+}
+
base::AtomicNumber<size_t> NativeModule::next_id_;
NativeModule::NativeModule(uint32_t num_functions, uint32_t num_imports,
@@ -279,6 +449,8 @@ NativeModule::NativeModule(uint32_t num_functions, uint32_t num_imports,
: instance_id(next_id_.Increment(1)),
code_table_(num_functions),
num_imported_functions_(num_imports),
+ compilation_state_(NewCompilationState(
+ reinterpret_cast<Isolate*>(code_manager->isolate_))),
free_memory_(reinterpret_cast<Address>(mem->address()),
reinterpret_cast<Address>(mem->end())),
wasm_code_manager_(code_manager),
@@ -289,25 +461,37 @@ NativeModule::NativeModule(uint32_t num_functions, uint32_t num_imports,
owned_code_.reserve(num_functions);
}
-void NativeModule::ResizeCodeTableForTest(size_t last_index) {
- size_t new_size = last_index + 1;
- if (new_size > FunctionCount()) {
- Isolate* isolate = compiled_module()->GetIsolate();
- code_table_.resize(new_size);
- int grow_by = static_cast<int>(new_size) -
- compiled_module()->source_positions()->length();
- Handle<FixedArray> source_positions(compiled_module()->source_positions(),
- isolate);
- source_positions = isolate->factory()->CopyFixedArrayAndGrow(
- source_positions, grow_by, TENURED);
- compiled_module()->set_source_positions(*source_positions);
+void NativeModule::ResizeCodeTableForTesting(size_t num_functions,
+ size_t max_functions) {
+ DCHECK_LE(num_functions, max_functions);
+ if (num_imported_functions_ == num_functions) {
+ // For some tests, the code table might have been initialized to store
+ // a number of imported functions on creation. If that is the case,
+ // we need to retroactively reserve the space.
+ DCHECK_EQ(code_table_.capacity(), num_imported_functions_);
+ DCHECK_EQ(code_table_.size(), num_imported_functions_);
+ DCHECK_EQ(num_functions, 1);
+ code_table_.reserve(max_functions);
+ } else {
+ DCHECK_GT(num_functions, FunctionCount());
+ if (code_table_.capacity() == 0) {
+ code_table_.reserve(max_functions);
+ }
+ DCHECK_EQ(code_table_.capacity(), max_functions);
+ code_table_.resize(num_functions);
}
}
WasmCode* NativeModule::GetCode(uint32_t index) const {
+ DCHECK_LT(index, FunctionCount());
return code_table_[index];
}
+void NativeModule::SetCode(uint32_t index, WasmCode* wasm_code) {
+ DCHECK_LT(index, FunctionCount());
+ code_table_[index] = wasm_code;
+}
+
uint32_t NativeModule::FunctionCount() const {
DCHECK_LE(code_table_.size(), std::numeric_limits<uint32_t>::max());
return static_cast<uint32_t>(code_table_.size());
@@ -316,37 +500,40 @@ uint32_t NativeModule::FunctionCount() const {
WasmCode* NativeModule::AddOwnedCode(
Vector<const byte> orig_instructions,
std::unique_ptr<const byte[]> reloc_info, size_t reloc_size,
+ std::unique_ptr<const byte[]> source_pos, size_t source_pos_size,
Maybe<uint32_t> index, WasmCode::Kind kind, size_t constant_pool_offset,
uint32_t stack_slots, size_t safepoint_table_offset,
size_t handler_table_offset,
std::shared_ptr<ProtectedInstructions> protected_instructions,
- bool is_liftoff) {
+ WasmCode::Tier tier, WasmCode::FlushICache flush_icache) {
// both allocation and insertion in owned_code_ happen in the same critical
// section, thus ensuring owned_code_'s elements are rarely if ever moved.
base::LockGuard<base::Mutex> lock(&allocation_mutex_);
Address executable_buffer = AllocateForCode(orig_instructions.size());
if (executable_buffer == nullptr) {
- V8::FatalProcessOutOfMemory("NativeModule::AddOwnedCode");
+ V8::FatalProcessOutOfMemory(nullptr, "NativeModule::AddOwnedCode");
UNREACHABLE();
}
memcpy(executable_buffer, orig_instructions.start(),
orig_instructions.size());
std::unique_ptr<WasmCode> code(new WasmCode(
{executable_buffer, orig_instructions.size()}, std::move(reloc_info),
- reloc_size, this, index, kind, constant_pool_offset, stack_slots,
- safepoint_table_offset, handler_table_offset,
- std::move(protected_instructions), is_liftoff));
+ reloc_size, std::move(source_pos), source_pos_size, this, index, kind,
+ constant_pool_offset, stack_slots, safepoint_table_offset,
+ handler_table_offset, std::move(protected_instructions), tier));
WasmCode* ret = code.get();
// TODO(mtrofin): We allocate in increasing address order, and
// even if we end up with segmented memory, we may end up only with a few
// large moves - if, for example, a new segment is below the current ones.
auto insert_before = std::upper_bound(owned_code_.begin(), owned_code_.end(),
- code, owned_code_comparer_);
+ ret->instructions().start(),
+ WasmCodeUniquePtrComparator());
owned_code_.insert(insert_before, std::move(code));
- Assembler::FlushICache(ret->instructions().start(),
- ret->instructions().size());
-
+ if (flush_icache) {
+ Assembler::FlushICache(ret->instructions().start(),
+ ret->instructions().size());
+ }
return ret;
}
@@ -355,8 +542,6 @@ WasmCode* NativeModule::AddCodeCopy(Handle<Code> code, WasmCode::Kind kind,
WasmCode* ret = AddAnonymousCode(code, kind);
code_table_[index] = ret;
ret->index_ = Just(index);
- compiled_module()->source_positions()->set(static_cast<int>(index),
- code->source_position_table());
return ret;
}
@@ -375,13 +560,18 @@ void NativeModule::SetLazyBuiltin(Handle<Code> code) {
}
WasmCompiledModule* NativeModule::compiled_module() const {
+ DCHECK_NOT_NULL(compiled_module_);
return *compiled_module_;
}
void NativeModule::SetCompiledModule(
Handle<WasmCompiledModule> compiled_module) {
- DCHECK(compiled_module_.is_null());
- compiled_module_ = compiled_module;
+ DCHECK_NULL(compiled_module_);
+ compiled_module_ = compiled_module->GetIsolate()
+ ->global_handles()
+ ->Create(*compiled_module)
+ .location();
+ GlobalHandles::MakeWeak(reinterpret_cast<Object***>(&compiled_module_));
}
WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code,
@@ -391,18 +581,36 @@ WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code,
reloc_info.reset(new byte[code->relocation_size()]);
memcpy(reloc_info.get(), code->relocation_start(), code->relocation_size());
}
+ std::unique_ptr<byte[]> source_pos;
+ Handle<ByteArray> source_pos_table(code->SourcePositionTable());
+ if (source_pos_table->length() > 0) {
+ source_pos.reset(new byte[source_pos_table->length()]);
+ source_pos_table->copy_out(0, source_pos.get(), source_pos_table->length());
+ }
std::shared_ptr<ProtectedInstructions> protected_instructions(
new ProtectedInstructions(0));
- WasmCode* ret = AddOwnedCode(
- {code->instruction_start(),
- static_cast<size_t>(code->instruction_size())},
- std::move(reloc_info), static_cast<size_t>(code->relocation_size()),
- Nothing<uint32_t>(), kind, code->constant_pool_offset(),
- (code->has_safepoint_info() ? code->stack_slots() : 0),
- (code->has_safepoint_info() ? code->safepoint_table_offset() : 0),
- code->handler_table_offset(), protected_instructions, false);
- if (ret == nullptr) return nullptr;
- intptr_t delta = ret->instructions().start() - code->instruction_start();
+ Vector<const byte> orig_instructions(
+ code->raw_instruction_start(),
+ static_cast<size_t>(code->raw_instruction_size()));
+ int stack_slots = code->has_safepoint_info() ? code->stack_slots() : 0;
+ int safepoint_table_offset =
+ code->has_safepoint_info() ? code->safepoint_table_offset() : 0;
+ WasmCode* ret =
+ AddOwnedCode(orig_instructions, // instructions
+ std::move(reloc_info), // reloc_info
+ static_cast<size_t>(code->relocation_size()), // reloc_size
+ std::move(source_pos), // source positions
+ static_cast<size_t>(source_pos_table->length()),
+ Nothing<uint32_t>(), // index
+ kind, // kind
+ code->constant_pool_offset(), // constant_pool_offset
+ stack_slots, // stack_slots
+ safepoint_table_offset, // safepoint_table_offset
+ code->handler_table_offset(), // handler_table_offset
+ protected_instructions, // protected_instructions
+ WasmCode::kOther, // tier
+ WasmCode::kNoFlushICache); // flush_icache
+ intptr_t delta = ret->instructions().start() - code->raw_instruction_start();
int mask = RelocInfo::kApplyMask | RelocInfo::kCodeTargetMask |
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
@@ -423,6 +631,14 @@ WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code,
}
}
}
+ // Flush the i-cache here instead of in AddOwnedCode, to include the changes
+ // made while iterating over the RelocInfo above.
+ Assembler::FlushICache(ret->instructions().start(),
+ ret->instructions().size());
+ if (FLAG_print_wasm_code) {
+ // TODO(mstarzinger): don't need the isolate here.
+ ret->Print(code->GetIsolate());
+ }
return ret;
}
@@ -430,21 +646,27 @@ WasmCode* NativeModule::AddCode(
const CodeDesc& desc, uint32_t frame_slots, uint32_t index,
size_t safepoint_table_offset, size_t handler_table_offset,
std::unique_ptr<ProtectedInstructions> protected_instructions,
- bool is_liftoff) {
+ Handle<ByteArray> source_pos_table, WasmCode::Tier tier) {
std::unique_ptr<byte[]> reloc_info;
if (desc.reloc_size) {
reloc_info.reset(new byte[desc.reloc_size]);
memcpy(reloc_info.get(), desc.buffer + desc.buffer_size - desc.reloc_size,
desc.reloc_size);
}
+ std::unique_ptr<byte[]> source_pos;
+ if (source_pos_table->length() > 0) {
+ source_pos.reset(new byte[source_pos_table->length()]);
+ source_pos_table->copy_out(0, source_pos.get(), source_pos_table->length());
+ }
TurboAssembler* origin = reinterpret_cast<TurboAssembler*>(desc.origin);
WasmCode* ret = AddOwnedCode(
{desc.buffer, static_cast<size_t>(desc.instr_size)},
- std::move(reloc_info), static_cast<size_t>(desc.reloc_size), Just(index),
- WasmCode::kFunction, desc.instr_size - desc.constant_pool_size,
- frame_slots, safepoint_table_offset, handler_table_offset,
- std::move(protected_instructions), is_liftoff);
- if (ret == nullptr) return nullptr;
+ std::move(reloc_info), static_cast<size_t>(desc.reloc_size),
+ std::move(source_pos), static_cast<size_t>(source_pos_table->length()),
+ Just(index), WasmCode::kFunction,
+ desc.instr_size - desc.constant_pool_size, frame_slots,
+ safepoint_table_offset, handler_table_offset,
+ std::move(protected_instructions), tier, WasmCode::kNoFlushICache);
code_table_[index] = ret;
// TODO(mtrofin): this is a copy and paste from Code::CopyFrom.
@@ -461,7 +683,7 @@ WasmCode* NativeModule::AddCode(
RelocInfo::Mode mode = it.rinfo()->rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
Handle<HeapObject> p = it.rinfo()->target_object_handle(origin);
- DCHECK_EQ(*p, p->GetIsolate()->heap()->undefined_value());
+ DCHECK(p->IsUndefined(p->GetIsolate()) || p->IsNull(p->GetIsolate()));
it.rinfo()->set_target_object(*p, SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
} else if (RelocInfo::IsCodeTarget(mode)) {
// rewrite code handles to direct pointers to the first instruction in the
@@ -479,26 +701,43 @@ WasmCode* NativeModule::AddCode(
it.rinfo()->apply(delta);
}
}
+ // Flush the i-cache here instead of in AddOwnedCode, to include the changes
+ // made while iterating over the RelocInfo above.
+ Assembler::FlushICache(ret->instructions().start(),
+ ret->instructions().size());
return ret;
}
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390X
Address NativeModule::CreateTrampolineTo(Handle<Code> code) {
MacroAssembler masm(code->GetIsolate(), nullptr, 0, CodeObjectRequired::kNo);
- Address dest = code->instruction_start();
+ Address dest = code->raw_instruction_start();
GenerateJumpTrampoline(&masm, dest);
CodeDesc code_desc;
masm.GetCode(nullptr, &code_desc);
- WasmCode* wasm_code = AddOwnedCode(
- {code_desc.buffer, static_cast<size_t>(code_desc.instr_size)}, nullptr, 0,
- Nothing<uint32_t>(), WasmCode::kTrampoline, 0, 0, 0, 0, {}, false);
+ Vector<const byte> instructions(code_desc.buffer,
+ static_cast<size_t>(code_desc.instr_size));
+ WasmCode* wasm_code = AddOwnedCode(instructions, // instructions
+ nullptr, // reloc_info
+ 0, // reloc_size
+ nullptr, // source_pos
+ 0, // source_pos_size
+ Nothing<uint32_t>(), // index
+ WasmCode::kTrampoline, // kind
+ 0, // constant_pool_offset
+ 0, // stack_slots
+ 0, // safepoint_table_offset
+ 0, // handler_table_offset
+ {}, // protected_instructions
+ WasmCode::kOther, // tier
+ WasmCode::kFlushICache); // flush_icache
Address ret = wasm_code->instructions().start();
trampolines_.emplace(std::make_pair(dest, ret));
return ret;
}
#else
Address NativeModule::CreateTrampolineTo(Handle<Code> code) {
- Address ret = code->instruction_start();
+ Address ret = code->raw_instruction_start();
trampolines_.insert(std::make_pair(ret, ret));
return ret;
}
@@ -516,7 +755,7 @@ Address NativeModule::GetLocalAddressFor(Handle<Code> code) {
}
return copy->second->instructions().start();
} else {
- Address index = code->instruction_start();
+ Address index = code->raw_instruction_start();
auto trampoline_iter = trampolines_.find(index);
if (trampoline_iter == trampolines_.end()) {
return CreateTrampolineTo(code);
@@ -526,45 +765,6 @@ Address NativeModule::GetLocalAddressFor(Handle<Code> code) {
}
}
-WasmCode* NativeModule::GetExportedWrapper(uint32_t index) {
- auto found = exported_wasm_to_wasm_wrappers_.find(index);
- if (found != exported_wasm_to_wasm_wrappers_.end()) {
- return found->second;
- }
- return nullptr;
-}
-
-WasmCode* NativeModule::AddExportedWrapper(Handle<Code> code, uint32_t index) {
- WasmCode* ret = AddAnonymousCode(code, WasmCode::kWasmToWasmWrapper);
- ret->index_ = Just(index);
- exported_wasm_to_wasm_wrappers_.insert(std::make_pair(index, ret));
- return ret;
-}
-
-void NativeModule::LinkAll() {
- for (uint32_t index = 0; index < code_table_.size(); ++index) {
- Link(index);
- }
-}
-
-void NativeModule::Link(uint32_t index) {
- WasmCode* code = code_table_[index];
- // skip imports
- if (!code) return;
- int mode_mask = RelocInfo::ModeMask(RelocInfo::WASM_CALL);
- for (RelocIterator it(code->instructions(), code->reloc_info(),
- code->constant_pool(), mode_mask);
- !it.done(); it.next()) {
- uint32_t index = GetWasmCalleeTag(it.rinfo());
- const WasmCode* target = GetCode(index);
- if (target == nullptr) continue;
- Address target_addr = target->instructions().start();
- DCHECK_NOT_NULL(target);
- it.rinfo()->set_wasm_call_address(target_addr,
- ICacheFlushMode::SKIP_ICACHE_FLUSH);
- }
-}
-
Address NativeModule::AllocateForCode(size_t size) {
// this happens under a lock assumed by the caller.
size = RoundUp(size, kCodeAlignment);
@@ -639,10 +839,8 @@ Address NativeModule::AllocateForCode(size_t size) {
WasmCode* NativeModule::Lookup(Address pc) {
if (owned_code_.empty()) return nullptr;
- // Make a fake WasmCode temp, to look into owned_code_
- std::unique_ptr<WasmCode> temp(new WasmCode(pc));
- auto iter = std::upper_bound(owned_code_.begin(), owned_code_.end(), temp,
- owned_code_comparer_);
+ auto iter = std::upper_bound(owned_code_.begin(), owned_code_.end(), pc,
+ WasmCodeUniquePtrComparator());
if (iter == owned_code_.begin()) return nullptr;
--iter;
WasmCode* candidate = (*iter).get();
@@ -655,16 +853,41 @@ WasmCode* NativeModule::Lookup(Address pc) {
return nullptr;
}
-WasmCode* NativeModule::CloneLazyBuiltinInto(const WasmCode* code,
- uint32_t index) {
- DCHECK_EQ(wasm::WasmCode::kLazyStub, code->kind());
- WasmCode* ret = CloneCode(code);
- code_table_[index] = ret;
- ret->index_ = Just(index);
- return ret;
+WasmCode* NativeModule::GetIndirectlyCallableCode(uint32_t func_index) {
+ WasmCode* code = GetCode(func_index);
+ if (!code || code->kind() != WasmCode::kLazyStub) {
+ return code;
+ }
+#if DEBUG
+ auto num_imported_functions =
+ compiled_module()->shared()->module()->num_imported_functions;
+ if (func_index < num_imported_functions) {
+ DCHECK(!code->IsAnonymous());
+ }
+#endif
+ if (!code->IsAnonymous()) {
+ // If the function wasn't imported, its index should match.
+ DCHECK_IMPLIES(func_index >= num_imported_functions,
+ func_index == code->index());
+ return code;
+ }
+ if (!lazy_compile_stubs_.get()) {
+ lazy_compile_stubs_ =
+ base::make_unique<std::vector<WasmCode*>>(FunctionCount());
+ }
+ WasmCode* cloned_code = lazy_compile_stubs_.get()->at(func_index);
+ if (cloned_code == nullptr) {
+ cloned_code = CloneCode(code, WasmCode::kNoFlushICache);
+ RelocateCode(cloned_code, code, WasmCode::kFlushICache);
+ cloned_code->index_ = Just(func_index);
+ lazy_compile_stubs_.get()->at(func_index) = cloned_code;
+ }
+ DCHECK_EQ(func_index, cloned_code->index());
+ return cloned_code;
}
-void NativeModule::CloneTrampolinesAndStubs(const NativeModule* other) {
+void NativeModule::CloneTrampolinesAndStubs(
+ const NativeModule* other, WasmCode::FlushICache flush_icache) {
for (auto& pair : other->trampolines_) {
Address key = pair.first;
Address local =
@@ -674,41 +897,84 @@ void NativeModule::CloneTrampolinesAndStubs(const NativeModule* other) {
}
for (auto& pair : other->stubs_) {
uint32_t key = pair.first;
- WasmCode* clone = CloneCode(pair.second);
- DCHECK_NOT_NULL(clone);
+ WasmCode* clone = CloneCode(pair.second, flush_icache);
stubs_.emplace(std::make_pair(key, clone));
}
}
-WasmCode* NativeModule::CloneCode(const WasmCode* original_code) {
+WasmCode* NativeModule::CloneCode(const WasmCode* original_code,
+ WasmCode::FlushICache flush_icache) {
std::unique_ptr<byte[]> reloc_info;
if (original_code->reloc_info().size() > 0) {
reloc_info.reset(new byte[original_code->reloc_info().size()]);
memcpy(reloc_info.get(), original_code->reloc_info().start(),
original_code->reloc_info().size());
}
+ std::unique_ptr<byte[]> source_pos;
+ if (original_code->source_positions().size() > 0) {
+ source_pos.reset(new byte[original_code->source_positions().size()]);
+ memcpy(source_pos.get(), original_code->source_positions().start(),
+ original_code->source_positions().size());
+ }
WasmCode* ret = AddOwnedCode(
original_code->instructions(), std::move(reloc_info),
- original_code->reloc_info().size(), original_code->index_,
+ original_code->reloc_info().size(), std::move(source_pos),
+ original_code->source_positions().size(), original_code->index_,
original_code->kind(), original_code->constant_pool_offset_,
original_code->stack_slots(), original_code->safepoint_table_offset_,
original_code->handler_table_offset_,
- original_code->protected_instructions_, original_code->is_liftoff());
+ original_code->protected_instructions_, original_code->tier(),
+ flush_icache);
if (!ret->IsAnonymous()) {
code_table_[ret->index()] = ret;
}
- intptr_t delta =
- ret->instructions().start() - original_code->instructions().start();
- for (RelocIterator it(ret->instructions(), ret->reloc_info(),
- ret->constant_pool(), RelocInfo::kApplyMask);
- !it.done(); it.next()) {
- it.rinfo()->apply(delta);
- }
return ret;
}
+void NativeModule::UnpackAndRegisterProtectedInstructions() {
+ for (uint32_t i = num_imported_functions(), e = FunctionCount(); i < e; ++i) {
+ WasmCode* code = GetCode(i);
+
+ if (code == nullptr) continue;
+ if (code->kind() != wasm::WasmCode::kFunction) continue;
+ if (code->HasTrapHandlerIndex()) continue;
+
+ Address base = code->instructions().start();
+
+ size_t size = code->instructions().size();
+ const int index =
+ RegisterHandlerData(base, size, code->protected_instructions().size(),
+ code->protected_instructions().data());
+
+ // TODO(eholk): if index is negative, fail.
+ CHECK_LE(0, index);
+ code->set_trap_handler_index(static_cast<size_t>(index));
+ }
+}
+
+void NativeModule::ReleaseProtectedInstructions() {
+ for (uint32_t i = num_imported_functions(), e = FunctionCount(); i < e; ++i) {
+ WasmCode* wasm_code = GetCode(i);
+ if (wasm_code->HasTrapHandlerIndex()) {
+ CHECK_LT(wasm_code->trap_handler_index(),
+ static_cast<size_t>(std::numeric_limits<int>::max()));
+ trap_handler::ReleaseHandlerData(
+ static_cast<int>(wasm_code->trap_handler_index()));
+ wasm_code->ResetTrapHandlerIndex();
+ }
+ }
+}
+
NativeModule::~NativeModule() {
TRACE_HEAP("Deleting native module: %p\n", reinterpret_cast<void*>(this));
+ // Clear the handle at the beginning of the destructor to make it robust
+ // against potential GCs in the rest of the destructor.
+ if (compiled_module_ != nullptr) {
+ Isolate* isolate = compiled_module()->GetIsolate();
+ isolate->global_handles()->Destroy(
+ reinterpret_cast<Object**>(compiled_module_));
+ compiled_module_ = nullptr;
+ }
wasm_code_manager_->FreeNativeModuleMemories(this);
}
@@ -828,7 +1094,8 @@ std::unique_ptr<NativeModule> WasmCodeManager::NewNativeModule(
return ret;
}
- V8::FatalProcessOutOfMemory("WasmCodeManager::NewNativeModule");
+ V8::FatalProcessOutOfMemory(reinterpret_cast<Isolate*>(isolate_),
+ "WasmCodeManager::NewNativeModule");
return nullptr;
}
@@ -885,63 +1152,17 @@ std::unique_ptr<NativeModule> NativeModule::Clone() {
TRACE_HEAP("%zu cloned from %zu\n", ret->instance_id, instance_id);
if (!ret) return ret;
- ret->CloneTrampolinesAndStubs(this);
+ // Clone trampolines and stubs. They are later patched, so no icache flush
+ // needed yet.
+ ret->CloneTrampolinesAndStubs(this, WasmCode::kNoFlushICache);
- std::unordered_map<Address, Address, AddressHasher> reverse_lookup;
- for (auto& pair : trampolines_) {
- Address old_dest = pair.second;
- auto local = ret->trampolines_.find(pair.first);
- DCHECK(local != ret->trampolines_.end());
- Address new_dest = local->second;
- reverse_lookup.emplace(old_dest, new_dest);
- }
-
- for (auto& pair : stubs_) {
- Address old_dest = pair.second->instructions().start();
- auto local = ret->stubs_.find(pair.first);
- DCHECK(local != ret->stubs_.end());
- Address new_dest = local->second->instructions().start();
- reverse_lookup.emplace(old_dest, new_dest);
- }
-
- for (auto& pair : ret->stubs_) {
- WasmCode* new_stub = pair.second;
- WasmCode* old_stub = stubs_.find(pair.first)->second;
- PatchTrampolineAndStubCalls(old_stub, new_stub, reverse_lookup);
- }
-
- WasmCode* anonymous_lazy_builtin = nullptr;
+ // Create a helper for cloning and patching code.
+ CloneCodeHelper helper(this, ret.get());
for (uint32_t i = num_imported_functions(), e = FunctionCount(); i < e; ++i) {
- const WasmCode* original_code = GetCode(i);
- switch (original_code->kind()) {
- case WasmCode::kLazyStub: {
- // Use the first anonymous lazy compile stub hit in this loop as the
- // canonical copy for all further ones by remembering it locally via
- // the {anonymous_lazy_builtin} variable. All non-anonymous such stubs
- // are just cloned directly via {CloneLazyBuiltinInto} below.
- if (!original_code->IsAnonymous()) {
- WasmCode* new_code = ret->CloneLazyBuiltinInto(original_code, i);
- if (new_code == nullptr) return nullptr;
- PatchTrampolineAndStubCalls(original_code, new_code, reverse_lookup);
- break;
- }
- if (anonymous_lazy_builtin == nullptr) {
- WasmCode* new_code = ret->CloneCode(original_code);
- if (new_code == nullptr) return nullptr;
- PatchTrampolineAndStubCalls(original_code, new_code, reverse_lookup);
- anonymous_lazy_builtin = new_code;
- }
- ret->code_table_[i] = anonymous_lazy_builtin;
- } break;
- case WasmCode::kFunction: {
- WasmCode* new_code = ret->CloneCode(original_code);
- if (new_code == nullptr) return nullptr;
- PatchTrampolineAndStubCalls(original_code, new_code, reverse_lookup);
- } break;
- default:
- UNREACHABLE();
- }
+ helper.SelectForCloning(i);
}
+ helper.CloneAndPatchCode(true);
+
return ret;
}
@@ -1025,29 +1246,6 @@ NativeModuleModificationScope::~NativeModuleModificationScope() {
}
}
-// On Intel, call sites are encoded as a displacement. For linking
-// and for serialization/deserialization, we want to store/retrieve
-// a tag (the function index). On Intel, that means accessing the
-// raw displacement. Everywhere else, that simply means accessing
-// the target address.
-void SetWasmCalleeTag(RelocInfo* rinfo, uint32_t tag) {
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
- *(reinterpret_cast<uint32_t*>(rinfo->target_address_address())) = tag;
-#else
- rinfo->set_target_address(reinterpret_cast<Address>(tag), SKIP_WRITE_BARRIER,
- SKIP_ICACHE_FLUSH);
-#endif
-}
-
-uint32_t GetWasmCalleeTag(RelocInfo* rinfo) {
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
- return *(reinterpret_cast<uint32_t*>(rinfo->target_address_address()));
-#else
- return static_cast<uint32_t>(
- reinterpret_cast<size_t>(rinfo->target_address()));
-#endif
-}
-
} // namespace wasm
} // namespace internal
} // namespace v8
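
The WasmCodeUniquePtrComparator introduced above enables heterogeneous std::upper_bound lookups: the probe is a raw address while the container holds std::unique_ptr<WasmCode> sorted by instruction start. A self-contained sketch of the same pattern (plain standard C++, not V8 types) shows why the comparator takes the probe on the left and the element on the right, and why a range check is still needed after stepping back one element:

    #include <algorithm>
    #include <cstddef>
    #include <memory>
    #include <vector>

    struct Block { const char* start; size_t size; };

    // Probe type on the left, element type on the right, as std::upper_bound
    // requires when the probe differs from the element type.
    struct BlockPtrComparator {
      bool operator()(const char* pc, const std::unique_ptr<Block>& b) const {
        return pc < b->start;
      }
    };

    // blocks must be sorted by start address, mirroring owned_code_.
    const Block* Find(const std::vector<std::unique_ptr<Block>>& blocks,
                      const char* pc) {
      auto it = std::upper_bound(blocks.begin(), blocks.end(), pc,
                                 BlockPtrComparator());
      if (it == blocks.begin()) return nullptr;  // pc precedes all blocks
      const Block* candidate = std::prev(it)->get();
      // upper_bound only guarantees candidate->start <= pc; pc may still fall
      // into the gap after candidate, so check the end as well.
      return pc < candidate->start + candidate->size ? candidate : nullptr;
    }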
diff --git a/deps/v8/src/wasm/wasm-code-manager.h b/deps/v8/src/wasm/wasm-code-manager.h
index e398f1bcfd..c2a338c9fa 100644
--- a/deps/v8/src/wasm/wasm-code-manager.h
+++ b/deps/v8/src/wasm/wasm-code-manager.h
@@ -9,12 +9,12 @@
#include <list>
#include <map>
#include <unordered_map>
-#include <unordered_set>
#include "src/base/macros.h"
#include "src/handles.h"
#include "src/trap-handler/trap-handler.h"
#include "src/vector.h"
+#include "src/wasm/module-compiler.h"
namespace v8 {
class Isolate;
@@ -26,16 +26,10 @@ class WasmCompiledModule;
namespace wasm {
-using GlobalHandleAddress = Address;
class NativeModule;
+class WasmCodeManager;
struct WasmModule;
-struct AddressHasher {
- size_t operator()(const Address& addr) const {
- return std::hash<intptr_t>()(reinterpret_cast<intptr_t>(addr));
- }
-};
-
// Sorted, disjoint and non-overlapping memory ranges. A range is of the
// form [start, end). So there's no [start, end), [end, other_end),
// because that should have been reduced to [start, other_end).
@@ -89,7 +83,6 @@ class V8_EXPORT_PRIVATE WasmCode final {
public:
enum Kind {
kFunction,
- kWasmToWasmWrapper,
kWasmToJsWrapper,
kLazyStub,
kInterpreterStub,
@@ -97,28 +90,32 @@ class V8_EXPORT_PRIVATE WasmCode final {
kTrampoline
};
+ // kOther is used if we have WasmCode that is neither
+ // liftoff- nor turbofan-compiled, i.e. if its Kind is
+ // not kFunction.
+ enum Tier : int8_t { kLiftoff, kTurbofan, kOther };
+
Vector<byte> instructions() const { return instructions_; }
Vector<const byte> reloc_info() const {
return {reloc_info_.get(), reloc_size_};
}
+ Vector<const byte> source_positions() const {
+ return {source_position_table_.get(), source_position_size_};
+ }
uint32_t index() const { return index_.ToChecked(); }
// Anonymous functions are functions that don't carry an index, like
// trampolines.
bool IsAnonymous() const { return index_.IsNothing(); }
Kind kind() const { return kind_; }
- NativeModule* owner() const { return owner_; }
+ NativeModule* native_module() const { return native_module_; }
+ Tier tier() const { return tier_; }
Address constant_pool() const;
size_t constant_pool_offset() const { return constant_pool_offset_; }
size_t safepoint_table_offset() const { return safepoint_table_offset_; }
size_t handler_table_offset() const { return handler_table_offset_; }
uint32_t stack_slots() const { return stack_slots_; }
- bool is_liftoff() const { return is_liftoff_; }
-
- size_t trap_handler_index() const;
- void set_trap_handler_index(size_t);
- bool HasTrapHandlerIndex() const;
- void ResetTrapHandlerIndex();
+ bool is_liftoff() const { return tier_ == kLiftoff; }
const ProtectedInstructions& protected_instructions() const {
// TODO(mstarzinger): Code that doesn't have trapping instruction should
@@ -130,26 +127,35 @@ class V8_EXPORT_PRIVATE WasmCode final {
void Print(Isolate* isolate) const;
void Disassemble(const char* name, Isolate* isolate, std::ostream& os) const;
+ static bool ShouldBeLogged(Isolate* isolate);
+ void LogCode(Isolate* isolate) const;
+
~WasmCode();
+ enum FlushICache : bool { kFlushICache = true, kNoFlushICache = false };
+
+ // Offset of {instructions_.start()}. It is used for tiering, when
+ // we check if optimized code is available during the prologue
+ // of Liftoff-compiled code.
+ static constexpr int kInstructionStartOffset = 0;
+
private:
friend class NativeModule;
- friend class NativeModuleDeserializer;
-
- // A constructor used just for implementing Lookup.
- WasmCode(Address pc) : instructions_(pc, 0), index_(Nothing<uint32_t>()) {}
- WasmCode(Vector<byte> instructions,
- std::unique_ptr<const byte[]>&& reloc_info, size_t reloc_size,
- NativeModule* owner, Maybe<uint32_t> index, Kind kind,
- size_t constant_pool_offset, uint32_t stack_slots,
- size_t safepoint_table_offset, size_t handler_table_offset,
+ WasmCode(Vector<byte> instructions, std::unique_ptr<const byte[]> reloc_info,
+ size_t reloc_size, std::unique_ptr<const byte[]> source_pos,
+ size_t source_pos_size, NativeModule* native_module,
+ Maybe<uint32_t> index, Kind kind, size_t constant_pool_offset,
+ uint32_t stack_slots, size_t safepoint_table_offset,
+ size_t handler_table_offset,
std::shared_ptr<ProtectedInstructions> protected_instructions,
- bool is_liftoff)
+ Tier tier)
: instructions_(instructions),
reloc_info_(std::move(reloc_info)),
reloc_size_(reloc_size),
- owner_(owner),
+ source_position_table_(std::move(source_pos)),
+ source_position_size_(source_pos_size),
+ native_module_(native_module),
index_(index),
kind_(kind),
constant_pool_offset_(constant_pool_offset),
@@ -157,15 +163,26 @@ class V8_EXPORT_PRIVATE WasmCode final {
safepoint_table_offset_(safepoint_table_offset),
handler_table_offset_(handler_table_offset),
protected_instructions_(std::move(protected_instructions)),
- is_liftoff_(is_liftoff) {}
+ tier_(tier) {
+ DCHECK_LE(safepoint_table_offset, instructions.size());
+ DCHECK_LE(constant_pool_offset, instructions.size());
+ DCHECK_LE(handler_table_offset, instructions.size());
+ DCHECK_EQ(kInstructionStartOffset, OFFSET_OF(WasmCode, instructions_));
+ }
- WasmCode(const WasmCode&) = delete;
- WasmCode& operator=(const WasmCode&) = delete;
+ // Code objects that have been registered with the global trap handler within
+ // this process will have a {trap_handler_index} associated with them.
+ size_t trap_handler_index() const;
+ void set_trap_handler_index(size_t);
+ bool HasTrapHandlerIndex() const;
+ void ResetTrapHandlerIndex();
Vector<byte> instructions_;
std::unique_ptr<const byte[]> reloc_info_;
size_t reloc_size_ = 0;
- NativeModule* owner_ = nullptr;
+ std::unique_ptr<const byte[]> source_position_table_;
+ size_t source_position_size_ = 0;
+ NativeModule* native_module_ = nullptr;
Maybe<uint32_t> index_;
Kind kind_;
size_t constant_pool_offset_ = 0;
@@ -177,14 +194,14 @@ class V8_EXPORT_PRIVATE WasmCode final {
size_t handler_table_offset_ = 0;
intptr_t trap_handler_index_ = -1;
std::shared_ptr<ProtectedInstructions> protected_instructions_;
- bool is_liftoff_;
+ Tier tier_;
+
+ DISALLOW_COPY_AND_ASSIGN(WasmCode);
};
// Return a textual description of the kind.
const char* GetWasmCodeKindAsString(WasmCode::Kind);
-class WasmCodeManager;
-
// Note that we currently need to add code on the main thread, because we may
// trigger a GC if we believe there's a chance the GC would clear up native
// modules. The code is ready for concurrency otherwise, we just need to be
@@ -196,7 +213,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
WasmCode* AddCode(const CodeDesc& desc, uint32_t frame_count, uint32_t index,
size_t safepoint_table_offset, size_t handler_table_offset,
- std::unique_ptr<ProtectedInstructions>, bool is_liftoff);
+ std::unique_ptr<ProtectedInstructions>,
+ Handle<ByteArray> source_position_table,
+ WasmCode::Tier tier);
// A way to copy over JS-allocated code. This is because we compile
// certain wrappers using a different pipeline.
@@ -215,62 +234,59 @@ class V8_EXPORT_PRIVATE NativeModule final {
// by the runtime.
void SetLazyBuiltin(Handle<Code> code);
- // ExportedWrappers are WasmToWasmWrappers for functions placed on import
- // tables. We construct them as-needed.
- WasmCode* GetExportedWrapper(uint32_t index);
- WasmCode* AddExportedWrapper(Handle<Code> code, uint32_t index);
-
// FunctionCount is WasmModule::functions.size().
uint32_t FunctionCount() const;
WasmCode* GetCode(uint32_t index) const;
+ void SetCode(uint32_t index, WasmCode* wasm_code);
- // We special-case lazy cloning because we currently rely on making copies
- // of the lazy builtin, to be able to identify, in the runtime, which function
- // the lazy builtin is a placeholder of. If we used trampolines, we would call
- // the runtime function from a common pc. We could, then, figure who the
- // caller was if the trampolines called rather than jumped to the common
- // builtin. The logic for seeking though frames would change, though.
- // TODO(mtrofin): perhaps we can do exactly that - either before or after
- // this change.
- WasmCode* CloneLazyBuiltinInto(const WasmCode* code, uint32_t);
+ // Register/release the protected instructions in all code objects with the
+ // global trap handler for this process.
+ void UnpackAndRegisterProtectedInstructions();
+ void ReleaseProtectedInstructions();
+
+ // Gets code suitable for indirect or import calls for the given function
+ // index. If the code at the given index is the lazy compile stub, it will
+ // clone a non-anonymous lazy compile stub for the purpose.
+ WasmCode* GetIndirectlyCallableCode(uint32_t func_index);
bool SetExecutable(bool executable);
// For cctests, where we build both WasmModule and the runtime objects
// on the fly, and bypass the instance builder pipeline.
- void ResizeCodeTableForTest(size_t);
- void LinkAll();
- void Link(uint32_t index);
+ void ResizeCodeTableForTesting(size_t num_functions, size_t max_functions);
+
+ CompilationState* compilation_state() { return compilation_state_.get(); }
- // TODO(mstarzinger): needed until we sort out source positions, which are
- // still on the GC-heap.
+ // TODO(mstarzinger): The link to the {compiled_module} is deprecated and all
+ // uses should vanish to make {NativeModule} independent of the Isolate.
WasmCompiledModule* compiled_module() const;
void SetCompiledModule(Handle<WasmCompiledModule>);
uint32_t num_imported_functions() const { return num_imported_functions_; }
+ const std::vector<WasmCode*>& code_table() const { return code_table_; }
+
size_t committed_memory() const { return committed_memory_; }
const size_t instance_id = 0;
~NativeModule();
+ void set_lazy_compile_frozen(bool frozen) { frozen_ = frozen; }
+ bool lazy_compile_frozen() const { return frozen_; }
+
private:
friend class WasmCodeManager;
friend class NativeModuleSerializer;
friend class NativeModuleDeserializer;
friend class NativeModuleModificationScope;
- struct WasmCodeUniquePtrComparer {
- bool operator()(const std::unique_ptr<WasmCode>& a,
- const std::unique_ptr<WasmCode>& b) {
- DCHECK(a);
- DCHECK(b);
- return a->instructions().start() < b->instructions().start();
+ class CloneCodeHelper;
+ struct AddressHasher {
+ size_t operator()(const Address& addr) const {
+ return std::hash<intptr_t>()(reinterpret_cast<intptr_t>(addr));
}
};
static base::AtomicNumber<size_t> next_id_;
- NativeModule(const NativeModule&) = delete;
- NativeModule& operator=(const NativeModule&) = delete;
NativeModule(uint32_t num_functions, uint32_t num_imports,
bool can_request_more, VirtualMemory* vmem,
WasmCodeManager* code_manager);
@@ -284,39 +300,55 @@ class V8_EXPORT_PRIVATE NativeModule final {
// whether it has an index or is anonymous, etc.
WasmCode* AddOwnedCode(Vector<const byte> orig_instructions,
std::unique_ptr<const byte[]> reloc_info,
- size_t reloc_size, Maybe<uint32_t> index,
+ size_t reloc_size,
+ std::unique_ptr<const byte[]> source_pos,
+ size_t source_pos_size, Maybe<uint32_t> index,
WasmCode::Kind kind, size_t constant_pool_offset,
uint32_t stack_slots, size_t safepoint_table_offset,
size_t handler_table_offset,
- std::shared_ptr<ProtectedInstructions>,
- bool is_liftoff);
- WasmCode* CloneCode(const WasmCode*);
- void CloneTrampolinesAndStubs(const NativeModule* other);
+ std::shared_ptr<ProtectedInstructions>, WasmCode::Tier,
+ WasmCode::FlushICache);
+ WasmCode* CloneCode(const WasmCode*, WasmCode::FlushICache);
+ void CloneTrampolinesAndStubs(const NativeModule* other,
+ WasmCode::FlushICache);
WasmCode* Lookup(Address);
Address GetLocalAddressFor(Handle<Code>);
Address CreateTrampolineTo(Handle<Code>);
+ // Holds all allocated code objects. Kept in ascending order of the code's
+ // instruction start address to allow lookups by address.
std::vector<std::unique_ptr<WasmCode>> owned_code_;
- std::unordered_map<uint32_t, WasmCode*> exported_wasm_to_wasm_wrappers_;
-
- WasmCodeUniquePtrComparer owned_code_comparer_;
std::vector<WasmCode*> code_table_;
+ std::unique_ptr<std::vector<WasmCode*>> lazy_compile_stubs_;
uint32_t num_imported_functions_;
+ // Maps from instruction start of an immovable code object to instruction
+ // start of the trampoline.
std::unordered_map<Address, Address, AddressHasher> trampolines_;
+
+ // Maps from stub key to wasm code (containing a copy of that stub).
std::unordered_map<uint32_t, WasmCode*> stubs_;
+ std::unique_ptr<CompilationState, CompilationStateDeleter> compilation_state_;
+
+ // A phantom reference to the {WasmCompiledModule}. It is intentionally not
+ // typed {Handle<WasmCompiledModule>} because this location will be cleared
+ // when the phantom reference is cleared.
+ WasmCompiledModule** compiled_module_ = nullptr;
+
DisjointAllocationPool free_memory_;
DisjointAllocationPool allocated_memory_;
std::list<VirtualMemory> owned_memory_;
WasmCodeManager* wasm_code_manager_;
base::Mutex allocation_mutex_;
- Handle<WasmCompiledModule> compiled_module_;
size_t committed_memory_ = 0;
bool can_request_more_memory_;
bool is_executable_ = false;
+ bool frozen_ = false;
int modification_scope_depth_ = 0;
+
+ DISALLOW_COPY_AND_ASSIGN(NativeModule);
};
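A minimal sketch (not part of the patch) of why owned_code_ is kept sorted by instruction start, as the comment above describes: a pc-based lookup, which is what Lookup(Address) provides, can then be a binary search. Names such as LookupByPc and the parameter 'owned' are assumed for illustration; only the WasmCode interface shown in this header is relied upon.

// Illustrative sketch; 'owned' stands in for owned_code_.
WasmCode* LookupByPc(const std::vector<std::unique_ptr<WasmCode>>& owned,
                     Address pc) {
  if (owned.empty()) return nullptr;
  auto it = std::upper_bound(
      owned.begin(), owned.end(), pc,
      [](Address addr, const std::unique_ptr<WasmCode>& code) {
        return addr < code->instructions().start();
      });
  if (it == owned.begin()) return nullptr;
  WasmCode* candidate = std::prev(it)->get();
  Address start = candidate->instructions().start();
  // A hit only if pc lies within the candidate's instruction range.
  return pc < start + candidate->instructions().size() ? candidate : nullptr;
}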
class V8_EXPORT_PRIVATE WasmCodeManager final {
@@ -343,8 +375,6 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
private:
friend class NativeModule;
- WasmCodeManager(const WasmCodeManager&) = delete;
- WasmCodeManager& operator=(const WasmCodeManager&) = delete;
void TryAllocate(size_t size, VirtualMemory*, void* hint = nullptr);
bool Commit(Address, size_t);
// Currently, we uncommit a whole module, so all we need is account
@@ -365,6 +395,8 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
// TODO(mtrofin): remove the dependency on isolate.
v8::Isolate* isolate_;
+
+ DISALLOW_COPY_AND_ASSIGN(WasmCodeManager);
};
// Within the scope, the native_module is writable and not executable.
@@ -386,13 +418,6 @@ class NativeModuleModificationScope final {
NativeModule* native_module_;
};
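A minimal usage sketch, assuming the caller already holds a NativeModule* and a configured CodeSpecialization (declared in wasm-code-specialization.h further below): patching happens inside the scope, where the code is writable but not executable.

// Illustrative only.
void PatchModule(wasm::NativeModule* native_module,
                 wasm::CodeSpecialization* spec) {
  wasm::NativeModuleModificationScope scope(native_module);
  // Within the scope the module's code is writable but not executable.
  spec->ApplyToWholeModule(native_module);
}  // Leaving the scope restores the previous permissions.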
-// Utilities specific to wasm code generation. We embed a tag for call sites -
-// the index of the called function - when serializing and when creating the
-// code, initially. These APIs offer accessors. The implementation has platform
-// specific nuances.
-void SetWasmCalleeTag(RelocInfo* rinfo, uint32_t tag);
-uint32_t GetWasmCalleeTag(RelocInfo* rinfo);
-
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-code-specialization.cc b/deps/v8/src/wasm/wasm-code-specialization.cc
index f261f44991..bb3a082699 100644
--- a/deps/v8/src/wasm/wasm-code-specialization.cc
+++ b/deps/v8/src/wasm/wasm-code-specialization.cc
@@ -43,81 +43,44 @@ int AdvanceSourcePositionTableIterator(SourcePositionTableIterator& iterator,
class PatchDirectCallsHelper {
public:
- PatchDirectCallsHelper(WasmInstanceObject* instance, const WasmCode* code)
- : source_pos_it(ByteArray::cast(
- instance->compiled_module()->source_positions()->get(
- static_cast<int>(code->index())))),
- decoder(nullptr, nullptr) {
+ PatchDirectCallsHelper(NativeModule* native_module, const WasmCode* code)
+ : source_pos_it(code->source_positions()), decoder(nullptr, nullptr) {
uint32_t func_index = code->index();
- WasmCompiledModule* comp_mod = instance->compiled_module();
+ WasmCompiledModule* comp_mod = native_module->compiled_module();
func_bytes =
comp_mod->shared()->module_bytes()->GetChars() +
comp_mod->shared()->module()->functions[func_index].code.offset();
}
- PatchDirectCallsHelper(WasmInstanceObject* instance, Code* code)
- : source_pos_it(code->SourcePositionTable()), decoder(nullptr, nullptr) {
- FixedArray* deopt_data = code->deoptimization_data();
- DCHECK_EQ(2, deopt_data->length());
- WasmSharedModuleData* shared = instance->compiled_module()->shared();
- int func_index = Smi::ToInt(deopt_data->get(1));
- func_bytes = shared->module_bytes()->GetChars() +
- shared->module()->functions[func_index].code.offset();
- }
-
SourcePositionTableIterator source_pos_it;
Decoder decoder;
const byte* func_bytes;
};
-bool IsAtWasmDirectCallTarget(RelocIterator& it) {
- DCHECK(RelocInfo::IsCodeTarget(it.rinfo()->rmode()));
- Code* code = Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
- return code->kind() == Code::WASM_FUNCTION ||
- code->kind() == Code::WASM_TO_JS_FUNCTION ||
- code->kind() == Code::WASM_TO_WASM_FUNCTION ||
- code->kind() == Code::WASM_INTERPRETER_ENTRY ||
- code->builtin_index() == Builtins::kIllegal ||
- code->builtin_index() == Builtins::kWasmCompileLazy;
-}
-
} // namespace
-CodeSpecialization::CodeSpecialization(Isolate* isolate, Zone* zone) {}
+CodeSpecialization::CodeSpecialization() {}
CodeSpecialization::~CodeSpecialization() {}
-void CodeSpecialization::RelocateWasmContextReferences(Address new_context) {
- DCHECK_NOT_NULL(new_context);
- DCHECK_NULL(new_wasm_context_address_);
- new_wasm_context_address_ = new_context;
-}
-
-void CodeSpecialization::PatchTableSize(uint32_t old_size, uint32_t new_size) {
- DCHECK(old_function_table_size_ == 0 && new_function_table_size_ == 0);
- old_function_table_size_ = old_size;
- new_function_table_size_ = new_size;
-}
-
-void CodeSpecialization::RelocateDirectCalls(
- Handle<WasmInstanceObject> instance) {
- DCHECK(relocate_direct_calls_instance_.is_null());
- DCHECK(!instance.is_null());
- relocate_direct_calls_instance_ = instance;
+void CodeSpecialization::UpdateInstanceReferences(
+ Handle<WeakCell> old_weak_instance, Handle<WeakCell> new_weak_instance) {
+ DCHECK(!old_weak_instance.is_null());
+ DCHECK(!new_weak_instance.is_null());
+ old_weak_instance_ = old_weak_instance;
+ new_weak_instance_ = new_weak_instance;
}
-void CodeSpecialization::RelocatePointer(Address old_ptr, Address new_ptr) {
- DCHECK_EQ(0, pointers_to_relocate_.count(old_ptr));
- DCHECK_EQ(0, pointers_to_relocate_.count(new_ptr));
- pointers_to_relocate_.insert(std::make_pair(old_ptr, new_ptr));
+void CodeSpecialization::RelocateDirectCalls(NativeModule* native_module) {
+ DCHECK_NULL(relocate_direct_calls_module_);
+ DCHECK_NOT_NULL(native_module);
+ relocate_direct_calls_module_ = native_module;
}
-bool CodeSpecialization::ApplyToWholeInstance(
- WasmInstanceObject* instance, ICacheFlushMode icache_flush_mode) {
+bool CodeSpecialization::ApplyToWholeModule(NativeModule* native_module,
+ ICacheFlushMode icache_flush_mode) {
DisallowHeapAllocation no_gc;
- WasmCompiledModule* compiled_module = instance->compiled_module();
- NativeModule* native_module = compiled_module->GetNativeModule();
- FixedArray* code_table = compiled_module->code_table();
+ WasmCompiledModule* compiled_module = native_module->compiled_module();
WasmSharedModuleData* shared = compiled_module->shared();
WasmModule* module = shared->module();
std::vector<WasmFunction>* wasm_functions = &shared->module()->functions;
@@ -130,157 +93,105 @@ bool CodeSpecialization::ApplyToWholeInstance(
// Patch all wasm functions.
for (int num_wasm_functions = static_cast<int>(wasm_functions->size());
func_index < num_wasm_functions; ++func_index) {
- WasmCodeWrapper wrapper;
- if (FLAG_wasm_jit_to_native) {
- const WasmCode* wasm_function = native_module->GetCode(func_index);
- if (wasm_function->kind() != WasmCode::kFunction) {
- continue;
- }
- wrapper = WasmCodeWrapper(wasm_function);
- } else {
- Code* wasm_function = Code::cast(code_table->get(func_index));
- if (wasm_function->kind() != Code::WASM_FUNCTION) continue;
- wrapper = WasmCodeWrapper(handle(wasm_function));
+ WasmCode* wasm_function = native_module->GetCode(func_index);
+ // TODO(clemensh): Get rid of this nullptr check
+ if (wasm_function == nullptr ||
+ wasm_function->kind() != WasmCode::kFunction) {
+ continue;
}
- changed |= ApplyToWasmCode(wrapper, icache_flush_mode);
+ changed |= ApplyToWasmCode(wasm_function, icache_flush_mode);
}
+ bool patch_wasm_weak_instances =
+ !old_weak_instance_.is_identical_to(new_weak_instance_);
+
// Patch all exported functions (JS_TO_WASM_FUNCTION).
int reloc_mode = 0;
- // We need to patch WASM_CONTEXT_REFERENCE to put the correct address.
- if (new_wasm_context_address_) {
- reloc_mode |= RelocInfo::ModeMask(RelocInfo::WASM_CONTEXT_REFERENCE);
- }
// Patch CODE_TARGET if we shall relocate direct calls. If we patch direct
- // calls, the instance registered for that (relocate_direct_calls_instance_)
+ // calls, the instance registered for that (relocate_direct_calls_module_)
// should match the instance we currently patch (instance).
- if (!relocate_direct_calls_instance_.is_null()) {
- DCHECK_EQ(instance, *relocate_direct_calls_instance_);
- reloc_mode |=
- RelocInfo::ModeMask(FLAG_wasm_jit_to_native ? RelocInfo::JS_TO_WASM_CALL
- : RelocInfo::CODE_TARGET);
+ if (relocate_direct_calls_module_ != nullptr) {
+ DCHECK_EQ(native_module, relocate_direct_calls_module_);
+ reloc_mode |= RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL);
+ }
+ // Instance references are simply embedded objects.
+ if (patch_wasm_weak_instances) {
+ reloc_mode |= RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
}
if (!reloc_mode) return changed;
int wrapper_index = 0;
for (auto exp : module->export_table) {
if (exp.kind != kExternalFunction) continue;
Code* export_wrapper =
- Code::cast(compiled_module->export_wrappers()->get(wrapper_index));
- DCHECK_EQ(Code::JS_TO_WASM_FUNCTION, export_wrapper->kind());
+ Code::cast(compiled_module->export_wrappers()->get(wrapper_index++));
+ if (export_wrapper->kind() != Code::JS_TO_WASM_FUNCTION) continue;
for (RelocIterator it(export_wrapper, reloc_mode); !it.done(); it.next()) {
RelocInfo::Mode mode = it.rinfo()->rmode();
switch (mode) {
- case RelocInfo::WASM_CONTEXT_REFERENCE:
- it.rinfo()->set_wasm_context_reference(new_wasm_context_address_,
- icache_flush_mode);
- break;
case RelocInfo::JS_TO_WASM_CALL: {
- DCHECK(FLAG_wasm_jit_to_native);
- const WasmCode* new_code = native_module->GetCode(exp.index);
+ changed = true;
+ const WasmCode* new_code =
+ native_module->GetIndirectlyCallableCode(exp.index);
it.rinfo()->set_js_to_wasm_address(new_code->instructions().start(),
- SKIP_ICACHE_FLUSH);
+ icache_flush_mode);
} break;
- case RelocInfo::CODE_TARGET: {
- DCHECK(!FLAG_wasm_jit_to_native);
- // Ignore calls to other builtins like ToNumber.
- if (!IsAtWasmDirectCallTarget(it)) continue;
- Code* new_code = Code::cast(code_table->get(exp.index));
- it.rinfo()->set_target_address(new_code->instruction_start(),
- UPDATE_WRITE_BARRIER,
- SKIP_ICACHE_FLUSH);
+ case RelocInfo::EMBEDDED_OBJECT: {
+ changed = true;
+ const HeapObject* old = it.rinfo()->target_object();
+ if (*old_weak_instance_ == old) {
+ it.rinfo()->set_target_object(
+ *new_weak_instance_, WriteBarrierMode::UPDATE_WRITE_BARRIER,
+ icache_flush_mode);
+ }
} break;
default:
UNREACHABLE();
}
}
- changed = true;
- ++wrapper_index;
}
DCHECK_EQ(module->functions.size(), func_index);
DCHECK_EQ(compiled_module->export_wrappers()->length(), wrapper_index);
return changed;
}
-bool CodeSpecialization::ApplyToWasmCode(WasmCodeWrapper code,
+bool CodeSpecialization::ApplyToWasmCode(wasm::WasmCode* code,
ICacheFlushMode icache_flush_mode) {
DisallowHeapAllocation no_gc;
- if (code.IsCodeObject()) {
- DCHECK_EQ(Code::WASM_FUNCTION, code.GetCode()->kind());
- } else {
- DCHECK_EQ(wasm::WasmCode::kFunction, code.GetWasmCode()->kind());
- }
+ DCHECK_EQ(wasm::WasmCode::kFunction, code->kind());
- bool patch_table_size = old_function_table_size_ || new_function_table_size_;
- bool reloc_direct_calls = !relocate_direct_calls_instance_.is_null();
- bool reloc_pointers = pointers_to_relocate_.size() > 0;
+ bool reloc_direct_calls = relocate_direct_calls_module_ != nullptr;
int reloc_mode = 0;
auto add_mode = [&reloc_mode](bool cond, RelocInfo::Mode mode) {
if (cond) reloc_mode |= RelocInfo::ModeMask(mode);
};
- add_mode(patch_table_size, RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE);
- if (code.IsCodeObject()) {
- add_mode(reloc_direct_calls, RelocInfo::CODE_TARGET);
- } else {
- add_mode(reloc_direct_calls, RelocInfo::WASM_CALL);
- }
- add_mode(reloc_pointers, RelocInfo::WASM_GLOBAL_HANDLE);
+ add_mode(reloc_direct_calls, RelocInfo::WASM_CALL);
+
+ // Always patch the code table entry address, which the Liftoff prologue
+ // uses to jump to optimized code if it exists.
+ reloc_mode |= RelocInfo::ModeMask(RelocInfo::WASM_CODE_TABLE_ENTRY);
base::Optional<PatchDirectCallsHelper> patch_direct_calls_helper;
bool changed = false;
- NativeModule* native_module =
- code.IsCodeObject() ? nullptr : code.GetWasmCode()->owner();
+ NativeModule* native_module = code->native_module();
- RelocIterator it =
- code.IsCodeObject()
- ? RelocIterator(*code.GetCode(), reloc_mode)
- : RelocIterator(code.GetWasmCode()->instructions(),
- code.GetWasmCode()->reloc_info(),
- code.GetWasmCode()->constant_pool(), reloc_mode);
+ RelocIterator it(code->instructions(), code->reloc_info(),
+ code->constant_pool(), reloc_mode);
for (; !it.done(); it.next()) {
RelocInfo::Mode mode = it.rinfo()->rmode();
switch (mode) {
- case RelocInfo::CODE_TARGET: {
- DCHECK(!FLAG_wasm_jit_to_native);
- DCHECK(reloc_direct_calls);
- // Skip everything which is not a wasm call (stack checks, traps, ...).
- if (!IsAtWasmDirectCallTarget(it)) continue;
- // Iterate simultaneously over the relocation information and the source
- // position table. For each call in the reloc info, move the source
- // position iterator forward to that position to find the byte offset of
- // the respective call. Then extract the call index from the module wire
- // bytes to find the new compiled function.
- size_t offset = it.rinfo()->pc() - code.GetCode()->instruction_start();
- if (!patch_direct_calls_helper) {
- patch_direct_calls_helper.emplace(*relocate_direct_calls_instance_,
- *code.GetCode());
- }
- int byte_pos = AdvanceSourcePositionTableIterator(
- patch_direct_calls_helper->source_pos_it, offset);
- int called_func_index = ExtractDirectCallIndex(
- patch_direct_calls_helper->decoder,
- patch_direct_calls_helper->func_bytes + byte_pos);
- FixedArray* code_table =
- relocate_direct_calls_instance_->compiled_module()->code_table();
- Code* new_code = Code::cast(code_table->get(called_func_index));
- it.rinfo()->set_target_address(new_code->instruction_start(),
- UPDATE_WRITE_BARRIER, icache_flush_mode);
- changed = true;
- } break;
case RelocInfo::WASM_CALL: {
- DCHECK(FLAG_wasm_jit_to_native);
DCHECK(reloc_direct_calls);
// Iterate simultaneously over the relocation information and the source
// position table. For each call in the reloc info, move the source
// position iterator forward to that position to find the byte offset of
// the respective call. Then extract the call index from the module wire
// bytes to find the new compiled function.
- size_t offset =
- it.rinfo()->pc() - code.GetWasmCode()->instructions().start();
+ size_t offset = it.rinfo()->pc() - code->instructions().start();
if (!patch_direct_calls_helper) {
- patch_direct_calls_helper.emplace(*relocate_direct_calls_instance_,
- code.GetWasmCode());
+ patch_direct_calls_helper.emplace(relocate_direct_calls_module_,
+ code);
}
int byte_pos = AdvanceSourcePositionTableIterator(
patch_direct_calls_helper->source_pos_it, offset);
@@ -292,23 +203,16 @@ bool CodeSpecialization::ApplyToWasmCode(WasmCodeWrapper code,
icache_flush_mode);
changed = true;
} break;
- case RelocInfo::WASM_GLOBAL_HANDLE: {
- DCHECK(reloc_pointers);
- Address old_ptr = it.rinfo()->global_handle();
- auto entry = pointers_to_relocate_.find(old_ptr);
- if (entry != pointers_to_relocate_.end()) {
- Address new_ptr = entry->second;
- it.rinfo()->set_global_handle(new_ptr, icache_flush_mode);
- changed = true;
- }
- } break;
- case RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE:
- DCHECK(patch_table_size);
- it.rinfo()->update_wasm_function_table_size_reference(
- old_function_table_size_, new_function_table_size_,
+ case RelocInfo::WASM_CODE_TABLE_ENTRY: {
+ DCHECK(FLAG_wasm_tier_up);
+ WasmCode* const* code_table_entry =
+ native_module->code_table().data() + code->index();
+ it.rinfo()->set_wasm_code_table_entry(
+ const_cast<Address>(
+ reinterpret_cast<byte const*>(code_table_entry)),
icache_flush_mode);
- changed = true;
- break;
+ } break;
+
default:
UNREACHABLE();
}
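A hedged sketch of what the WASM_CODE_TABLE_ENTRY patch above enables: each function's generated code embeds the address of its slot in the native module's code_table_, so tier-up only has to overwrite that slot (for example via NativeModule::SetCode) and already-compiled Liftoff code picks up the optimized version through the embedded slot address. Function and variable names below are assumed for illustration.

// Illustrative only.
void InstallOptimizedCode(std::vector<wasm::WasmCode*>& code_table,
                          uint32_t func_index, wasm::WasmCode* optimized) {
  // The Liftoff prologue reads the current WasmCode* through the
  // WASM_CODE_TABLE_ENTRY address patched above, so updating the slot is
  // enough to route future entries to the optimized code.
  code_table[func_index] = optimized;
}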
diff --git a/deps/v8/src/wasm/wasm-code-specialization.h b/deps/v8/src/wasm/wasm-code-specialization.h
index bed565cf05..20977d70d5 100644
--- a/deps/v8/src/wasm/wasm-code-specialization.h
+++ b/deps/v8/src/wasm/wasm-code-specialization.h
@@ -6,7 +6,6 @@
#define V8_WASM_WASM_CODE_SPECIALIZATION_H_
#include "src/assembler.h"
-#include "src/identity-map.h"
#include "src/wasm/decoder.h"
#include "src/wasm/wasm-objects.h"
@@ -25,36 +24,27 @@ uint32_t ExtractDirectCallIndex(wasm::Decoder& decoder, const byte* pc);
// Patch* methods, then apply all changes in one step using the Apply* methods.
class CodeSpecialization {
public:
- CodeSpecialization(Isolate*, Zone*);
+ CodeSpecialization();
~CodeSpecialization();
- // Update WasmContext references.
- void RelocateWasmContextReferences(Address new_context);
- // Update function table size.
- // TODO(wasm): Prepare this for more than one indirect function table.
- void PatchTableSize(uint32_t old_size, uint32_t new_size);
+ // Update instance references in code. Instance references should only
+ // appear in export wrappers.
+ void UpdateInstanceReferences(Handle<WeakCell> old_weak_instance,
+ Handle<WeakCell> new_weak_instance);
// Update all direct call sites based on the code table of the given native module.
- void RelocateDirectCalls(Handle<WasmInstanceObject> instance);
- // Relocate an arbitrary object (e.g. function table).
- void RelocatePointer(Address old_obj, Address new_obj);
-
+ void RelocateDirectCalls(NativeModule* module);
// Apply all relocations and patching to all code in the instance (wasm code
// and exported functions).
- bool ApplyToWholeInstance(WasmInstanceObject*,
- ICacheFlushMode = FLUSH_ICACHE_IF_NEEDED);
+ bool ApplyToWholeModule(NativeModule*,
+ ICacheFlushMode = FLUSH_ICACHE_IF_NEEDED);
// Apply all relocations and patching to one wasm code object.
- bool ApplyToWasmCode(WasmCodeWrapper,
+ bool ApplyToWasmCode(wasm::WasmCode*,
ICacheFlushMode = FLUSH_ICACHE_IF_NEEDED);
private:
- Address new_wasm_context_address_ = 0;
-
- uint32_t old_function_table_size_ = 0;
- uint32_t new_function_table_size_ = 0;
-
- Handle<WasmInstanceObject> relocate_direct_calls_instance_;
-
- std::unordered_map<Address, Address> pointers_to_relocate_;
+ Handle<WeakCell> old_weak_instance_;
+ Handle<WeakCell> new_weak_instance_;
+ NativeModule* relocate_direct_calls_module_ = nullptr;
};
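A minimal usage sketch of the record-then-apply pattern described above; the handles, the native module, and the wrapper function name are assumed to be provided by the caller.

// Illustrative only.
void SpecializeForNewInstance(wasm::NativeModule* native_module,
                              Handle<WeakCell> old_instance_cell,
                              Handle<WeakCell> new_instance_cell) {
  wasm::CodeSpecialization spec;
  // Record the intended patches first ...
  spec.UpdateInstanceReferences(old_instance_cell, new_instance_cell);
  spec.RelocateDirectCalls(native_module);
  // ... then apply them in one pass over all wasm code and export wrappers.
  wasm::NativeModuleModificationScope scope(native_module);
  spec.ApplyToWholeModule(native_module);
}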
} // namespace wasm
diff --git a/deps/v8/src/wasm/wasm-code-wrapper.cc b/deps/v8/src/wasm/wasm-code-wrapper.cc
deleted file mode 100644
index c9eee24f3d..0000000000
--- a/deps/v8/src/wasm/wasm-code-wrapper.cc
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/wasm/wasm-code-wrapper.h"
-
-#include "src/objects-inl.h"
-#include "src/objects/code.h"
-#include "src/wasm/wasm-code-manager.h"
-#include "src/wasm/wasm-objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// When constructing, we check the flag. After that, we just
-// check using the member.
-WasmCodeWrapper::WasmCodeWrapper(Handle<Code> code) {
- DCHECK(!FLAG_wasm_jit_to_native);
- code_ptr_.code_handle_ = code.location();
-}
-
-WasmCodeWrapper::WasmCodeWrapper(const wasm::WasmCode* code) {
- DCHECK(FLAG_wasm_jit_to_native);
- code_ptr_.wasm_code_ = code;
-}
-
-Handle<Code> WasmCodeWrapper::GetCode() const {
- DCHECK(IsCodeObject());
- return Handle<Code>(code_ptr_.code_handle_);
-}
-
-const wasm::WasmCode* WasmCodeWrapper::GetWasmCode() const {
- DCHECK(!IsCodeObject());
- return code_ptr_.wasm_code_;
-}
-
-bool WasmCodeWrapper::IsCodeObject() const { return !FLAG_wasm_jit_to_native; }
-
-#ifdef ENABLE_DISASSEMBLER
-void WasmCodeWrapper::Disassemble(const char* name, Isolate* isolate,
- std::ostream& os) const {
- if (IsCodeObject()) {
- GetCode()->Disassemble(name, os);
- } else {
- GetWasmCode()->Disassemble(name, isolate, os);
- }
-}
-#endif
-
-bool WasmCodeWrapper::is_liftoff() const {
- return IsCodeObject() ? !GetCode()->is_turbofanned()
- : GetWasmCode()->is_liftoff();
-}
-
-Vector<uint8_t> WasmCodeWrapper::instructions() const {
- if (!IsCodeObject()) return GetWasmCode()->instructions();
- Handle<Code> code = GetCode();
- return {code->instruction_start(),
- static_cast<size_t>(code->instruction_size())};
-}
-
-WasmInstanceObject* WasmCodeWrapper::wasm_instance() const {
- if (IsCodeObject()) {
- WeakCell* weak_instance =
- WeakCell::cast(GetCode()->deoptimization_data()->get(0));
- return WasmInstanceObject::cast(weak_instance->value());
- }
- return GetWasmCode()->owner()->compiled_module()->owning_instance();
-}
-
-WasmContext* WasmCodeWrapper::wasm_context() const {
- return wasm_instance()->wasm_context()->get();
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-code-wrapper.h b/deps/v8/src/wasm/wasm-code-wrapper.h
deleted file mode 100644
index d51bc085aa..0000000000
--- a/deps/v8/src/wasm/wasm-code-wrapper.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-#ifndef V8_WASM_WASM_CODE_WRAPPER_H_
-#define V8_WASM_WASM_CODE_WRAPPER_H_
-
-#include "src/handles.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-class WasmCode;
-} // namespace wasm
-
-class Code;
-struct WasmContext;
-class WasmInstanceObject;
-
-// TODO(mtrofin): remove once we remove FLAG_wasm_jit_to_native
-class WasmCodeWrapper {
- public:
- WasmCodeWrapper() {}
-
- explicit WasmCodeWrapper(Handle<Code> code);
- explicit WasmCodeWrapper(const wasm::WasmCode* code);
- Handle<Code> GetCode() const;
- const wasm::WasmCode* GetWasmCode() const;
- bool is_null() const { return code_ptr_.wasm_code_ == nullptr; }
- bool IsCodeObject() const;
- bool is_liftoff() const;
-
- Vector<uint8_t> instructions() const;
-
- WasmInstanceObject* wasm_instance() const;
- WasmContext* wasm_context() const;
-
-#ifdef ENABLE_DISASSEMBLER
- void Disassemble(const char* name, Isolate* isolate, std::ostream& os) const;
-#endif
-
- private:
- union {
- const wasm::WasmCode* wasm_code_;
- Code** code_handle_;
- } code_ptr_ = {};
-};
-
-} // namespace internal
-} // namespace v8
-#endif // V8_WASM_WASM_CODE_WRAPPER_H_
diff --git a/deps/v8/src/wasm/wasm-constants.h b/deps/v8/src/wasm/wasm-constants.h
index 932501d776..dc6f1ee675 100644
--- a/deps/v8/src/wasm/wasm-constants.h
+++ b/deps/v8/src/wasm/wasm-constants.h
@@ -20,7 +20,8 @@ enum ValueTypeCode : uint8_t {
kLocalI64 = 0x7e,
kLocalF32 = 0x7d,
kLocalF64 = 0x7c,
- kLocalS128 = 0x7b
+ kLocalS128 = 0x7b,
+ kLocalAnyRef = 0x6f
};
// Binary encoding of other types.
constexpr uint8_t kWasmFunctionTypeCode = 0x60;
diff --git a/deps/v8/src/wasm/wasm-debug.cc b/deps/v8/src/wasm/wasm-debug.cc
index 08d436ffa4..33e82e2158 100644
--- a/deps/v8/src/wasm/wasm-debug.cc
+++ b/deps/v8/src/wasm/wasm-debug.cc
@@ -10,8 +10,8 @@
#include "src/compiler/wasm-compiler.h"
#include "src/debug/debug-scopes.h"
#include "src/debug/debug.h"
-#include "src/factory.h"
#include "src/frames-inl.h"
+#include "src/heap/factory.h"
#include "src/identity-map.h"
#include "src/isolate.h"
#include "src/wasm/module-decoder.h"
@@ -140,12 +140,13 @@ class InterpreterHandle {
}
public:
+ // TODO(wasm): properly handlify this constructor.
InterpreterHandle(Isolate* isolate, WasmDebugInfo* debug_info)
: isolate_(isolate),
module_(
debug_info->wasm_instance()->compiled_module()->shared()->module()),
interpreter_(isolate, module_, GetBytes(debug_info),
- debug_info->wasm_instance()->wasm_context()->get()) {}
+ handle(debug_info->wasm_instance())) {}
~InterpreterHandle() { DCHECK_EQ(0, activations_.size()); }
@@ -197,8 +198,6 @@ class InterpreterHandle {
uint32_t activation_id = StartActivation(frame_pointer);
- WasmInterpreter::HeapObjectsScope heap_objects_scope(&interpreter_,
- instance_object);
WasmInterpreter::Thread* thread = interpreter_.GetThread(0);
thread->InitFrame(&module()->functions[func_index], wasm_args.start());
bool finished = false;
@@ -293,8 +292,7 @@ class InterpreterHandle {
// Check that this is indeed the instance which is connected to this
// interpreter.
DCHECK_EQ(this, Managed<wasm::InterpreterHandle>::cast(
- instance_obj->debug_info()->get(
- WasmDebugInfo::kInterpreterHandleIndex))
+ instance_obj->debug_info()->interpreter_handle())
->get());
return instance_obj;
}
@@ -534,25 +532,24 @@ namespace {
wasm::InterpreterHandle* GetOrCreateInterpreterHandle(
Isolate* isolate, Handle<WasmDebugInfo> debug_info) {
- Handle<Object> handle(debug_info->get(WasmDebugInfo::kInterpreterHandleIndex),
- isolate);
+ Handle<Object> handle(debug_info->interpreter_handle(), isolate);
if (handle->IsUndefined(isolate)) {
handle = Managed<wasm::InterpreterHandle>::Allocate(isolate, isolate,
*debug_info);
- debug_info->set(WasmDebugInfo::kInterpreterHandleIndex, *handle);
+ debug_info->set_interpreter_handle(*handle);
}
return Handle<Managed<wasm::InterpreterHandle>>::cast(handle)->get();
}
wasm::InterpreterHandle* GetInterpreterHandle(WasmDebugInfo* debug_info) {
- Object* handle_obj = debug_info->get(WasmDebugInfo::kInterpreterHandleIndex);
+ Object* handle_obj = debug_info->interpreter_handle();
DCHECK(!handle_obj->IsUndefined(debug_info->GetIsolate()));
return Managed<wasm::InterpreterHandle>::cast(handle_obj)->get();
}
wasm::InterpreterHandle* GetInterpreterHandleOrNull(WasmDebugInfo* debug_info) {
- Object* handle_obj = debug_info->get(WasmDebugInfo::kInterpreterHandleIndex);
+ Object* handle_obj = debug_info->interpreter_handle();
if (handle_obj->IsUndefined(debug_info->GetIsolate())) return nullptr;
return Managed<wasm::InterpreterHandle>::cast(handle_obj)->get();
}
@@ -566,31 +563,16 @@ int GetNumFunctions(WasmInstanceObject* instance) {
Handle<FixedArray> GetOrCreateInterpretedFunctions(
Isolate* isolate, Handle<WasmDebugInfo> debug_info) {
- Handle<Object> obj(debug_info->get(WasmDebugInfo::kInterpretedFunctionsIndex),
- isolate);
+ Handle<Object> obj(debug_info->interpreted_functions(), isolate);
if (!obj->IsUndefined(isolate)) return Handle<FixedArray>::cast(obj);
Handle<FixedArray> new_arr = isolate->factory()->NewFixedArray(
GetNumFunctions(debug_info->wasm_instance()));
- debug_info->set(WasmDebugInfo::kInterpretedFunctionsIndex, *new_arr);
+ debug_info->set_interpreted_functions(*new_arr);
return new_arr;
}
using CodeRelocationMap = std::map<Address, Address>;
-using CodeRelocationMapGC =
- IdentityMap<Handle<Code>, FreeStoreAllocationPolicy>;
-
-void RedirectCallsitesInCodeGC(Code* code, CodeRelocationMapGC& map) {
- DisallowHeapAllocation no_gc;
- for (RelocIterator it(code, RelocInfo::kCodeTargetMask); !it.done();
- it.next()) {
- DCHECK(RelocInfo::IsCodeTarget(it.rinfo()->rmode()));
- Code* target = Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
- Handle<Code>* new_target = map.Find(target);
- if (!new_target) continue;
- it.rinfo()->set_target_address((*new_target)->instruction_start());
- }
-}
void RedirectCallsitesInCode(Isolate* isolate, const wasm::WasmCode* code,
CodeRelocationMap* map) {
@@ -618,28 +600,6 @@ void RedirectCallsitesInJSWrapperCode(Isolate* isolate, Code* code,
}
}
-void RedirectCallsitesInInstanceGC(Isolate* isolate,
- WasmInstanceObject* instance,
- CodeRelocationMapGC& map) {
- DisallowHeapAllocation no_gc;
- // Redirect all calls in wasm functions.
- FixedArray* code_table = instance->compiled_module()->code_table();
- for (int i = 0, e = GetNumFunctions(instance); i < e; ++i) {
- RedirectCallsitesInCodeGC(Code::cast(code_table->get(i)), map);
- }
- // TODO(6668): Find instances that imported our code and also patch those.
-
- // Redirect all calls in exported functions.
- FixedArray* weak_exported_functions =
- instance->compiled_module()->weak_exported_functions();
- for (int i = 0, e = weak_exported_functions->length(); i != e; ++i) {
- WeakCell* weak_function = WeakCell::cast(weak_exported_functions->get(i));
- if (weak_function->cleared()) continue;
- Code* code = JSFunction::cast(weak_function->value())->code();
- RedirectCallsitesInCodeGC(code, map);
- }
-}
-
void RedirectCallsitesInInstance(Isolate* isolate, WasmInstanceObject* instance,
CodeRelocationMap* map) {
DisallowHeapAllocation no_gc;
@@ -652,12 +612,9 @@ void RedirectCallsitesInInstance(Isolate* isolate, WasmInstanceObject* instance,
// TODO(6668): Find instances that imported our code and also patch those.
// Redirect all calls in exported functions.
- FixedArray* weak_exported_functions =
- instance->compiled_module()->weak_exported_functions();
- for (int i = 0, e = weak_exported_functions->length(); i != e; ++i) {
- WeakCell* weak_function = WeakCell::cast(weak_exported_functions->get(i));
- if (weak_function->cleared()) continue;
- Code* code = JSFunction::cast(weak_function->value())->code();
+ FixedArray* export_wrapper = instance->compiled_module()->export_wrappers();
+ for (int i = 0, e = export_wrapper->length(); i != e; ++i) {
+ Code* code = Code::cast(export_wrapper->get(i));
RedirectCallsitesInJSWrapperCode(isolate, code, map);
}
}
@@ -667,9 +624,9 @@ void RedirectCallsitesInInstance(Isolate* isolate, WasmInstanceObject* instance,
Handle<WasmDebugInfo> WasmDebugInfo::New(Handle<WasmInstanceObject> instance) {
DCHECK(!instance->has_debug_info());
Factory* factory = instance->GetIsolate()->factory();
- Handle<FixedArray> arr = factory->NewFixedArray(kFieldCount, TENURED);
- arr->set(kInstanceIndex, *instance);
- Handle<WasmDebugInfo> debug_info = Handle<WasmDebugInfo>::cast(arr);
+ Handle<WasmDebugInfo> debug_info = Handle<WasmDebugInfo>::cast(
+ factory->NewStruct(WASM_DEBUG_INFO_TYPE, TENURED));
+ debug_info->set_wasm_instance(*instance);
instance->set_debug_info(*debug_info);
return debug_info;
}
@@ -680,33 +637,12 @@ wasm::WasmInterpreter* WasmDebugInfo::SetupForTesting(
Isolate* isolate = instance_obj->GetIsolate();
auto interp_handle =
Managed<wasm::InterpreterHandle>::Allocate(isolate, isolate, *debug_info);
- debug_info->set(kInterpreterHandleIndex, *interp_handle);
+ debug_info->set_interpreter_handle(*interp_handle);
auto ret = interp_handle->get()->interpreter();
ret->SetCallIndirectTestMode();
return ret;
}
-bool WasmDebugInfo::IsWasmDebugInfo(Object* object) {
- if (!object->IsFixedArray()) return false;
- FixedArray* arr = FixedArray::cast(object);
- if (arr->length() != kFieldCount) return false;
- if (!arr->get(kInstanceIndex)->IsWasmInstanceObject()) return false;
- Isolate* isolate = arr->GetIsolate();
- if (!arr->get(kInterpreterHandleIndex)->IsUndefined(isolate) &&
- !arr->get(kInterpreterHandleIndex)->IsForeign())
- return false;
- return true;
-}
-
-WasmDebugInfo* WasmDebugInfo::cast(Object* object) {
- DCHECK(IsWasmDebugInfo(object));
- return reinterpret_cast<WasmDebugInfo*>(object);
-}
-
-WasmInstanceObject* WasmDebugInfo::wasm_instance() {
- return WasmInstanceObject::cast(get(kInstanceIndex));
-}
-
void WasmDebugInfo::SetBreakpoint(Handle<WasmDebugInfo> debug_info,
int func_index, int offset) {
Isolate* isolate = debug_info->GetIsolate();
@@ -729,9 +665,6 @@ void WasmDebugInfo::RedirectToInterpreter(Handle<WasmDebugInfo> debug_info,
wasm::WasmModule* module = instance->module();
CodeRelocationMap code_to_relocate;
- Handle<FixedArray> code_table(instance->compiled_module()->code_table(),
- isolate);
- CodeRelocationMapGC code_to_relocate_gc(isolate->heap());
// We may modify js wrappers, as well as wasm functions. Hence the 2
// modification scopes.
CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
@@ -744,31 +677,20 @@ void WasmDebugInfo::RedirectToInterpreter(Handle<WasmDebugInfo> debug_info,
if (!interpreted_functions->get(func_index)->IsUndefined(isolate)) continue;
Handle<Code> new_code = compiler::CompileWasmInterpreterEntry(
- isolate, func_index, module->functions[func_index].sig, instance);
- if (FLAG_wasm_jit_to_native) {
- const wasm::WasmCode* wasm_new_code =
- native_module->AddInterpreterWrapper(new_code, func_index);
- const wasm::WasmCode* old_code =
- native_module->GetCode(static_cast<uint32_t>(func_index));
- Handle<Foreign> foreign_holder = isolate->factory()->NewForeign(
- wasm_new_code->instructions().start(), TENURED);
- interpreted_functions->set(func_index, *foreign_holder);
- DCHECK_EQ(0, code_to_relocate.count(old_code->instructions().start()));
- code_to_relocate.insert(
- std::make_pair(old_code->instructions().start(),
- wasm_new_code->instructions().start()));
- } else {
- Code* old_code = Code::cast(code_table->get(func_index));
- interpreted_functions->set(func_index, *new_code);
- DCHECK_NULL(code_to_relocate_gc.Find(old_code));
- code_to_relocate_gc.Set(old_code, new_code);
- }
- }
- if (FLAG_wasm_jit_to_native) {
- RedirectCallsitesInInstance(isolate, *instance, &code_to_relocate);
- } else {
- RedirectCallsitesInInstanceGC(isolate, *instance, code_to_relocate_gc);
- }
+ isolate, func_index, module->functions[func_index].sig);
+ const wasm::WasmCode* wasm_new_code =
+ native_module->AddInterpreterWrapper(new_code, func_index);
+ const wasm::WasmCode* old_code =
+ native_module->GetCode(static_cast<uint32_t>(func_index));
+ Handle<Foreign> foreign_holder = isolate->factory()->NewForeign(
+ wasm_new_code->instructions().start(), TENURED);
+ interpreted_functions->set(func_index, *foreign_holder);
+ DCHECK_EQ(0, code_to_relocate.count(old_code->instructions().start()));
+ code_to_relocate.insert(
+ std::make_pair(old_code->instructions().start(),
+ wasm_new_code->instructions().start()));
+ }
+ RedirectCallsitesInInstance(isolate, *instance, &code_to_relocate);
}
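A small sketch of how the relocation map filled above is used: one entry per redirected function, mapping the old code's start address to the interpreter-entry wrapper's start; RedirectCallsitesInInstance then rewrites call sites through that map. The helper name is assumed.

// Illustrative only; CodeRelocationMap is std::map<Address, Address>.
void RecordRedirect(CodeRelocationMap* map, const wasm::WasmCode* old_code,
                    const wasm::WasmCode* interpreter_entry) {
  (*map)[old_code->instructions().start()] =
      interpreter_entry->instructions().start();
}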
void WasmDebugInfo::PrepareStep(StepAction step_action) {
@@ -851,16 +773,13 @@ Handle<JSFunction> WasmDebugInfo::GetCWasmEntry(
Handle<Code> new_entry_code = compiler::CompileCWasmEntry(isolate, sig);
Handle<String> name = isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("c-wasm-entry"));
- Handle<SharedFunctionInfo> shared =
- isolate->factory()->NewSharedFunctionInfo(name, new_entry_code, false);
- shared->set_internal_formal_parameter_count(
- compiler::CWasmEntryParameters::kNumParameters);
NewFunctionArgs args = NewFunctionArgs::ForWasm(
name, new_entry_code, isolate->sloppy_function_map());
Handle<JSFunction> new_entry = isolate->factory()->NewFunction(args);
new_entry->set_context(
debug_info->wasm_instance()->compiled_module()->native_context());
- new_entry->set_shared(*shared);
+ new_entry->shared()->set_internal_formal_parameter_count(
+ compiler::CWasmEntryParameters::kNumParameters);
entries->set(index, *new_entry);
}
return handle(JSFunction::cast(entries->get(index)));
diff --git a/deps/v8/src/wasm/wasm-engine.cc b/deps/v8/src/wasm/wasm-engine.cc
index 460742d15a..ad50765a38 100644
--- a/deps/v8/src/wasm/wasm-engine.cc
+++ b/deps/v8/src/wasm/wasm-engine.cc
@@ -119,6 +119,24 @@ void WasmEngine::AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
promise);
}
+void WasmEngine::Register(CancelableTaskManager* task_manager) {
+ task_managers_.emplace_back(task_manager);
+}
+
+void WasmEngine::Unregister(CancelableTaskManager* task_manager) {
+ task_managers_.remove(task_manager);
+}
+
+void WasmEngine::TearDown() {
+ // Cancel all registered task managers.
+ for (auto task_manager : task_managers_) {
+ task_manager->CancelAndWait();
+ }
+
+ // Cancel all AsyncCompileJobs.
+ compilation_manager_.TearDown();
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-engine.h b/deps/v8/src/wasm/wasm-engine.h
index 8a698c83b9..b1a5a0a9b9 100644
--- a/deps/v8/src/wasm/wasm-engine.h
+++ b/deps/v8/src/wasm/wasm-engine.h
@@ -71,12 +71,24 @@ class V8_EXPORT_PRIVATE WasmEngine {
WasmCodeManager* code_manager() const { return code_manager_.get(); }
- WasmAllocationTracker* allocation_tracker() { return &allocation_tracker_; }
+ WasmMemoryTracker* memory_tracker() { return &memory_tracker_; }
+
+ // We register and unregister CancelableTaskManagers that run
+ // isolate-dependent tasks. These tasks need to be shut down when the
+ // isolate shuts down.
+ void Register(CancelableTaskManager* task_manager);
+ void Unregister(CancelableTaskManager* task_manager);
+
+ void TearDown();
private:
CompilationManager compilation_manager_;
std::unique_ptr<WasmCodeManager> code_manager_;
- WasmAllocationTracker allocation_tracker_;
+ WasmMemoryTracker memory_tracker_;
+
+ // Contains all CancelableTaskManagers that run tasks that are dependent
+ // on the isolate.
+ std::list<CancelableTaskManager*> task_managers_;
DISALLOW_COPY_AND_ASSIGN(WasmEngine);
};
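A hedged sketch of the registration protocol the comments above describe: a component that runs isolate-dependent background tasks registers its CancelableTaskManager with the engine so TearDown() can cancel it, and unregisters before it is destroyed. The wrapper class name is assumed; only Register, Unregister, and CancelableTaskManager::CancelAndWait from the existing code are relied upon.

// Illustrative only.
class RegisteredTaskManager {
 public:
  explicit RegisteredTaskManager(wasm::WasmEngine* engine) : engine_(engine) {
    engine_->Register(&task_manager_);
  }
  ~RegisteredTaskManager() {
    // Cancel and join pending tasks, then drop the registration so
    // WasmEngine::TearDown() never sees a dangling pointer.
    task_manager_.CancelAndWait();
    engine_->Unregister(&task_manager_);
  }
  CancelableTaskManager* task_manager() { return &task_manager_; }

 private:
  wasm::WasmEngine* engine_;
  CancelableTaskManager task_manager_;
};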
diff --git a/deps/v8/src/wasm/wasm-interpreter.cc b/deps/v8/src/wasm/wasm-interpreter.cc
index 3bcb1b5ef6..b158e2c44f 100644
--- a/deps/v8/src/wasm/wasm-interpreter.cc
+++ b/deps/v8/src/wasm/wasm-interpreter.cc
@@ -634,43 +634,6 @@ const char* OpcodeName(uint32_t val) {
return WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(val));
}
-// Unwrap a wasm to js wrapper, return the callable heap object.
-// If the wrapper would throw a TypeError, return a null handle.
-Handle<HeapObject> UnwrapWasmToJSWrapper(Isolate* isolate,
- WasmCodeWrapper wrapper) {
- Handle<FixedArray> js_imports_table;
- int index = 0;
- if (wrapper.IsCodeObject()) {
- Handle<Code> js_wrapper = wrapper.GetCode();
- DCHECK(Code::WASM_TO_JS_FUNCTION == js_wrapper->kind());
- Handle<FixedArray> deopt_data(js_wrapper->deoptimization_data(), isolate);
- DCHECK_EQ(2, deopt_data->length());
- intptr_t js_imports_table_loc = static_cast<intptr_t>(
- HeapNumber::cast(deopt_data->get(0))->value_as_bits());
- js_imports_table = Handle<FixedArray>(
- reinterpret_cast<FixedArray**>(js_imports_table_loc));
- CHECK(deopt_data->get(1)->ToInt32(&index));
- DCHECK_GT(js_imports_table->length(), index);
- } else {
- const wasm::WasmCode* wasm_code = wrapper.GetWasmCode();
- DCHECK_EQ(wasm::WasmCode::kWasmToJsWrapper, wasm_code->kind());
- js_imports_table = Handle<FixedArray>(wasm_code->owner()
- ->compiled_module()
- ->owning_instance()
- ->js_imports_table());
- index = 1 + 3 * static_cast<int>(wasm_code->index());
- }
- Handle<Object> obj(js_imports_table->get(index), isolate);
- if (obj->IsCallable()) {
- return Handle<HeapObject>::cast(obj);
- } else {
- // If we did not find a callable object, this is an illegal JS import and
- // obj must be undefined.
- DCHECK(obj->IsUndefined(isolate));
- return Handle<HeapObject>::null();
- }
-}
-
class SideTable;
// Code and metadata needed to execute a function.
@@ -965,9 +928,6 @@ class CodeMap {
Zone* zone_;
const WasmModule* module_;
ZoneVector<InterpreterCode> interpreter_code_;
- // This handle is set and reset by the SetInstanceObject() /
- // ClearInstanceObject() method, which is used by the HeapObjectsScope.
- Handle<WasmInstanceObject> instance_;
// TODO(wasm): Remove this testing wart. It is needed because interpreter
// entry stubs are not generated in testing the interpreter in cctests.
bool call_indirect_through_module_ = false;
@@ -995,38 +955,7 @@ class CodeMap {
call_indirect_through_module_ = val;
}
- void SetInstanceObject(Handle<WasmInstanceObject> instance) {
- DCHECK(instance_.is_null());
- instance_ = instance;
- }
-
- void ClearInstanceObject() { instance_ = Handle<WasmInstanceObject>::null(); }
-
const WasmModule* module() const { return module_; }
- bool has_instance() const { return !instance_.is_null(); }
- WasmInstanceObject* instance() const {
- DCHECK(has_instance());
- return *instance_;
- }
- MaybeHandle<WasmInstanceObject> maybe_instance() const {
- return has_instance() ? handle(instance())
- : MaybeHandle<WasmInstanceObject>();
- }
-
- const wasm::WasmCode* GetImportedFunction(uint32_t function_index) {
- DCHECK(has_instance());
- DCHECK_GT(module_->num_imported_functions, function_index);
- const wasm::NativeModule* native_module =
- instance()->compiled_module()->GetNativeModule();
- return native_module->GetCode(function_index);
- }
-
- Code* GetImportedFunctionGC(uint32_t function_index) {
- DCHECK(has_instance());
- DCHECK_GT(module_->num_imported_functions, function_index);
- FixedArray* code_table = instance()->compiled_module()->code_table();
- return Code::cast(code_table->get(static_cast<int>(function_index)));
- }
InterpreterCode* GetCode(const WasmFunction* function) {
InterpreterCode* code = GetCode(function->func_index);
@@ -1105,60 +1034,6 @@ class CodeMap {
}
};
-Handle<Object> WasmValueToNumber(Factory* factory, WasmValue val,
- wasm::ValueType type) {
- switch (type) {
- case kWasmI32:
- return factory->NewNumberFromInt(val.to<int32_t>());
- case kWasmI64:
- // wasm->js and js->wasm is illegal for i64 type.
- UNREACHABLE();
- case kWasmF32:
- return factory->NewNumber(val.to<float>());
- case kWasmF64:
- return factory->NewNumber(val.to<double>());
- default:
- // TODO(wasm): Implement simd.
- UNIMPLEMENTED();
- return Handle<Object>::null();
- }
-}
-
-// Convert JS value to WebAssembly, spec here:
-// https://github.com/WebAssembly/design/blob/master/JS.md#towebassemblyvalue
-// Return WasmValue() (i.e. of type kWasmStmt) on failure. In that case, an
-// exception will be pending on the isolate.
-WasmValue ToWebAssemblyValue(Isolate* isolate, Handle<Object> value,
- wasm::ValueType type) {
- switch (type) {
- case kWasmI32: {
- MaybeHandle<Object> maybe_i32 = Object::ToInt32(isolate, value);
- if (maybe_i32.is_null()) return {};
- int32_t value;
- CHECK(maybe_i32.ToHandleChecked()->ToInt32(&value));
- return WasmValue(value);
- }
- case kWasmI64:
- // If the signature contains i64, a type error was thrown before.
- UNREACHABLE();
- case kWasmF32: {
- MaybeHandle<Object> maybe_number = Object::ToNumber(value);
- if (maybe_number.is_null()) return {};
- return WasmValue(
- static_cast<float>(maybe_number.ToHandleChecked()->Number()));
- }
- case kWasmF64: {
- MaybeHandle<Object> maybe_number = Object::ToNumber(value);
- if (maybe_number.is_null()) return {};
- return WasmValue(maybe_number.ToHandleChecked()->Number());
- }
- default:
- // TODO(wasm): Handle simd.
- UNIMPLEMENTED();
- return WasmValue();
- }
-}
-
// Like a static_cast from src to dst, but specialized for boxed floats.
template <typename dst, typename src>
struct converter {
@@ -1204,9 +1079,10 @@ class ThreadImpl {
};
public:
- ThreadImpl(Zone* zone, CodeMap* codemap, WasmContext* wasm_context)
+ ThreadImpl(Zone* zone, CodeMap* codemap,
+ Handle<WasmInstanceObject> instance_object)
: codemap_(codemap),
- wasm_context_(wasm_context),
+ instance_object_(instance_object),
zone_(zone),
frames_(zone),
activations_(zone) {}
@@ -1366,7 +1242,7 @@ class ThreadImpl {
friend class InterpretedFrameImpl;
CodeMap* codemap_;
- WasmContext* wasm_context_;
+ Handle<WasmInstanceObject> instance_object_;
Zone* zone_;
WasmValue* stack_start_ = nullptr; // Start of allocated stack space.
WasmValue* stack_limit_ = nullptr; // End of allocated stack space.
@@ -1497,7 +1373,7 @@ class ThreadImpl {
// Returns true if the call was successful, false if the stack check failed
// and the current activation was fully unwound.
bool DoCall(Decoder* decoder, InterpreterCode* target, pc_t* pc,
- pc_t* limit) WARN_UNUSED_RESULT {
+ pc_t* limit) V8_WARN_UNUSED_RESULT {
frames_.back().pc = *pc;
PushFrame(target);
if (!DoStackCheck()) return false;
@@ -1523,14 +1399,14 @@ class ThreadImpl {
template <typename mtype>
inline byte* BoundsCheckMem(uint32_t offset, uint32_t index) {
- uint32_t mem_size = wasm_context_->mem_size;
+ size_t mem_size = instance_object_->memory_size();
if (sizeof(mtype) > mem_size) return nullptr;
if (offset > (mem_size - sizeof(mtype))) return nullptr;
if (index > (mem_size - sizeof(mtype) - offset)) return nullptr;
// Compute the effective address of the access, making sure to condition
// the index even in the in-bounds case.
- return wasm_context_->mem_start + offset +
- (index & wasm_context_->mem_mask);
+ return instance_object_->memory_start() + offset +
+ (index & instance_object_->memory_mask());
}
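A standalone sketch of the overflow-safe bounds check above: the three comparisons are ordered so that no subtraction can underflow, and the index is masked even when the access is in bounds. Names are illustrative; byte corresponds to uint8_t.

// Illustrative only; mirrors BoundsCheckMem above.
uint8_t* CheckedEffectiveAddress(uint8_t* mem_start, size_t mem_size,
                                 size_t mem_mask, uint32_t offset,
                                 uint32_t index, size_t access_size) {
  if (access_size > mem_size) return nullptr;           // memory too small
  if (offset > mem_size - access_size) return nullptr;  // offset out of range
  if (index > mem_size - access_size - offset) return nullptr;  // oob index
  // Condition (mask) the index even in the in-bounds case.
  return mem_start + offset + (index & mem_mask);
}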
template <typename ctype, typename mtype>
@@ -1554,7 +1430,7 @@ class ThreadImpl {
wasm::MemoryTracingInfo info(operand.offset + index, false, rep);
TraceMemoryOperation(ExecutionEngine::kInterpreter, &info,
code->function->func_index, static_cast<int>(pc),
- wasm_context_->mem_start);
+ instance_object_->memory_start());
}
return true;
@@ -1580,7 +1456,7 @@ class ThreadImpl {
wasm::MemoryTracingInfo info(operand.offset + index, true, rep);
TraceMemoryOperation(ExecutionEngine::kInterpreter, &info,
code->function->func_index, static_cast<int>(pc),
- wasm_context_->mem_start);
+ instance_object_->memory_start());
}
return true;
@@ -1723,7 +1599,7 @@ class ThreadImpl {
// fully unwound.
// Do call this function immediately *after* pushing a new frame. The pc of
// the top frame will be reset to 0 if the stack check fails.
- bool DoStackCheck() WARN_UNUSED_RESULT {
+ bool DoStackCheck() V8_WARN_UNUSED_RESULT {
// The goal of this stack check is not to prevent actual stack overflows,
// but to simulate stack overflows during the execution of compiled code.
// That is why this function uses FLAG_stack_size, even though the value
@@ -1735,14 +1611,10 @@ class ThreadImpl {
if (V8_LIKELY(current_stack_size <= stack_size_limit)) {
return true;
}
- if (!codemap()->has_instance()) {
- // In test mode: Just abort.
- FATAL("wasm interpreter: stack overflow");
- }
// The pc of the top frame is initialized to the first instruction. We reset
// it to 0 here such that we report the same position as in compiled code.
frames_.back().pc = 0;
- Isolate* isolate = codemap()->instance()->GetIsolate();
+ Isolate* isolate = instance_object_->GetIsolate();
HandleScope handle_scope(isolate);
isolate->StackOverflow();
return HandleException(isolate) == WasmInterpreter::Thread::HANDLED;
@@ -2024,7 +1896,7 @@ class ThreadImpl {
GlobalIndexOperand<Decoder::kNoValidate> operand(&decoder,
code->at(pc));
const WasmGlobal* global = &module()->globals[operand.index];
- byte* ptr = wasm_context_->globals_start + global->offset;
+ byte* ptr = instance_object_->globals_start() + global->offset;
WasmValue val;
switch (global->type) {
#define CASE_TYPE(wasm, ctype) \
@@ -2044,7 +1916,7 @@ class ThreadImpl {
GlobalIndexOperand<Decoder::kNoValidate> operand(&decoder,
code->at(pc));
const WasmGlobal* global = &module()->globals[operand.index];
- byte* ptr = wasm_context_->globals_start + global->offset;
+ byte* ptr = instance_object_->globals_start() + global->offset;
WasmValue val = Pop();
switch (global->type) {
#define CASE_TYPE(wasm, ctype) \
@@ -2150,12 +2022,9 @@ class ThreadImpl {
MemoryIndexOperand<Decoder::kNoValidate> operand(&decoder,
code->at(pc));
uint32_t delta_pages = Pop().to<uint32_t>();
- Handle<WasmInstanceObject> instance =
- codemap()->maybe_instance().ToHandleChecked();
- DCHECK_EQ(wasm_context_, instance->wasm_context()->get());
- Isolate* isolate = instance->GetIsolate();
- int32_t result =
- WasmInstanceObject::GrowMemory(isolate, instance, delta_pages);
+ Handle<WasmMemoryObject> memory(instance_object_->memory_object());
+ Isolate* isolate = memory->GetIsolate();
+ int32_t result = WasmMemoryObject::Grow(isolate, memory, delta_pages);
Push(WasmValue(result));
len = 1 + operand.length;
// Treat one grow_memory instruction like 1000 other instructions,
@@ -2166,8 +2035,8 @@ class ThreadImpl {
case kExprMemorySize: {
MemoryIndexOperand<Decoder::kNoValidate> operand(&decoder,
code->at(pc));
- Push(WasmValue(
- static_cast<uint32_t>(wasm_context_->mem_size / kWasmPageSize)));
+ Push(WasmValue(static_cast<uint32_t>(instance_object_->memory_size() /
+ kWasmPageSize)));
len = 1 + operand.length;
break;
}
@@ -2366,66 +2235,17 @@ class ThreadImpl {
return {ExternalCallResult::EXTERNAL_RETURNED};
}
- ExternalCallResult CallExternalJSFunction(Isolate* isolate,
- WasmCodeWrapper code,
- FunctionSig* signature) {
- Handle<HeapObject> target = UnwrapWasmToJSWrapper(isolate, code);
-
- if (target.is_null()) {
+ ExternalCallResult CallExternalWasmFunction(
+ Isolate* isolate, Handle<WasmInstanceObject> instance,
+ const wasm::WasmCode* code, FunctionSig* sig) {
+ if (code->kind() == wasm::WasmCode::kWasmToJsWrapper &&
+ !IsJSCompatibleSignature(sig)) {
isolate->Throw(*isolate->factory()->NewTypeError(
MessageTemplate::kWasmTrapTypeError));
return TryHandleException(isolate);
}
-#if DEBUG
- std::ostringstream oss;
- target->HeapObjectShortPrint(oss);
- TRACE(" => Calling imported function %s\n", oss.str().c_str());
-#endif
-
- int num_args = static_cast<int>(signature->parameter_count());
-
- // Get all arguments as JS values.
- std::vector<Handle<Object>> args;
- args.reserve(num_args);
- WasmValue* wasm_args = sp_ - num_args;
- for (int i = 0; i < num_args; ++i) {
- args.push_back(WasmValueToNumber(isolate->factory(), wasm_args[i],
- signature->GetParam(i)));
- }
-
- // The receiver is the global proxy if in sloppy mode (default), undefined
- // if in strict mode.
- Handle<Object> receiver = isolate->global_proxy();
- if (target->IsJSFunction() &&
- is_strict(JSFunction::cast(*target)->shared()->language_mode())) {
- receiver = isolate->factory()->undefined_value();
- }
-
- MaybeHandle<Object> maybe_retval =
- Execution::Call(isolate, target, receiver, num_args, args.data());
- if (maybe_retval.is_null()) return TryHandleException(isolate);
-
- Handle<Object> retval = maybe_retval.ToHandleChecked();
- // Pop arguments off the stack.
- sp_ -= num_args;
- // Push return values.
- if (signature->return_count() > 0) {
- // TODO(wasm): Handle multiple returns.
- DCHECK_EQ(1, signature->return_count());
- WasmValue value =
- ToWebAssemblyValue(isolate, retval, signature->GetReturn());
- if (value.type() == kWasmStmt) return TryHandleException(isolate);
- Push(value);
- }
- return {ExternalCallResult::EXTERNAL_RETURNED};
- }
-
- ExternalCallResult CallExternalWasmFunction(Isolate* isolate,
- WasmCodeWrapper code,
- FunctionSig* sig) {
- Handle<WasmDebugInfo> debug_info(codemap()->instance()->debug_info(),
- isolate);
+ Handle<WasmDebugInfo> debug_info(instance_object_->debug_info(), isolate);
Handle<JSFunction> wasm_entry =
WasmDebugInfo::GetCWasmEntry(debug_info, sig);
@@ -2476,7 +2296,7 @@ class ThreadImpl {
arg_buffer.resize(return_size);
}
- // Wrap the arg_buffer data pointer and the WasmContext* in a handle. As
+ // Wrap the arg_buffer data pointer in a handle. As
// this is an aligned pointer, to the GC it will look like a Smi.
Handle<Object> arg_buffer_obj(reinterpret_cast<Object*>(arg_buffer.data()),
isolate);
@@ -2485,15 +2305,9 @@ class ThreadImpl {
static_assert(compiler::CWasmEntryParameters::kNumParameters == 3,
"code below needs adaption");
Handle<Object> args[compiler::CWasmEntryParameters::kNumParameters];
- WasmContext* context = code.wasm_context();
- Handle<Object> context_obj(reinterpret_cast<Object*>(context), isolate);
- DCHECK(!context_obj->IsHeapObject());
- args[compiler::CWasmEntryParameters::kCodeObject] =
- code.IsCodeObject()
- ? Handle<Object>::cast(code.GetCode())
- : Handle<Object>::cast(isolate->factory()->NewForeign(
- code.GetWasmCode()->instructions().start(), TENURED));
- args[compiler::CWasmEntryParameters::kWasmContext] = context_obj;
+ args[compiler::CWasmEntryParameters::kCodeObject] = Handle<Object>::cast(
+ isolate->factory()->NewForeign(code->instructions().start(), TENURED));
+ args[compiler::CWasmEntryParameters::kWasmInstance] = instance;
args[compiler::CWasmEntryParameters::kArgumentsBuffer] = arg_buffer_obj;
Handle<Object> receiver = isolate->factory()->undefined_value();
@@ -2536,68 +2350,23 @@ class ThreadImpl {
return {ExternalCallResult::EXTERNAL_RETURNED};
}
- ExternalCallResult CallCodeObject(Isolate* isolate, Handle<Code> code,
- FunctionSig* signature) {
- DCHECK(AllowHandleAllocation::IsAllowed());
- DCHECK(AllowHeapAllocation::IsAllowed());
-
- if (code->kind() == Code::WASM_FUNCTION ||
- code->kind() == Code::WASM_TO_WASM_FUNCTION) {
- auto func_info = GetWasmFunctionInfo(isolate, code);
- if (*func_info.instance.ToHandleChecked() != codemap()->instance()) {
- return CallExternalWasmFunction(isolate, WasmCodeWrapper(code),
- signature);
- }
- DCHECK_LE(0, func_info.func_index);
- return {ExternalCallResult::INTERNAL,
- codemap()->GetCode(func_info.func_index)};
- }
-
- return CallExternalJSFunction(isolate, WasmCodeWrapper(code), signature);
- }
-
- ExternalCallResult CallWasmCode(Isolate* isolate, const wasm::WasmCode* code,
- FunctionSig* signature) {
- DCHECK(AllowHandleAllocation::IsAllowed());
- DCHECK(AllowHeapAllocation::IsAllowed());
-
- if (code->kind() == wasm::WasmCode::kFunction) {
- if (code->owner()->compiled_module()->owning_instance() !=
- codemap()->instance()) {
- return CallExternalWasmFunction(isolate, WasmCodeWrapper(code),
- signature);
- }
- return {ExternalCallResult::INTERNAL, codemap()->GetCode(code->index())};
- }
-
- if (code->kind() == wasm::WasmCode::kWasmToJsWrapper) {
- return CallExternalJSFunction(isolate, WasmCodeWrapper(code), signature);
- }
- if (code->kind() == wasm::WasmCode::kWasmToWasmWrapper ||
- code->kind() == wasm::WasmCode::kInterpreterStub) {
- return CallExternalWasmFunction(isolate, WasmCodeWrapper(code),
- signature);
- }
- return {ExternalCallResult::INVALID_FUNC};
- }
-
ExternalCallResult CallImportedFunction(uint32_t function_index) {
// Use a new HandleScope to avoid leaking / accumulating handles in the
// outer scope.
- Isolate* isolate = codemap()->instance()->GetIsolate();
+ Isolate* isolate = instance_object_->GetIsolate();
HandleScope handle_scope(isolate);
- if (FLAG_wasm_jit_to_native) {
- const wasm::WasmCode* target =
- codemap()->GetImportedFunction(function_index);
- return CallWasmCode(isolate, target,
- codemap()->module()->functions[function_index].sig);
- } else {
- Handle<Code> target(codemap()->GetImportedFunctionGC(function_index),
- isolate);
- return CallCodeObject(isolate, target,
- codemap()->module()->functions[function_index].sig);
+ DCHECK_GT(module()->num_imported_functions, function_index);
+ Handle<WasmInstanceObject> instance;
+ WasmCode* code;
+ {
+ ImportedFunctionEntry entry(*instance_object_, function_index);
+ instance = handle(entry.instance(), isolate);
+ code = isolate->wasm_engine()->code_manager()->GetCodeFromStartAddress(
+ entry.target());
}
+ FunctionSig* sig = codemap()->module()->functions[function_index].sig;
+ return CallExternalWasmFunction(isolate, instance, code, sig);
}
ExternalCallResult CallIndirectFunction(uint32_t table_index,
@@ -2622,81 +2391,51 @@ class ThreadImpl {
return {ExternalCallResult::INTERNAL, code};
}
- WasmCompiledModule* compiled_module =
- codemap()->instance()->compiled_module();
- Isolate* isolate = compiled_module->GetIsolate();
+ Isolate* isolate = instance_object_->GetIsolate();
+ uint32_t expected_sig_id = module()->signature_ids[sig_index];
+ DCHECK_EQ(expected_sig_id,
+ module()->signature_map.Find(module()->signatures[sig_index]));
+
+ // The function table is stored in the instance.
+ // TODO(wasm): the wasm interpreter currently supports only one table.
+ CHECK_EQ(0, table_index);
+ // Bounds check against table size.
+ if (entry_index >= instance_object_->indirect_function_table_size()) {
+ return {ExternalCallResult::INVALID_FUNC};
+ }
- const wasm::WasmCode* target = nullptr;
- Code* target_gc = nullptr;
+ WasmCode* code;
+ Handle<WasmInstanceObject> instance;
{
- DisallowHeapAllocation no_gc;
- // Get function to be called directly from the live instance to see latest
- // changes to the tables.
-
- // Canonicalize signature index.
- uint32_t canonical_sig_index = module()->signature_ids[sig_index];
- DCHECK_EQ(canonical_sig_index,
- module()->signature_map.Find(module()->signatures[sig_index]));
-
- if (!WASM_CONTEXT_TABLES) {
- // Check signature.
- FixedArray* fun_tables = compiled_module->function_tables();
- if (table_index >= static_cast<uint32_t>(fun_tables->length())) {
- return {ExternalCallResult::INVALID_FUNC};
- }
- // Reconstitute the global handle to the function table, from the
- // address stored in the respective table of tables.
- int table_index_as_int = static_cast<int>(table_index);
- FixedArray* fun_table = *reinterpret_cast<FixedArray**>(
- WasmCompiledModule::GetTableValue(fun_tables, table_index_as_int));
- // Function tables store <smi, code> pairs.
- int num_funcs_in_table =
- fun_table->length() / compiler::kFunctionTableEntrySize;
- if (entry_index >= static_cast<uint32_t>(num_funcs_in_table)) {
- return {ExternalCallResult::INVALID_FUNC};
- }
- int found_sig = Smi::ToInt(fun_table->get(
- compiler::FunctionTableSigOffset(static_cast<int>(entry_index))));
- if (static_cast<uint32_t>(found_sig) != canonical_sig_index) {
- return {ExternalCallResult::SIGNATURE_MISMATCH};
- }
-
- // Get code object.
- target_gc = Code::cast(fun_table->get(
- compiler::FunctionTableCodeOffset(static_cast<int>(entry_index))));
- } else {
- // The function table is stored in the wasm context.
- // TODO(wasm): the wasm interpreter currently supports only one table.
- CHECK_EQ(0, table_index);
- // Bounds check against table size.
- if (entry_index >= wasm_context_->table_size) {
- return {ExternalCallResult::INVALID_FUNC};
- }
- // Signature check.
- int32_t entry_sig = wasm_context_->table[entry_index].sig_id;
- if (entry_sig != static_cast<int32_t>(canonical_sig_index)) {
- return {ExternalCallResult::SIGNATURE_MISMATCH};
- }
- // Load the target address (first instruction of code).
- Address first_instr = wasm_context_->table[entry_index].target;
- // TODO(titzer): load the wasm context instead of relying on the
- // target code being specialized to the target instance.
- // Get code object.
- target =
- isolate->wasm_engine()->code_manager()->GetCodeFromStartAddress(
- first_instr);
+ IndirectFunctionTableEntry entry(*instance_object_, entry_index);
+ // Signature check.
+ if (entry.sig_id() != static_cast<int32_t>(expected_sig_id)) {
+ return {ExternalCallResult::SIGNATURE_MISMATCH};
}
+
+ instance = handle(entry.instance(), isolate);
+ code = isolate->wasm_engine()->code_manager()->GetCodeFromStartAddress(
+ entry.target());
}
- // Call the code object. Use a new HandleScope to avoid leaking /
- // accumulating handles in the outer scope.
- HandleScope handle_scope(isolate);
+ // Call either an internal or external WASM function.
+ HandleScope scope(isolate);
FunctionSig* signature = module()->signatures[sig_index];
- if (FLAG_wasm_jit_to_native) {
- return CallWasmCode(isolate, target, signature);
- } else {
- return CallCodeObject(isolate, handle(target_gc, isolate), signature);
+
+ if (code->kind() == wasm::WasmCode::kFunction) {
+ if (!instance_object_.is_identical_to(instance)) {
+ // Cross instance call.
+ return CallExternalWasmFunction(isolate, instance, code, signature);
+ }
+ return {ExternalCallResult::INTERNAL, codemap()->GetCode(code->index())};
+ }
+
+ // Call to external function.
+ if (code->kind() == wasm::WasmCode::kInterpreterStub ||
+ code->kind() == wasm::WasmCode::kWasmToJsWrapper) {
+ return CallExternalWasmFunction(isolate, instance, code, signature);
}
+ return {ExternalCallResult::INVALID_FUNC};
}
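Note: the indirect-call path now reduces to three checks against instance data: a bounds check on the table size, a signature-id comparison against the canonical id, and a same-instance test that chooses between internal dispatch and the external wasm-to-wasm path. A compact standalone model of that decision sequence (hypothetical names, not V8 API):

#include <cstdint>
#include <vector>

enum class CallResult { kInternal, kExternal, kInvalidFunc, kSigMismatch };

struct TableEntry {
  int32_t sig_id;   // canonicalized signature id of the stored function
  void* instance;   // instance that owns the stored function
};

CallResult DispatchIndirect(const std::vector<TableEntry>& table,
                            uint32_t entry_index, uint32_t expected_sig_id,
                            const void* caller_instance) {
  if (entry_index >= table.size()) return CallResult::kInvalidFunc;
  const TableEntry& entry = table[entry_index];
  if (entry.sig_id != static_cast<int32_t>(expected_sig_id))
    return CallResult::kSigMismatch;
  // Same instance: dispatch internally; otherwise take the external
  // call path, mirroring CallIndirectFunction above.
  return entry.instance == caller_instance ? CallResult::kInternal
                                           : CallResult::kExternal;
}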
inline Activation current_activation() {
@@ -2785,37 +2524,6 @@ const InterpretedFrameImpl* ToImpl(const InterpretedFrame* frame) {
return reinterpret_cast<const InterpretedFrameImpl*>(frame);
}
-//============================================================================
-// Implementation details of the heap objects scope.
-//============================================================================
-class HeapObjectsScopeImpl {
- public:
- HeapObjectsScopeImpl(CodeMap* codemap, Handle<WasmInstanceObject> instance)
- : codemap_(codemap), needs_reset(!codemap_->has_instance()) {
- if (needs_reset) {
- instance_ = handle(*instance);
- codemap_->SetInstanceObject(instance_);
- } else {
- DCHECK_EQ(*instance, codemap_->instance());
- return;
- }
- }
-
- ~HeapObjectsScopeImpl() {
- if (!needs_reset) return;
- DCHECK_EQ(*instance_, codemap_->instance());
- codemap_->ClearInstanceObject();
- // Clear the handle, such that anyone who accidentally copied them will
- // notice.
- *instance_.location() = nullptr;
- }
-
- private:
- CodeMap* codemap_;
- Handle<WasmInstanceObject> instance_;
- bool needs_reset;
-};
-
} // namespace
//============================================================================
@@ -2896,23 +2604,42 @@ class WasmInterpreterInternals : public ZoneObject {
WasmInterpreterInternals(Isolate* isolate, Zone* zone,
const WasmModule* module,
const ModuleWireBytes& wire_bytes,
- WasmContext* wasm_context)
+ Handle<WasmInstanceObject> instance_object)
: module_bytes_(wire_bytes.start(), wire_bytes.end(), zone),
codemap_(isolate, module, module_bytes_.data(), zone),
threads_(zone) {
- threads_.emplace_back(zone, &codemap_, wasm_context);
+ threads_.emplace_back(zone, &codemap_, instance_object);
}
};
+namespace {
+// TODO(wasm): a finalizer is only required to delete the global handle.
+void GlobalHandleDeleter(const v8::WeakCallbackInfo<void>& data) {
+ GlobalHandles::Destroy(reinterpret_cast<Object**>(
+ reinterpret_cast<JSObject**>(data.GetParameter())));
+}
+
+Handle<WasmInstanceObject> MakeWeak(
+ Isolate* isolate, Handle<WasmInstanceObject> instance_object) {
+ Handle<Object> handle = isolate->global_handles()->Create(*instance_object);
+ // TODO(wasm): use a phantom handle in the WasmInterpreter.
+ GlobalHandles::MakeWeak(handle.location(), handle.location(),
+ &GlobalHandleDeleter,
+ v8::WeakCallbackType::kFinalizer);
+ return Handle<WasmInstanceObject>::cast(handle);
+}
+} // namespace
+
//============================================================================
// Implementation of the public interface of the interpreter.
//============================================================================
WasmInterpreter::WasmInterpreter(Isolate* isolate, const WasmModule* module,
const ModuleWireBytes& wire_bytes,
- WasmContext* wasm_context)
+ Handle<WasmInstanceObject> instance_object)
: zone_(isolate->allocator(), ZONE_NAME),
internals_(new (&zone_) WasmInterpreterInternals(
- isolate, &zone_, module, wire_bytes, wasm_context)) {}
+ isolate, &zone_, module, wire_bytes,
+ MakeWeak(isolate, instance_object))) {}
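Note: MakeWeak turns the interpreter's reference into a weakened global handle so the interpreter does not keep the WasmInstanceObject alive; the finalizer's only job is to destroy the handle itself. The ownership shape is the same as holding a weak reference and observing the owner's death, roughly like this plain C++ analogy (not the GlobalHandles API):

#include <cassert>
#include <memory>

struct InstanceObject { int dummy = 0; };

struct Interpreter {
  // Weak: the interpreter must not extend the instance's lifetime.
  std::weak_ptr<InstanceObject> instance;
  bool InstanceAlive() const { return !instance.expired(); }
};

int main() {
  auto owner = std::make_shared<InstanceObject>();
  Interpreter interp{owner};
  assert(interp.InstanceAlive());
  owner.reset();                    // the "GC" drops the last strong reference
  assert(!interp.InstanceAlive());  // the weak handle observes the death
}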
WasmInterpreter::~WasmInterpreter() { internals_->~WasmInterpreterInternals(); }
@@ -2983,7 +2710,7 @@ ControlTransferMap WasmInterpreter::ComputeControlTransfersForTesting(
// Create some dummy structures, to avoid special-casing the implementation
// just for testing.
FunctionSig sig(0, 0, nullptr);
- WasmFunction function{&sig, 0, 0, {0, 0}, {0, 0}, false, false};
+ WasmFunction function{&sig, 0, 0, {0, 0}, false, false};
InterpreterCode code{
&function, BodyLocalDecls(zone), start, end, nullptr, nullptr, nullptr};
@@ -3018,19 +2745,6 @@ void InterpretedFrame::Deleter::operator()(InterpretedFrame* ptr) {
delete ToImpl(ptr);
}
-//============================================================================
-// Public API of the heap objects scope.
-//============================================================================
-WasmInterpreter::HeapObjectsScope::HeapObjectsScope(
- WasmInterpreter* interpreter, Handle<WasmInstanceObject> instance) {
- static_assert(sizeof(data) == sizeof(HeapObjectsScopeImpl), "Size mismatch");
- new (data) HeapObjectsScopeImpl(&interpreter->internals_->codemap_, instance);
-}
-
-WasmInterpreter::HeapObjectsScope::~HeapObjectsScope() {
- reinterpret_cast<HeapObjectsScopeImpl*>(data)->~HeapObjectsScopeImpl();
-}
-
#undef TRACE
#undef FOREACH_INTERNAL_OPCODE
#undef WASM_CTYPES
diff --git a/deps/v8/src/wasm/wasm-interpreter.h b/deps/v8/src/wasm/wasm-interpreter.h
index 88d21c37d1..29b78c5a61 100644
--- a/deps/v8/src/wasm/wasm-interpreter.h
+++ b/deps/v8/src/wasm/wasm-interpreter.h
@@ -16,7 +16,6 @@ class AccountingAllocator;
namespace internal {
class WasmInstanceObject;
-struct WasmContext;
namespace wasm {
@@ -88,19 +87,6 @@ class InterpretedFrame {
// An interpreter capable of executing WebAssembly.
class V8_EXPORT_PRIVATE WasmInterpreter {
public:
- // Open a HeapObjectsScope before running any code in the interpreter which
- // needs access to the instance object or needs to call to JS functions.
- class V8_EXPORT_PRIVATE HeapObjectsScope {
- public:
- HeapObjectsScope(WasmInterpreter* interpreter,
- Handle<WasmInstanceObject> instance);
- ~HeapObjectsScope();
-
- private:
- char data[3 * sizeof(void*)]; // must match sizeof(HeapObjectsScopeImpl).
- DISALLOW_COPY_AND_ASSIGN(HeapObjectsScope);
- };
-
// State machine for a Thread:
// +---------Run()/Step()--------+
// V |
@@ -181,7 +167,8 @@ class V8_EXPORT_PRIVATE WasmInterpreter {
};
WasmInterpreter(Isolate* isolate, const WasmModule* module,
- const ModuleWireBytes& wire_bytes, WasmContext* wasm_context);
+ const ModuleWireBytes& wire_bytes,
+ Handle<WasmInstanceObject> instance);
~WasmInterpreter();
//==========================================================================
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index 915d4d9ead..13f87ec8df 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -9,11 +9,10 @@
#include "src/assert-scope.h"
#include "src/ast/ast.h"
#include "src/execution.h"
-#include "src/factory.h"
#include "src/handles.h"
+#include "src/heap/factory.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
-#include "src/objects.h"
#include "src/parsing/parse-info.h"
#include "src/trap-handler/trap-handler.h"
#include "src/wasm/wasm-engine.h"
@@ -655,10 +654,12 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
static_cast<size_t>(initial);
const bool enable_guard_regions =
internal::trap_handler::IsTrapHandlerEnabled();
- i::Handle<i::JSArrayBuffer> buffer = i::wasm::NewArrayBuffer(
- i_isolate, size, enable_guard_regions,
- is_shared_memory ? i::SharedFlag::kShared : i::SharedFlag::kNotShared);
- if (buffer.is_null()) {
+ i::SharedFlag shared_flag =
+ is_shared_memory ? i::SharedFlag::kShared : i::SharedFlag::kNotShared;
+ i::Handle<i::JSArrayBuffer> buffer;
+ if (!i::wasm::NewArrayBuffer(i_isolate, size, enable_guard_regions,
+ shared_flag)
+ .ToHandle(&buffer)) {
thrower.RangeError("could not allocate memory");
return;
}
@@ -675,6 +676,113 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(Utils::ToLocal(memory_obj));
}
+void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Isolate* isolate = args.GetIsolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ HandleScope scope(isolate);
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Global()");
+ if (!args.IsConstructCall()) {
+ thrower.TypeError("WebAssembly.Global must be invoked with 'new'");
+ return;
+ }
+ if (!args[0]->IsObject()) {
+ thrower.TypeError("Argument 0 must be a global descriptor");
+ return;
+ }
+ Local<Context> context = isolate->GetCurrentContext();
+ Local<v8::Object> descriptor = Local<Object>::Cast(args[0]);
+
+ // The descriptor's 'value'.
+ v8::MaybeLocal<v8::Value> maybe_value =
+ descriptor->Get(context, v8_str(isolate, "value"));
+
+ // The descriptor's 'mutable'.
+ bool is_mutable = false;
+ {
+ Local<String> mutable_key = v8_str(isolate, "mutable");
+ v8::MaybeLocal<v8::Value> maybe = descriptor->Get(context, mutable_key);
+ v8::Local<v8::Value> value;
+ if (maybe.ToLocal(&value)) {
+ if (!value->BooleanValue(context).To(&is_mutable)) return;
+ }
+ }
+
+ // The descriptor's 'type'.
+ i::wasm::ValueType type;
+ {
+ v8::MaybeLocal<v8::Value> maybe =
+ descriptor->Get(context, v8_str(isolate, "type"));
+ v8::Local<v8::Value> value;
+ if (!maybe.ToLocal(&value)) return;
+ v8::Local<v8::String> string;
+ if (!value->ToString(context).ToLocal(&string)) return;
+
+ bool equal;
+ if (string->Equals(context, v8_str(isolate, "i32")).To(&equal) && equal) {
+ type = i::wasm::kWasmI32;
+ } else if (string->Equals(context, v8_str(isolate, "f32")).To(&equal) &&
+ equal) {
+ type = i::wasm::kWasmF32;
+ } else if (string->Equals(context, v8_str(isolate, "f64")).To(&equal) &&
+ equal) {
+ type = i::wasm::kWasmF64;
+ } else {
+ thrower.TypeError(
+ "Descriptor property 'type' must be 'i32', 'f32', or 'f64'");
+ return;
+ }
+ }
+
+ const uint32_t offset = 0;
+ i::MaybeHandle<i::WasmGlobalObject> maybe_global_obj =
+ i::WasmGlobalObject::New(i_isolate, i::MaybeHandle<i::JSArrayBuffer>(),
+ type, offset, is_mutable);
+
+ i::Handle<i::WasmGlobalObject> global_obj;
+ if (!maybe_global_obj.ToHandle(&global_obj)) {
+ thrower.RangeError("could not allocate memory");
+ return;
+ }
+
+ // Convert value to a WebAssembly value.
+ v8::Local<v8::Value> value;
+ if (maybe_value.ToLocal(&value)) {
+ switch (type) {
+ case i::wasm::kWasmI32: {
+ int32_t i32_value = 0;
+ v8::Local<v8::Int32> int32_value;
+ if (!value->ToInt32(context).ToLocal(&int32_value)) return;
+ if (!int32_value->Int32Value(context).To(&i32_value)) return;
+ global_obj->SetI32(i32_value);
+ break;
+ }
+ case i::wasm::kWasmF32: {
+ double f64_value = 0;
+ v8::Local<v8::Number> number_value;
+ if (!value->ToNumber(context).ToLocal(&number_value)) return;
+ if (!number_value->NumberValue(context).To(&f64_value)) return;
+ float f32_value = static_cast<float>(f64_value);
+ global_obj->SetF32(f32_value);
+ break;
+ }
+ case i::wasm::kWasmF64: {
+ double f64_value = 0;
+ v8::Local<v8::Number> number_value;
+ if (!value->ToNumber(context).ToLocal(&number_value)) return;
+ if (!number_value->NumberValue(context).To(&f64_value)) return;
+ global_obj->SetF64(f64_value);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ i::Handle<i::JSObject> global_js_object(global_obj);
+ args.GetReturnValue().Set(Utils::ToLocal(global_js_object));
+}
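Note: the new constructor accepts a descriptor whose 'type' is restricted to 'i32', 'f32', or 'f64' (i64 globals cannot be created from JS), reads 'mutable' as a boolean, and coerces 'value' according to the chosen type. A standalone sketch of that validation and coercion, with simplified types instead of the V8 embedder API (the real i32 path uses JS ToInt32 semantics rather than a plain cast):

#include <cstdint>
#include <optional>
#include <string>

enum class ValueType { kI32, kF32, kF64 };

// Returns the wasm type for a descriptor 'type' string, or nullopt for
// anything else (including "i64", which the JS constructor rejects).
std::optional<ValueType> ParseGlobalType(const std::string& s) {
  if (s == "i32") return ValueType::kI32;
  if (s == "f32") return ValueType::kF32;
  if (s == "f64") return ValueType::kF64;
  return std::nullopt;
}

union GlobalValue { int32_t i32; float f32; double f64; };

// Coerces the descriptor's 'value' (already converted to a number):
// truncate toward int32 for i32, narrow to float for f32, keep double for f64.
GlobalValue CoerceValue(ValueType type, double number) {
  GlobalValue v{};
  switch (type) {
    case ValueType::kI32: v.i32 = static_cast<int32_t>(number); break;
    case ValueType::kF32: v.f32 = static_cast<float>(number); break;
    case ValueType::kF64: v.f64 = number; break;
  }
  return v;
}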
+
+constexpr const char* kName_WasmGlobalObject = "WebAssembly.Global";
constexpr const char* kName_WasmMemoryObject = "WebAssembly.Memory";
constexpr const char* kName_WasmInstanceObject = "WebAssembly.Instance";
constexpr const char* kName_WasmTableObject = "WebAssembly.Table";
@@ -790,11 +898,8 @@ void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
// Parameter 1.
i::Handle<i::Object> value = Utils::OpenHandle(*args[1]);
- // TODO(titzer): use WasmExportedFunction::IsWasmExportedFunction() here.
if (!value->IsNull(i_isolate) &&
- (!value->IsJSFunction() ||
- i::Handle<i::JSFunction>::cast(value)->code()->kind() !=
- i::Code::JS_TO_WASM_FUNCTION)) {
+ !i::WasmExportedFunction::IsWasmExportedFunction(*value)) {
thrower.TypeError("Argument 1 must be null or a WebAssembly function");
return;
}
@@ -889,6 +994,83 @@ void WebAssemblyMemoryGetBuffer(
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
return_value.Set(Utils::ToLocal(buffer));
}
+
+void WebAssemblyGlobalGetValueCommon(
+ const v8::FunctionCallbackInfo<v8::Value>& args, const char* name) {
+ v8::Isolate* isolate = args.GetIsolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ HandleScope scope(isolate);
+ ScheduledErrorThrower thrower(i_isolate, name);
+ EXTRACT_THIS(receiver, WasmGlobalObject);
+
+ v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
+
+ switch (receiver->type()) {
+ case i::wasm::kWasmI32:
+ return_value.Set(receiver->GetI32());
+ break;
+ case i::wasm::kWasmI64:
+ thrower.TypeError("Can't get the value of i64 WebAssembly.Global");
+ break;
+ case i::wasm::kWasmF32:
+ return_value.Set(receiver->GetF32());
+ break;
+ case i::wasm::kWasmF64:
+ return_value.Set(receiver->GetF64());
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+// WebAssembly.Global.valueOf() -> num
+void WebAssemblyGlobalValueOf(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ return WebAssemblyGlobalGetValueCommon(args, "WebAssembly.Global.valueOf()");
+}
+
+// get WebAssembly.Global.value -> num
+void WebAssemblyGlobalGetValue(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ return WebAssemblyGlobalGetValueCommon(args, "get WebAssembly.Global.value");
+}
+
+// set WebAssembly.Global.value(num)
+void WebAssemblyGlobalSetValue(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Isolate* isolate = args.GetIsolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ HandleScope scope(isolate);
+ Local<Context> context = isolate->GetCurrentContext();
+ ScheduledErrorThrower thrower(i_isolate, "set WebAssembly.Global.value");
+ EXTRACT_THIS(receiver, WasmGlobalObject);
+
+ switch (receiver->type()) {
+ case i::wasm::kWasmI32: {
+ int32_t i32_value = 0;
+ if (!args[0]->Int32Value(context).To(&i32_value)) return;
+ receiver->SetI32(i32_value);
+ break;
+ }
+ case i::wasm::kWasmI64:
+ thrower.TypeError("Can't set the value of i64 WebAssembly.Global");
+ break;
+ case i::wasm::kWasmF32: {
+ double f64_value = 0;
+ if (!args[0]->NumberValue(context).To(&f64_value)) return;
+ receiver->SetF32(static_cast<float>(f64_value));
+ break;
+ }
+ case i::wasm::kWasmF64: {
+ double f64_value = 0;
+ if (!args[0]->NumberValue(context).To(&f64_value)) return;
+ receiver->SetF64(f64_value);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
} // namespace
// TODO(titzer): we use the API to create the function template because the
@@ -903,28 +1085,37 @@ static i::Handle<i::FunctionTemplateInfo> NewTemplate(i::Isolate* i_isolate,
namespace internal {
+Handle<JSFunction> CreateFunc(Isolate* isolate, Handle<String> name,
+ FunctionCallback func) {
+ Handle<FunctionTemplateInfo> temp = NewTemplate(isolate, func);
+ Handle<JSFunction> function =
+ ApiNatives::InstantiateFunction(temp, name).ToHandleChecked();
+ DCHECK(function->shared()->HasSharedName());
+ return function;
+}
+
Handle<JSFunction> InstallFunc(Isolate* isolate, Handle<JSObject> object,
const char* str, FunctionCallback func,
int length = 0) {
Handle<String> name = v8_str(isolate, str);
- Handle<FunctionTemplateInfo> temp = NewTemplate(isolate, func);
- Handle<JSFunction> function =
- ApiNatives::InstantiateFunction(temp, name).ToHandleChecked();
- DCHECK(function->shared()->has_shared_name());
+ Handle<JSFunction> function = CreateFunc(isolate, name, func);
function->shared()->set_length(length);
PropertyAttributes attributes = static_cast<PropertyAttributes>(DONT_ENUM);
JSObject::AddProperty(object, name, function, attributes);
return function;
}
+Handle<String> GetterName(Isolate* isolate, Handle<String> name) {
+ return Name::ToFunctionName(name, isolate->factory()->get_string())
+ .ToHandleChecked();
+}
+
void InstallGetter(Isolate* isolate, Handle<JSObject> object,
const char* str, FunctionCallback func) {
Handle<String> name = v8_str(isolate, str);
- Handle<FunctionTemplateInfo> temp = NewTemplate(isolate, func);
- // TODO(ishell): shouldn't we set "get "+name as getter's name?
Handle<JSFunction> function =
- ApiNatives::InstantiateFunction(temp).ToHandleChecked();
- DCHECK(function->shared()->has_shared_name());
+ CreateFunc(isolate, GetterName(isolate, name), func);
+
v8::PropertyAttribute attributes =
static_cast<v8::PropertyAttribute>(v8::DontEnum);
Utils::ToLocal(object)->SetAccessorProperty(Utils::ToLocal(name),
@@ -932,6 +1123,28 @@ void InstallGetter(Isolate* isolate, Handle<JSObject> object,
Local<Function>(), attributes);
}
+Handle<String> SetterName(Isolate* isolate, Handle<String> name) {
+ return Name::ToFunctionName(name, isolate->factory()->set_string())
+ .ToHandleChecked();
+}
+
+void InstallGetterSetter(Isolate* isolate, Handle<JSObject> object,
+ const char* str, FunctionCallback getter,
+ FunctionCallback setter) {
+ Handle<String> name = v8_str(isolate, str);
+ Handle<JSFunction> getter_func =
+ CreateFunc(isolate, GetterName(isolate, name), getter);
+ Handle<JSFunction> setter_func =
+ CreateFunc(isolate, SetterName(isolate, name), setter);
+
+ v8::PropertyAttribute attributes =
+ static_cast<v8::PropertyAttribute>(v8::DontEnum);
+
+ Utils::ToLocal(object)->SetAccessorProperty(
+ Utils::ToLocal(name), Utils::ToLocal(getter_func),
+ Utils::ToLocal(setter_func), attributes);
+}
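Note: GetterName and SetterName use Name::ToFunctionName to produce the spec-style accessor names ("get value", "set value") before the JSFunctions are created, so the installed accessors carry proper names. The naming itself is just prefix concatenation, roughly (hypothetical helper, shown only to illustrate the resulting names):

#include <string>

std::string AccessorFunctionName(const std::string& prefix,
                                 const std::string& property) {
  return prefix + " " + property;  // e.g. "get value", "set value"
}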
+
void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
Handle<JSGlobalObject> global = isolate->global_object();
Handle<Context> context(global->native_context(), isolate);
@@ -980,9 +1193,8 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
JSFunction::EnsureHasInitialMap(module_constructor);
Handle<JSObject> module_proto(
JSObject::cast(module_constructor->instance_prototype()));
- i::Handle<i::Map> module_map = isolate->factory()->NewMap(
- i::WASM_MODULE_TYPE, i::JSObject::kHeaderSize +
- WasmModuleObject::kFieldCount * i::kPointerSize);
+ i::Handle<i::Map> module_map =
+ isolate->factory()->NewMap(i::WASM_MODULE_TYPE, WasmModuleObject::kSize);
JSFunction::SetInitialMap(module_constructor, module_map, module_proto);
InstallFunc(isolate, module_constructor, "imports", WebAssemblyModuleImports,
1);
@@ -1040,6 +1252,24 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
JSObject::AddProperty(memory_proto, factory->to_string_tag_symbol(),
v8_str(isolate, "WebAssembly.Memory"), ro_attributes);
+ // Setup Global
+ if (i::FLAG_experimental_wasm_mut_global) {
+ Handle<JSFunction> global_constructor =
+ InstallFunc(isolate, webassembly, "Global", WebAssemblyGlobal, 1);
+ context->set_wasm_global_constructor(*global_constructor);
+ JSFunction::EnsureHasInitialMap(global_constructor);
+ Handle<JSObject> global_proto(
+ JSObject::cast(global_constructor->instance_prototype()));
+ i::Handle<i::Map> global_map = isolate->factory()->NewMap(
+ i::WASM_GLOBAL_TYPE, WasmGlobalObject::kSize);
+ JSFunction::SetInitialMap(global_constructor, global_map, global_proto);
+ InstallFunc(isolate, global_proto, "valueOf", WebAssemblyGlobalValueOf, 0);
+ InstallGetterSetter(isolate, global_proto, "value",
+ WebAssemblyGlobalGetValue, WebAssemblyGlobalSetValue);
+ JSObject::AddProperty(global_proto, factory->to_string_tag_symbol(),
+ v8_str(isolate, "WebAssembly.Global"), ro_attributes);
+ }
+
// Setup errors
attributes = static_cast<PropertyAttributes>(DONT_ENUM);
Handle<JSFunction> compile_error(
diff --git a/deps/v8/src/wasm/wasm-memory.cc b/deps/v8/src/wasm/wasm-memory.cc
index 38cd8973a6..9bb8002a4f 100644
--- a/deps/v8/src/wasm/wasm-memory.cc
+++ b/deps/v8/src/wasm/wasm-memory.cc
@@ -12,119 +12,256 @@ namespace v8 {
namespace internal {
namespace wasm {
-WasmAllocationTracker::~WasmAllocationTracker() {
+namespace {
+void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
+ size_t size, bool require_guard_regions,
+ void** allocation_base,
+ size_t* allocation_length) {
+#if V8_TARGET_ARCH_32_BIT
+ DCHECK(!require_guard_regions);
+#endif
+ // We always allocate the largest possible offset into the heap, so the
+ // addressable memory after the guard page can be made inaccessible.
+ *allocation_length =
+ require_guard_regions
+ ? RoundUp(kWasmMaxHeapOffset, CommitPageSize())
+ : RoundUp(
+ base::bits::RoundUpToPowerOfTwo32(static_cast<uint32_t>(size)),
+ kWasmPageSize);
+ DCHECK_GE(*allocation_length, size);
+ DCHECK_GE(*allocation_length, kWasmPageSize);
+
+ // Let the WasmMemoryTracker know we are going to reserve a bunch of
+ // address space.
+ // Try up to three times; getting rid of dead JSArrayBuffer allocations might
+ // require two GCs.
+ // TODO(gc): Fix this to only require one GC (crbug.com/v8/7621).
+ for (int trial = 0;; ++trial) {
+ if (memory_tracker->ReserveAddressSpace(*allocation_length)) break;
+ // Collect garbage and retry.
+ heap->MemoryPressureNotification(MemoryPressureLevel::kCritical, true);
+ // After first and second GC: retry.
+ if (trial < 2) continue;
+ // We are over the address space limit. Fail.
+ //
+ // When running under the correctness fuzzer (i.e.
+ // --abort-on-stack-or-string-length-overflow is preset), we crash instead
+ // so it is not incorrectly reported as a correctness violation. See
+ // https://crbug.com/828293#c4
+ if (FLAG_abort_on_stack_or_string_length_overflow) {
+ FATAL("could not allocate wasm memory");
+ }
+ return nullptr;
+ }
+
+ // The Reserve makes the whole region inaccessible by default.
+ *allocation_base = AllocatePages(nullptr, *allocation_length, kWasmPageSize,
+ PageAllocator::kNoAccess);
+ if (*allocation_base == nullptr) {
+ memory_tracker->ReleaseReservation(*allocation_length);
+ return nullptr;
+ }
+ void* memory = *allocation_base;
+
+ // Make the part we care about accessible.
+ if (size > 0) {
+ bool result = SetPermissions(memory, RoundUp(size, kWasmPageSize),
+ PageAllocator::kReadWrite);
+ // SetPermissions commits the extra memory, which may put us over the
+ // process memory limit. If so, report this as an OOM.
+ if (!result) {
+ V8::FatalProcessOutOfMemory(nullptr, "TryAllocateBackingStore");
+ }
+ }
+
+ memory_tracker->RegisterAllocation(*allocation_base, *allocation_length,
+ memory, size);
+ return memory;
+}
+} // namespace
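Note: TryAllocateBackingStore reserves the entire guarded region as inaccessible pages first and only makes the pages actually backing wasm memory readable/writable, so out-of-bounds accesses land in the guard area and trap. The same reserve-then-commit pattern, expressed with POSIX-style calls as an analogy for V8's PageAllocator (not the real implementation):

#include <sys/mman.h>
#include <cstddef>

// Reserve `reserved_size` bytes with no access rights, then commit only the
// first `committed_size` bytes as read/write. Returns the base or nullptr.
void* ReserveThenCommit(size_t reserved_size, size_t committed_size) {
  void* base = mmap(nullptr, reserved_size, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED) return nullptr;
  if (committed_size > 0 &&
      mprotect(base, committed_size, PROT_READ | PROT_WRITE) != 0) {
    munmap(base, reserved_size);
    return nullptr;
  }
  return base;  // accesses beyond committed_size fault, like a guard region
}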
+
+WasmMemoryTracker::~WasmMemoryTracker() {
+ if (empty_backing_store_.allocation_base != nullptr) {
+ CHECK(FreePages(empty_backing_store_.allocation_base,
+ empty_backing_store_.allocation_length));
+ InternalReleaseAllocation(empty_backing_store_.buffer_start);
+ }
+
// All reserved address space should be released before the allocation tracker
// is destroyed.
+ DCHECK_EQ(reserved_address_space_, 0u);
DCHECK_EQ(allocated_address_space_, 0u);
}
-bool WasmAllocationTracker::ReserveAddressSpace(size_t num_bytes) {
+bool WasmMemoryTracker::ReserveAddressSpace(size_t num_bytes) {
// Address space reservations are currently only meaningful using guard
// regions, which is currently only supported on 64-bit systems. On other
// platforms, we always fall back on bounds checks.
#if V8_TARGET_ARCH_64_BIT
- static constexpr size_t kAddressSpaceLimit = 0x10000000000L; // 1 TiB
+ constexpr size_t kAddressSpaceLimit = 0x10000000000L; // 1 TiB
#else
- static constexpr size_t kAddressSpaceLimit = 0x80000000; // 2 GiB
+ constexpr size_t kAddressSpaceLimit = 0x80000000; // 2 GiB
#endif
- size_t const old_count = allocated_address_space_.fetch_add(num_bytes);
+ size_t const old_count = reserved_address_space_.fetch_add(num_bytes);
DCHECK_GE(old_count + num_bytes, old_count);
if (old_count + num_bytes <= kAddressSpaceLimit) {
return true;
}
- allocated_address_space_ -= num_bytes;
+ reserved_address_space_ -= num_bytes;
return false;
}
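Note: ReserveAddressSpace optimistically bumps the shared counter and rolls the increment back when the limit would be exceeded, keeping the fast path lock-free. The pattern in isolation, with the limit taken from the 64-bit branch above:

#include <atomic>
#include <cstddef>

constexpr size_t kAddressSpaceLimit = size_t{1} << 40;  // 1 TiB on 64-bit
std::atomic<size_t> reserved_address_space{0};

bool TryReserveAddressSpace(size_t num_bytes) {
  size_t old_count = reserved_address_space.fetch_add(num_bytes);
  if (old_count + num_bytes <= kAddressSpaceLimit) return true;
  reserved_address_space -= num_bytes;  // undo the optimistic increment
  return false;
}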
-void WasmAllocationTracker::ReleaseAddressSpace(size_t num_bytes) {
- DCHECK_LE(num_bytes, allocated_address_space_);
- allocated_address_space_ -= num_bytes;
+void WasmMemoryTracker::ReleaseReservation(size_t num_bytes) {
+ size_t const old_reserved = reserved_address_space_.fetch_sub(num_bytes);
+ USE(old_reserved);
+ DCHECK_LE(num_bytes, old_reserved);
+ DCHECK_GE(old_reserved - num_bytes, allocated_address_space_);
}
-void* TryAllocateBackingStore(Isolate* isolate, size_t size,
- bool require_guard_regions,
- void** allocation_base,
- size_t* allocation_length) {
- // We always allocate the largest possible offset into the heap, so the
- // addressable memory after the guard page can be made inaccessible.
- *allocation_length = require_guard_regions
- ? RoundUp(kWasmMaxHeapOffset, CommitPageSize())
- : base::bits::RoundUpToPowerOfTwo32(RoundUp(
- static_cast<uint32_t>(size), kWasmPageSize));
- DCHECK_GE(*allocation_length, size);
+void WasmMemoryTracker::RegisterAllocation(void* allocation_base,
+ size_t allocation_length,
+ void* buffer_start,
+ size_t buffer_length) {
+ // Make sure the caller has reserved the address space before registering the
+ // allocation.
+ DCHECK_LE(allocated_address_space_ + allocation_length,
+ reserved_address_space_);
- WasmAllocationTracker* const allocation_tracker =
- isolate->wasm_engine()->allocation_tracker();
+ base::LockGuard<base::Mutex> scope_lock(&mutex_);
- // Let the WasmAllocationTracker know we are going to reserve a bunch of
- // address space.
- if (!allocation_tracker->ReserveAddressSpace(*allocation_length)) {
- // If we are over the address space limit, fail.
- return nullptr;
+ allocated_address_space_ += allocation_length;
+
+ allocations_.emplace(buffer_start,
+ AllocationData{allocation_base, allocation_length,
+ buffer_start, buffer_length});
+}
+
+WasmMemoryTracker::AllocationData WasmMemoryTracker::ReleaseAllocation(
+ const void* buffer_start) {
+ if (IsEmptyBackingStore(buffer_start)) {
+ return AllocationData();
}
+ return InternalReleaseAllocation(buffer_start);
+}
- // The Reserve makes the whole region inaccessible by default.
- *allocation_base = AllocatePages(nullptr, *allocation_length, kWasmPageSize,
- PageAllocator::kNoAccess);
- if (*allocation_base == nullptr) {
- allocation_tracker->ReleaseAddressSpace(*allocation_length);
- return nullptr;
+WasmMemoryTracker::AllocationData WasmMemoryTracker::InternalReleaseAllocation(
+ const void* buffer_start) {
+ base::LockGuard<base::Mutex> scope_lock(&mutex_);
+
+ auto find_result = allocations_.find(buffer_start);
+ CHECK_NE(find_result, allocations_.end());
+
+ if (find_result != allocations_.end()) {
+ size_t num_bytes = find_result->second.allocation_length;
+ DCHECK_LE(num_bytes, reserved_address_space_);
+ DCHECK_LE(num_bytes, allocated_address_space_);
+ reserved_address_space_ -= num_bytes;
+ allocated_address_space_ -= num_bytes;
+
+ AllocationData allocation_data = find_result->second;
+ allocations_.erase(find_result);
+ return allocation_data;
}
+ UNREACHABLE();
+}
- void* memory = *allocation_base;
+const WasmMemoryTracker::AllocationData* WasmMemoryTracker::FindAllocationData(
+ const void* buffer_start) {
+ base::LockGuard<base::Mutex> scope_lock(&mutex_);
+ const auto& result = allocations_.find(buffer_start);
+ if (result != allocations_.end()) {
+ return &result->second;
+ }
+ return nullptr;
+}
- // Make the part we care about accessible.
- CHECK(SetPermissions(memory, RoundUp(size, kWasmPageSize),
- PageAllocator::kReadWrite));
+bool WasmMemoryTracker::IsWasmMemory(const void* buffer_start) {
+ base::LockGuard<base::Mutex> scope_lock(&mutex_);
+ return allocations_.find(buffer_start) != allocations_.end();
+}
- reinterpret_cast<v8::Isolate*>(isolate)
- ->AdjustAmountOfExternalAllocatedMemory(size);
+void* WasmMemoryTracker::GetEmptyBackingStore(void** allocation_base,
+ size_t* allocation_length,
+ Heap* heap) {
+ if (empty_backing_store_.allocation_base == nullptr) {
+ constexpr size_t buffer_length = 0;
+ const bool require_guard_regions = trap_handler::IsTrapHandlerEnabled();
+ void* local_allocation_base;
+ size_t local_allocation_length;
+ void* buffer_start = TryAllocateBackingStore(
+ this, heap, buffer_length, require_guard_regions,
+ &local_allocation_base, &local_allocation_length);
- return memory;
+ empty_backing_store_ =
+ AllocationData(local_allocation_base, local_allocation_length,
+ buffer_start, buffer_length);
+ }
+ *allocation_base = empty_backing_store_.allocation_base;
+ *allocation_length = empty_backing_store_.allocation_length;
+ return empty_backing_store_.buffer_start;
+}
+
+bool WasmMemoryTracker::IsEmptyBackingStore(const void* buffer_start) const {
+ return buffer_start == empty_backing_store_.buffer_start;
+}
+
+bool WasmMemoryTracker::FreeMemoryIfIsWasmMemory(const void* buffer_start) {
+ if (IsEmptyBackingStore(buffer_start)) {
+ // We don't need to do anything for the empty backing store, because this
+ // will be freed when WasmMemoryTracker shuts down. Return true so callers
+ // will not try to free the buffer on their own.
+ return true;
+ }
+ if (IsWasmMemory(buffer_start)) {
+ const AllocationData allocation = ReleaseAllocation(buffer_start);
+ CHECK(FreePages(allocation.allocation_base, allocation.allocation_length));
+ return true;
+ }
+ return false;
}
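Note: allocations are tracked in a map keyed by the buffer start (not the allocation base) and guarded by a mutex, because wasm array buffers can be freed from another thread by the ArrayBufferTracker; freeing first checks the map and then releases the whole reservation. A cut-down standalone version of that bookkeeping (illustrative only):

#include <cstddef>
#include <mutex>
#include <unordered_map>

struct AllocationData {
  void* allocation_base = nullptr;
  size_t allocation_length = 0;
};

class MemoryTracker {
 public:
  void Register(void* buffer_start, AllocationData data) {
    std::lock_guard<std::mutex> lock(mutex_);
    allocations_[buffer_start] = data;
  }
  // Returns true if the buffer was wasm memory and has been released.
  bool ReleaseIfTracked(const void* buffer_start) {
    std::lock_guard<std::mutex> lock(mutex_);
    auto it = allocations_.find(buffer_start);
    if (it == allocations_.end()) return false;
    // The real code frees the mapped pages of it->second here (FreePages).
    allocations_.erase(it);
    return true;
  }

 private:
  std::mutex mutex_;
  std::unordered_map<const void*, AllocationData> allocations_;
};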
-Handle<JSArrayBuffer> SetupArrayBuffer(Isolate* isolate, void* allocation_base,
- size_t allocation_length,
- void* backing_store, size_t size,
- bool is_external,
- bool enable_guard_regions,
+Handle<JSArrayBuffer> SetupArrayBuffer(Isolate* isolate, void* backing_store,
+ size_t size, bool is_external,
SharedFlag shared) {
Handle<JSArrayBuffer> buffer =
isolate->factory()->NewJSArrayBuffer(shared, TENURED);
DCHECK_GE(kMaxInt, size);
if (shared == SharedFlag::kShared) DCHECK(FLAG_experimental_wasm_threads);
- JSArrayBuffer::Setup(buffer, isolate, is_external, allocation_base,
- allocation_length, backing_store, static_cast<int>(size),
- shared);
+ constexpr bool is_wasm_memory = true;
+ JSArrayBuffer::Setup(buffer, isolate, is_external, backing_store,
+ static_cast<int>(size), shared, is_wasm_memory);
buffer->set_is_neuterable(false);
buffer->set_is_growable(true);
- buffer->set_has_guard_region(enable_guard_regions);
return buffer;
}
-Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
- bool require_guard_regions,
- SharedFlag shared) {
+MaybeHandle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
+ bool require_guard_regions,
+ SharedFlag shared) {
// Check against kMaxInt, since the byte length is stored as int in the
// JSArrayBuffer. Note that wasm_max_mem_pages can be raised from the command
// line, and we don't want to fail a CHECK then.
if (size > FLAG_wasm_max_mem_pages * kWasmPageSize || size > kMaxInt) {
// TODO(titzer): lift restriction on maximum memory allocated here.
- return Handle<JSArrayBuffer>::null();
+ return {};
}
- void* allocation_base = nullptr; // Set by TryAllocateBackingStore
- size_t allocation_length = 0; // Set by TryAllocateBackingStore
- // Do not reserve memory till non zero memory is encountered.
- void* memory = (size == 0) ? nullptr
- : TryAllocateBackingStore(
- isolate, size, require_guard_regions,
- &allocation_base, &allocation_length);
+ WasmMemoryTracker* memory_tracker = isolate->wasm_engine()->memory_tracker();
- if (size > 0 && memory == nullptr) {
- return Handle<JSArrayBuffer>::null();
- }
+ // Set by TryAllocateBackingStore or GetEmptyBackingStore
+ void* allocation_base = nullptr;
+ size_t allocation_length = 0;
+
+ void* memory =
+ (size == 0)
+ ? memory_tracker->GetEmptyBackingStore(
+ &allocation_base, &allocation_length, isolate->heap())
+ : TryAllocateBackingStore(memory_tracker, isolate->heap(), size,
+ require_guard_regions, &allocation_base,
+ &allocation_length);
+
+ if (size > 0 && memory == nullptr) return {};
#if DEBUG
// Double check the API allocator actually zero-initialized the memory.
@@ -134,11 +271,11 @@ Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
}
#endif
+ reinterpret_cast<v8::Isolate*>(isolate)
+ ->AdjustAmountOfExternalAllocatedMemory(size);
+
constexpr bool is_external = false;
- // All buffers have guard regions now, but sometimes they are small.
- constexpr bool has_guard_region = true;
- return SetupArrayBuffer(isolate, allocation_base, allocation_length, memory,
- size, is_external, has_guard_region, shared);
+ return SetupArrayBuffer(isolate, memory, size, is_external, shared);
}
void DetachMemoryBuffer(Isolate* isolate, Handle<JSArrayBuffer> buffer,
@@ -157,11 +294,12 @@ void DetachMemoryBuffer(Isolate* isolate, Handle<JSArrayBuffer> buffer,
// by Neuter. This means there is a dangling pointer until we neuter the
// buffer. Since there is no way for the user to directly call
// FreeBackingStore, we can ensure this is safe.
- buffer->FreeBackingStore();
+ buffer->FreeBackingStoreFromMainThread();
}
}
DCHECK(buffer->is_external());
+ buffer->set_is_wasm_memory(false);
buffer->set_is_neuterable(true);
buffer->Neuter();
}
diff --git a/deps/v8/src/wasm/wasm-memory.h b/deps/v8/src/wasm/wasm-memory.h
index 438014b417..1652868bf1 100644
--- a/deps/v8/src/wasm/wasm-memory.h
+++ b/deps/v8/src/wasm/wasm-memory.h
@@ -5,6 +5,9 @@
#ifndef V8_WASM_WASM_MEMORY_H_
#define V8_WASM_WASM_MEMORY_H_
+#include <unordered_map>
+
+#include "src/base/platform/mutex.h"
#include "src/flags.h"
#include "src/handles.h"
#include "src/objects/js-array.h"
@@ -13,10 +16,10 @@ namespace v8 {
namespace internal {
namespace wasm {
-class WasmAllocationTracker {
+class WasmMemoryTracker {
public:
- WasmAllocationTracker() {}
- ~WasmAllocationTracker();
+ WasmMemoryTracker() {}
+ ~WasmMemoryTracker();
// ReserveAddressSpace attempts to increase the reserved address space counter
// to determine whether there is enough headroom to allocate another guarded
@@ -24,23 +27,99 @@ class WasmAllocationTracker {
// allocate the buffer), false otherwise.
bool ReserveAddressSpace(size_t num_bytes);
- // Reduces the address space counter so that the space can be reused.
- void ReleaseAddressSpace(size_t num_bytes);
+ void RegisterAllocation(void* allocation_base, size_t allocation_length,
+ void* buffer_start, size_t buffer_length);
+
+ struct AllocationData {
+ void* allocation_base = nullptr;
+ size_t allocation_length = 0;
+ void* buffer_start = nullptr;
+ size_t buffer_length = 0;
+
+ private:
+ AllocationData() = default;
+ AllocationData(void* allocation_base, size_t allocation_length,
+ void* buffer_start, size_t buffer_length)
+ : allocation_base(allocation_base),
+ allocation_length(allocation_length),
+ buffer_start(buffer_start),
+ buffer_length(buffer_length) {
+ DCHECK_LE(reinterpret_cast<uintptr_t>(allocation_base),
+ reinterpret_cast<uintptr_t>(buffer_start));
+ DCHECK_GE(
+ reinterpret_cast<uintptr_t>(allocation_base) + allocation_length,
+ reinterpret_cast<uintptr_t>(buffer_start));
+ DCHECK_GE(
+ reinterpret_cast<uintptr_t>(allocation_base) + allocation_length,
+ reinterpret_cast<uintptr_t>(buffer_start) + buffer_length);
+ }
+
+ friend WasmMemoryTracker;
+ };
+
+ // Decreases the amount of reserved address space
+ void ReleaseReservation(size_t num_bytes);
+
+ // Removes an allocation from the tracker
+ AllocationData ReleaseAllocation(const void* buffer_start);
+
+ bool IsWasmMemory(const void* buffer_start);
+
+ // Returns a pointer to a Wasm buffer's allocation data, or nullptr if the
+ // buffer is not tracked.
+ const AllocationData* FindAllocationData(const void* buffer_start);
+
+ // Empty WebAssembly memories are all backed by a shared inaccessible
+ // reservation. This method creates this store or returns the existing one if
+ // already created.
+ void* GetEmptyBackingStore(void** allocation_base, size_t* allocation_length,
+ Heap* heap);
+
+ bool IsEmptyBackingStore(const void* buffer_start) const;
+
+ // Checks if a buffer points to a Wasm memory and if so does any necessary
+ // work to reclaim the buffer. If this function returns false, the caller must
+ // free the buffer manually.
+ bool FreeMemoryIfIsWasmMemory(const void* buffer_start);
private:
- std::atomic_size_t allocated_address_space_{0};
+ AllocationData InternalReleaseAllocation(const void* buffer_start);
+
+ // Clients use a two-part process. First they "reserve" the address space,
+ // which signifies an intent to actually allocate it. This determines whether
+ // doing the allocation would put us over our limit. Once there is a
+ // reservation, clients can do the allocation and register the result.
+ //
+ // We should always have:
+ // allocated_address_space_ <= reserved_address_space_ <= kAddressSpaceLimit
+ std::atomic_size_t reserved_address_space_{0};
+
+ // Used to protect access to the allocated address space counter and
+ // allocation map. This is needed because Wasm memories can be freed on
+ // another thread by the ArrayBufferTracker.
+ base::Mutex mutex_;
- DISALLOW_COPY_AND_ASSIGN(WasmAllocationTracker);
+ size_t allocated_address_space_{0};
+
+ // Track Wasm memory allocation information. This is keyed by the start of the
+ // buffer, rather than by the start of the allocation.
+ std::unordered_map<const void*, AllocationData> allocations_;
+
+ // Empty backing stores still need to be backed by mapped pages when using
+ // trap handlers. Because this could eat up address space quickly, we keep a
+ // shared backing store here.
+ AllocationData empty_backing_store_;
+
+ DISALLOW_COPY_AND_ASSIGN(WasmMemoryTracker);
};
-Handle<JSArrayBuffer> NewArrayBuffer(
+MaybeHandle<JSArrayBuffer> NewArrayBuffer(
Isolate*, size_t size, bool require_guard_regions,
SharedFlag shared = SharedFlag::kNotShared);
Handle<JSArrayBuffer> SetupArrayBuffer(
- Isolate*, void* allocation_base, size_t allocation_length,
- void* backing_store, size_t size, bool is_external,
- bool enable_guard_regions, SharedFlag shared = SharedFlag::kNotShared);
+ Isolate*, void* backing_store, size_t size, bool is_external,
+ SharedFlag shared = SharedFlag::kNotShared);
void DetachMemoryBuffer(Isolate* isolate, Handle<JSArrayBuffer> buffer,
bool free_memory);
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
index 909b62a16f..d8fa61ad99 100644
--- a/deps/v8/src/wasm/wasm-module.cc
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -15,7 +15,6 @@
#include "src/property-descriptor.h"
#include "src/simulator.h"
#include "src/snapshot/snapshot.h"
-#include "src/trap-handler/trap-handler.h"
#include "src/v8.h"
#include "src/wasm/compilation-manager.h"
#include "src/wasm/module-decoder.h"
@@ -39,94 +38,64 @@ constexpr const char* WasmException::kRuntimeIdStr;
// static
constexpr const char* WasmException::kRuntimeValuesStr;
-void UnpackAndRegisterProtectedInstructionsGC(Isolate* isolate,
- Handle<FixedArray> code_table) {
- DisallowHeapAllocation no_gc;
- std::vector<trap_handler::ProtectedInstructionData> unpacked;
-
- for (int i = 0; i < code_table->length(); ++i) {
- Object* maybe_code = code_table->get(i);
- // This is sometimes undefined when we're called from cctests.
- if (maybe_code->IsUndefined(isolate)) continue;
- Code* code = Code::cast(maybe_code);
-
- if (code->kind() != Code::WASM_FUNCTION) {
- continue;
- }
-
- if (code->trap_handler_index()->value() != trap_handler::kInvalidIndex) {
- // This function has already been registered.
- continue;
- }
-
- byte* base = code->entry();
-
- FixedArray* protected_instructions = code->protected_instructions();
- DCHECK(protected_instructions != nullptr);
- for (int i = 0; i < protected_instructions->length();
- i += Code::kTrapDataSize) {
- trap_handler::ProtectedInstructionData data;
- data.instr_offset =
- protected_instructions
- ->GetValueChecked<Smi>(isolate, i + Code::kTrapCodeOffset)
- ->value();
- data.landing_offset =
- protected_instructions
- ->GetValueChecked<Smi>(isolate, i + Code::kTrapLandingOffset)
- ->value();
- unpacked.emplace_back(data);
- }
-
- if (unpacked.empty()) continue;
-
- const int index = RegisterHandlerData(base, code->instruction_size(),
- unpacked.size(), &unpacked[0]);
-
- unpacked.clear();
-
- // TODO(6792): No longer needed once WebAssembly code is off heap.
- CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
-
- // TODO(eholk): if index is negative, fail.
- DCHECK_LE(0, index);
- code->set_trap_handler_index(Smi::FromInt(index));
+WireBytesRef WasmModule::LookupName(const ModuleWireBytes* wire_bytes,
+ uint32_t function_index) const {
+ if (!names_) {
+ names_.reset(new std::unordered_map<uint32_t, WireBytesRef>());
+ wasm::DecodeFunctionNames(wire_bytes->start(), wire_bytes->end(),
+ names_.get());
}
+ auto it = names_->find(function_index);
+ if (it == names_->end()) return WireBytesRef();
+ return it->second;
}
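Note: function names moved off WasmFunction and are now decoded lazily: the first LookupName decodes the name section once into a map that later lookups reuse. The memoization shape, stripped of the wire-bytes decoding and with placeholder data:

#include <cstdint>
#include <memory>
#include <string>
#include <unordered_map>

class NameTable {
 public:
  // Decode once, then serve from the cache; an empty string means "no name".
  std::string Lookup(uint32_t function_index) const {
    if (!names_) {
      names_ = std::make_unique<std::unordered_map<uint32_t, std::string>>();
      DecodeAllNames(names_.get());  // stands in for DecodeFunctionNames
    }
    auto it = names_->find(function_index);
    return it == names_->end() ? std::string() : it->second;
  }

 private:
  static void DecodeAllNames(std::unordered_map<uint32_t, std::string>* out) {
    (*out)[0] = "main";  // placeholder for decoding the name section
  }
  mutable std::unique_ptr<std::unordered_map<uint32_t, std::string>> names_;
};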
-void UnpackAndRegisterProtectedInstructions(
- Isolate* isolate, const wasm::NativeModule* native_module) {
+WireBytesRef WasmModule::LookupName(SeqOneByteString* wire_bytes,
+ uint32_t function_index) const {
DisallowHeapAllocation no_gc;
+ uint8_t* chars = wire_bytes->GetChars();
+ ModuleWireBytes module_wire_bytes(chars, chars + wire_bytes->length());
+ return LookupName(&module_wire_bytes, function_index);
+}
- for (uint32_t i = native_module->num_imported_functions(),
- e = native_module->FunctionCount();
- i < e; ++i) {
- wasm::WasmCode* code = native_module->GetCode(i);
-
- if (code == nullptr || code->kind() != wasm::WasmCode::kFunction) {
- continue;
- }
-
- if (code->HasTrapHandlerIndex()) continue;
+void WasmModule::AddNameForTesting(int function_index, WireBytesRef name) {
+ if (!names_) {
+ names_.reset(new std::unordered_map<uint32_t, WireBytesRef>());
+ }
+ names_->insert(std::make_pair(function_index, name));
+}
- Address base = code->instructions().start();
+// Get a string stored in the module bytes representing a name.
+WasmName ModuleWireBytes::GetName(WireBytesRef ref) const {
+ if (ref.is_empty()) return {"<?>", 3}; // no name.
+ CHECK(BoundsCheck(ref.offset(), ref.length()));
+ return Vector<const char>::cast(
+ module_bytes_.SubVector(ref.offset(), ref.end_offset()));
+}
- size_t size = code->instructions().size();
- const int index =
- RegisterHandlerData(base, size, code->protected_instructions().size(),
- code->protected_instructions().data());
+// Get a string stored in the module bytes representing a function name.
+WasmName ModuleWireBytes::GetName(const WasmFunction* function,
+ const WasmModule* module) const {
+ return GetName(module->LookupName(this, function->func_index));
+}
- // TODO(6792): No longer needed once WebAssembly code is off heap.
- CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
+// Get a string stored in the module bytes representing a name.
+WasmName ModuleWireBytes::GetNameOrNull(WireBytesRef ref) const {
+ if (!ref.is_set()) return {nullptr, 0}; // no name.
+ CHECK(BoundsCheck(ref.offset(), ref.length()));
+ return Vector<const char>::cast(
+ module_bytes_.SubVector(ref.offset(), ref.end_offset()));
+}
- // TODO(eholk): if index is negative, fail.
- CHECK_LE(0, index);
- code->set_trap_handler_index(static_cast<size_t>(index));
- }
+// Get a string stored in the module bytes representing a function name.
+WasmName ModuleWireBytes::GetNameOrNull(const WasmFunction* function,
+ const WasmModule* module) const {
+ return GetNameOrNull(module->LookupName(this, function->func_index));
}
std::ostream& operator<<(std::ostream& os, const WasmFunctionName& name) {
os << "#" << name.function_->func_index;
- if (name.function_->name.is_set()) {
+ if (!name.name_.is_empty()) {
if (name.name_.start()) {
os << ":";
os.write(name.name_.start(), name.name_.length());
@@ -140,56 +109,6 @@ std::ostream& operator<<(std::ostream& os, const WasmFunctionName& name) {
WasmModule::WasmModule(std::unique_ptr<Zone> owned)
: signature_zone(std::move(owned)) {}
-WasmFunction* GetWasmFunctionForExport(Isolate* isolate,
- Handle<Object> target) {
- if (target->IsJSFunction()) {
- Handle<JSFunction> func = Handle<JSFunction>::cast(target);
- if (func->code()->kind() == Code::JS_TO_WASM_FUNCTION) {
- auto exported = Handle<WasmExportedFunction>::cast(func);
- Handle<WasmInstanceObject> other_instance(exported->instance(), isolate);
- int func_index = exported->function_index();
- return &other_instance->module()->functions[func_index];
- }
- }
- return nullptr;
-}
-
-Handle<Object> GetOrCreateIndirectCallWrapper(
- Isolate* isolate, Handle<WasmInstanceObject> owning_instance,
- WasmCodeWrapper wasm_code, uint32_t func_index, FunctionSig* sig) {
- Address new_context_address =
- reinterpret_cast<Address>(owning_instance->wasm_context()->get());
- if (!wasm_code.IsCodeObject()) {
- DCHECK_NE(wasm_code.GetWasmCode()->kind(),
- wasm::WasmCode::kWasmToWasmWrapper);
- wasm::NativeModule* native_module = wasm_code.GetWasmCode()->owner();
- // The only reason we pass owning_instance is for the GC case. Check
- // that the values match.
- DCHECK_EQ(owning_instance->compiled_module()->GetNativeModule(),
- native_module);
- // We create the wrapper on the module exporting the function. This
- // wrapper will only be called as indirect call.
- wasm::WasmCode* exported_wrapper =
- native_module->GetExportedWrapper(wasm_code.GetWasmCode()->index());
- if (exported_wrapper == nullptr) {
- wasm::NativeModuleModificationScope native_modification_scope(
- native_module);
- Handle<Code> new_wrapper = compiler::CompileWasmToWasmWrapper(
- isolate, wasm_code, sig, new_context_address);
- exported_wrapper = native_module->AddExportedWrapper(
- new_wrapper, wasm_code.GetWasmCode()->index());
- }
- Address target = exported_wrapper->instructions().start();
- return isolate->factory()->NewForeign(target, TENURED);
- }
- CodeSpaceMemoryModificationScope gc_modification_scope(isolate->heap());
- Handle<Code> code = compiler::CompileWasmToWasmWrapper(
- isolate, wasm_code, sig, new_context_address);
- AttachWasmFunctionInfo(isolate, code, owning_instance,
- static_cast<int>(func_index));
- return code;
-}
-
bool IsWasmCodegenAllowed(Isolate* isolate, Handle<Context> context) {
// TODO(wasm): Once wasm has its own CSP policy, we should introduce a
// separate callback that includes information about the module about to be
@@ -378,8 +297,7 @@ Handle<JSArray> GetCustomSections(Isolate* isolate,
}
Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
constexpr bool is_external = false;
- JSArrayBuffer::Setup(buffer, isolate, is_external, memory, size, memory,
- size);
+ JSArrayBuffer::Setup(buffer, isolate, is_external, memory, size);
DisallowHeapAllocation no_gc; // for raw access to string bytes.
Handle<SeqOneByteString> module_bytes(shared->module_bytes(), isolate);
const byte* start =
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index 405b5f3ff4..4a44b04915 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -22,6 +22,7 @@ namespace internal {
class WasmCompiledModule;
class WasmDebugInfo;
+class WasmGlobalObject;
class WasmInstanceObject;
class WasmMemoryObject;
class WasmModuleObject;
@@ -35,13 +36,13 @@ class CallDescriptor;
namespace wasm {
class ErrorThrower;
class NativeModule;
+class TestingModuleBuilder;
// Static representation of a wasm function.
struct WasmFunction {
FunctionSig* sig; // signature of the function.
uint32_t func_index; // index into the function table.
uint32_t sig_index; // index into the signature table.
- WireBytesRef name; // function name, if any.
WireBytesRef code; // code of this function.
bool imported;
bool exported;
@@ -165,9 +166,16 @@ struct V8_EXPORT_PRIVATE WasmModule {
bool is_wasm() const { return origin_ == kWasmOrigin; }
bool is_asm_js() const { return origin_ == kAsmJsOrigin; }
+ WireBytesRef LookupName(const ModuleWireBytes* wire_bytes,
+ uint32_t function_index) const;
+ WireBytesRef LookupName(SeqOneByteString* wire_bytes,
+ uint32_t function_index) const;
+ void AddNameForTesting(int function_index, WireBytesRef name);
+
private:
// TODO(kschimpf) - Encapsulate more fields.
ModuleOrigin origin_ = kWasmOrigin; // origin of the module
+ mutable std::unique_ptr<std::unordered_map<uint32_t, WireBytesRef>> names_;
};
typedef Managed<WasmModule> WasmModuleWrapper;
@@ -185,30 +193,18 @@ struct V8_EXPORT_PRIVATE ModuleWireBytes {
}
// Get a string stored in the module bytes representing a name.
- WasmName GetName(WireBytesRef ref) const {
- if (ref.is_empty()) return {"<?>", 3}; // no name.
- CHECK(BoundsCheck(ref.offset(), ref.length()));
- return Vector<const char>::cast(
- module_bytes_.SubVector(ref.offset(), ref.end_offset()));
- }
+ WasmName GetName(WireBytesRef ref) const;
// Get a string stored in the module bytes representing a function name.
- WasmName GetName(const WasmFunction* function) const {
- return GetName(function->name);
- }
+ WasmName GetName(const WasmFunction* function,
+ const WasmModule* module) const;
// Get a string stored in the module bytes representing a name.
- WasmName GetNameOrNull(WireBytesRef ref) const {
- if (!ref.is_set()) return {nullptr, 0}; // no name.
- CHECK(BoundsCheck(ref.offset(), ref.length()));
- return Vector<const char>::cast(
- module_bytes_.SubVector(ref.offset(), ref.end_offset()));
- }
+ WasmName GetNameOrNull(WireBytesRef ref) const;
// Get a string stored in the module bytes representing a function name.
- WasmName GetNameOrNull(const WasmFunction* function) const {
- return GetNameOrNull(function->name);
- }
+ WasmName GetNameOrNull(const WasmFunction* function,
+ const WasmModule* module) const;
// Checks the given offset range is contained within the module bytes.
bool BoundsCheck(uint32_t offset, uint32_t length) const {
@@ -266,23 +262,6 @@ V8_EXPORT_PRIVATE Handle<JSArray> GetCustomSections(
// function index, the inner one by the local index.
Handle<FixedArray> DecodeLocalNames(Isolate*, Handle<WasmSharedModuleData>);
-// If the target is an export wrapper, return the {WasmFunction*} corresponding
-// to the wrapped wasm function; in all other cases, return nullptr.
-// The returned pointer is owned by the wasm instance target belongs to. The
-// result is alive as long as the instance exists.
-// TODO(titzer): move this to WasmExportedFunction.
-WasmFunction* GetWasmFunctionForExport(Isolate* isolate, Handle<Object> target);
-
-Handle<Object> GetOrCreateIndirectCallWrapper(
- Isolate* isolate, Handle<WasmInstanceObject> owning_instance,
- WasmCodeWrapper wasm_code, uint32_t func_index, FunctionSig* sig);
-
-void UnpackAndRegisterProtectedInstructionsGC(Isolate* isolate,
- Handle<FixedArray> code_table);
-
-void UnpackAndRegisterProtectedInstructions(
- Isolate* isolate, const wasm::NativeModule* native_module);
-
// TruncatedUserString makes it easy to output names up to a certain length, and
// output a truncation followed by '...' if they exceed a limit.
// Use like this:
diff --git a/deps/v8/src/wasm/wasm-objects-inl.h b/deps/v8/src/wasm/wasm-objects-inl.h
index 4891ad671a..8960ac2fc7 100644
--- a/deps/v8/src/wasm/wasm-objects-inl.h
+++ b/deps/v8/src/wasm/wasm-objects-inl.h
@@ -6,6 +6,7 @@
#define V8_WASM_WASM_OBJECTS_INL_H_
#include "src/heap/heap-inl.h"
+#include "src/v8memory.h"
#include "src/wasm/wasm-objects.h"
namespace v8 {
@@ -14,9 +15,13 @@ namespace internal {
// Has to be the last include (doesn't have include guards)
#include "src/objects/object-macros.h"
+CAST_ACCESSOR(WasmCompiledModule)
+CAST_ACCESSOR(WasmDebugInfo)
+CAST_ACCESSOR(WasmGlobalObject)
CAST_ACCESSOR(WasmInstanceObject)
CAST_ACCESSOR(WasmMemoryObject)
CAST_ACCESSOR(WasmModuleObject)
+CAST_ACCESSOR(WasmSharedModuleData)
CAST_ACCESSOR(WasmTableObject)
#define OPTIONAL_ACCESSORS(holder, name, type, offset) \
@@ -25,6 +30,20 @@ CAST_ACCESSOR(WasmTableObject)
} \
ACCESSORS(holder, name, type, offset)
+#define READ_PRIMITIVE_FIELD(p, type, offset) \
+ (*reinterpret_cast<type const*>(FIELD_ADDR_CONST(p, offset)))
+
+#define WRITE_PRIMITIVE_FIELD(p, type, offset, value) \
+ (*reinterpret_cast<type*>(FIELD_ADDR(p, offset)) = value)
+
+#define PRIMITIVE_ACCESSORS(holder, name, type, offset) \
+ type holder::name() const { \
+ return READ_PRIMITIVE_FIELD(this, type, offset); \
+ } \
+ void holder::set_##name(type value) { \
+ WRITE_PRIMITIVE_FIELD(this, type, offset, value); \
+ }
+
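Note: PRIMITIVE_ACCESSORS generates getters and setters that reinterpret a raw, untagged field at a byte offset inside the object, which is how the instance now exposes values such as memory_start and memory_size. A self-contained miniature of the same pattern (FakeInstance and the offset are illustrative, not V8 layout):

#include <cstdint>

#define RAW_FIELD_ACCESSORS(holder, name, type, offset)                  \
  type holder::name() const {                                            \
    return *reinterpret_cast<const type*>(                               \
        reinterpret_cast<const char*>(this) + (offset));                 \
  }                                                                      \
  void holder::set_##name(type value) {                                  \
    *reinterpret_cast<type*>(reinterpret_cast<char*>(this) + (offset)) = \
        value;                                                           \
  }

struct FakeInstance {
  char payload[16];  // raw storage, as if laid out by the GC
  uint32_t memory_size() const;
  void set_memory_size(uint32_t value);
};

RAW_FIELD_ACCESSORS(FakeInstance, memory_size, uint32_t, 4)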
// WasmModuleObject
ACCESSORS(WasmModuleObject, compiled_module, WasmCompiledModule,
kCompiledModuleOffset)
@@ -37,12 +56,67 @@ ACCESSORS(WasmTableObject, dispatch_tables, FixedArray, kDispatchTablesOffset)
// WasmMemoryObject
ACCESSORS(WasmMemoryObject, array_buffer, JSArrayBuffer, kArrayBufferOffset)
SMI_ACCESSORS(WasmMemoryObject, maximum_pages, kMaximumPagesOffset)
-OPTIONAL_ACCESSORS(WasmMemoryObject, instances, WeakFixedArray,
+OPTIONAL_ACCESSORS(WasmMemoryObject, instances, FixedArrayOfWeakCells,
kInstancesOffset)
+// WasmGlobalObject
+ACCESSORS(WasmGlobalObject, array_buffer, JSArrayBuffer, kArrayBufferOffset)
+SMI_ACCESSORS(WasmGlobalObject, offset, kOffsetOffset)
+SMI_ACCESSORS(WasmGlobalObject, flags, kFlagsOffset)
+BIT_FIELD_ACCESSORS(WasmGlobalObject, flags, type, WasmGlobalObject::TypeBits)
+BIT_FIELD_ACCESSORS(WasmGlobalObject, flags, is_mutable,
+ WasmGlobalObject::IsMutableBit)
+
+// static
+uint32_t WasmGlobalObject::TypeSize(wasm::ValueType type) {
+ return 1U << ElementSizeLog2Of(type);
+}
+
+uint32_t WasmGlobalObject::type_size() const { return TypeSize(type()); }
+
+Address WasmGlobalObject::address() const {
+ uint32_t buffer_size = 0;
+ DCHECK(array_buffer()->byte_length()->ToUint32(&buffer_size));
+ DCHECK(offset() + type_size() <= buffer_size);
+ USE(buffer_size);
+ return Address(array_buffer()->backing_store()) + offset();
+}
+
+int32_t WasmGlobalObject::GetI32() { return Memory::int32_at(address()); }
+
+float WasmGlobalObject::GetF32() { return Memory::float_at(address()); }
+
+double WasmGlobalObject::GetF64() { return Memory::double_at(address()); }
+
+void WasmGlobalObject::SetI32(int32_t value) {
+ Memory::int32_at(address()) = value;
+}
+
+void WasmGlobalObject::SetF32(float value) {
+ Memory::float_at(address()) = value;
+}
+
+void WasmGlobalObject::SetF64(double value) {
+ Memory::double_at(address()) = value;
+}
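Note: a WasmGlobalObject reads and writes its value directly at backing_store + offset in the underlying array buffer, using the element size implied by the type. A standalone model of that typed, offset-based access, using memcpy in place of V8's Memory:: helpers:

#include <cstdint>
#include <cstring>
#include <vector>

struct GlobalSlot {
  std::vector<uint8_t> buffer;  // stands in for the JSArrayBuffer store
  uint32_t offset = 0;

  template <typename T>
  T Get() const {
    T value;
    std::memcpy(&value, buffer.data() + offset, sizeof(T));
    return value;
  }
  template <typename T>
  void Set(T value) {
    std::memcpy(buffer.data() + offset, &value, sizeof(T));
  }
};

// Usage: GlobalSlot g{std::vector<uint8_t>(8), 0}; g.Set<float>(1.5f);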
+
// WasmInstanceObject
-ACCESSORS(WasmInstanceObject, wasm_context, Managed<WasmContext>,
- kWasmContextOffset)
+PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_start, byte*, kMemoryStartOffset)
+PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_size, uint32_t,
+ kMemorySizeOffset)
+PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_mask, uint32_t,
+ kMemoryMaskOffset)
+PRIMITIVE_ACCESSORS(WasmInstanceObject, imported_function_targets, Address*,
+ kImportedFunctionTargetsOffset)
+PRIMITIVE_ACCESSORS(WasmInstanceObject, globals_start, byte*,
+ kGlobalsStartOffset)
+PRIMITIVE_ACCESSORS(WasmInstanceObject, indirect_function_table_size, uint32_t,
+ kIndirectFunctionTableSizeOffset)
+PRIMITIVE_ACCESSORS(WasmInstanceObject, indirect_function_table_sig_ids,
+ uint32_t*, kIndirectFunctionTableSigIdsOffset)
+PRIMITIVE_ACCESSORS(WasmInstanceObject, indirect_function_table_targets,
+ Address*, kIndirectFunctionTableTargetsOffset)
+
ACCESSORS(WasmInstanceObject, compiled_module, WasmCompiledModule,
kCompiledModuleOffset)
ACCESSORS(WasmInstanceObject, exports_object, JSObject, kExportsObjectOffset)
@@ -54,14 +128,37 @@ OPTIONAL_ACCESSORS(WasmInstanceObject, debug_info, WasmDebugInfo,
kDebugInfoOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, table_object, WasmTableObject,
kTableObjectOffset)
-OPTIONAL_ACCESSORS(WasmInstanceObject, function_tables, FixedArray,
- kFunctionTablesOffset)
-ACCESSORS(WasmInstanceObject, directly_called_instances, FixedArray,
- kDirectlyCalledInstancesOffset)
-ACCESSORS(WasmInstanceObject, js_imports_table, FixedArray,
- kJsImportsTableOffset)
+ACCESSORS(WasmInstanceObject, imported_function_instances, FixedArray,
+ kImportedFunctionInstancesOffset)
+ACCESSORS(WasmInstanceObject, imported_function_callables, FixedArray,
+ kImportedFunctionCallablesOffset)
+OPTIONAL_ACCESSORS(WasmInstanceObject, indirect_function_table_instances,
+ FixedArray, kIndirectFunctionTableInstancesOffset)
+ACCESSORS(WasmInstanceObject, managed_native_allocations, Foreign,
+ kManagedNativeAllocationsOffset)
+OPTIONAL_ACCESSORS(WasmInstanceObject, managed_indirect_patcher, Foreign,
+ kManagedIndirectPatcherOffset)
+
+inline bool WasmInstanceObject::has_indirect_function_table() {
+ return indirect_function_table_sig_ids() != nullptr;
+}
+
+IndirectFunctionTableEntry::IndirectFunctionTableEntry(
+ WasmInstanceObject* instance, int index)
+ : instance_(instance), index_(index) {
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, instance->indirect_function_table_size());
+}
+
+ImportedFunctionEntry::ImportedFunctionEntry(WasmInstanceObject* instance,
+ int index)
+ : instance_(instance), index_(index) {
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, instance->module()->num_imported_functions);
+}
// WasmSharedModuleData
+ACCESSORS(WasmSharedModuleData, module_wrapper, Object, kModuleWrapperOffset)
ACCESSORS(WasmSharedModuleData, module_bytes, SeqOneByteString,
kModuleBytesOffset)
ACCESSORS(WasmSharedModuleData, script, Script, kScriptOffset)
@@ -69,10 +166,16 @@ OPTIONAL_ACCESSORS(WasmSharedModuleData, asm_js_offset_table, ByteArray,
kAsmJsOffsetTableOffset)
OPTIONAL_ACCESSORS(WasmSharedModuleData, breakpoint_infos, FixedArray,
kBreakPointInfosOffset)
+void WasmSharedModuleData::reset_breakpoint_infos() {
+ DCHECK(IsWasmSharedModuleData());
+ WRITE_FIELD(this, kBreakPointInfosOffset, GetHeap()->undefined_value());
+}
-OPTIONAL_ACCESSORS(WasmSharedModuleData, lazy_compilation_orchestrator, Foreign,
- kLazyCompilationOrchestratorOffset)
-
+// WasmDebugInfo
+ACCESSORS(WasmDebugInfo, wasm_instance, WasmInstanceObject, kInstanceOffset)
+ACCESSORS(WasmDebugInfo, interpreter_handle, Object, kInterpreterHandleOffset)
+ACCESSORS(WasmDebugInfo, interpreted_functions, Object,
+ kInterpretedFunctionsOffset)
OPTIONAL_ACCESSORS(WasmDebugInfo, locals_names, FixedArray, kLocalsNamesOffset)
OPTIONAL_ACCESSORS(WasmDebugInfo, c_wasm_entries, FixedArray,
kCWasmEntriesOffset)
@@ -81,76 +184,65 @@ OPTIONAL_ACCESSORS(WasmDebugInfo, c_wasm_entry_map, Managed<wasm::SignatureMap>,
#undef OPTIONAL_ACCESSORS
-#define WCM_OBJECT_OR_WEAK(TYPE, NAME, ID, TYPE_CHECK, SETTER_MODIFIER) \
- TYPE* WasmCompiledModule::maybe_##NAME() const { \
- Object* obj = get(ID); \
- if (!(TYPE_CHECK)) return nullptr; \
- return TYPE::cast(obj); \
- } \
- \
- TYPE* WasmCompiledModule::NAME() const { \
- Object* obj = get(ID); \
- DCHECK(TYPE_CHECK); \
- return TYPE::cast(obj); \
- } \
- \
- bool WasmCompiledModule::has_##NAME() const { \
- Object* obj = get(ID); \
- return TYPE_CHECK; \
- } \
- \
- void WasmCompiledModule::reset_##NAME() { set_undefined(ID); } \
- \
- void WasmCompiledModule::set_##NAME(TYPE* value) { set(ID, value); }
-
-#define WCM_OBJECT(TYPE, NAME) \
- WCM_OBJECT_OR_WEAK(TYPE, NAME, kID_##NAME, obj->Is##TYPE(), public)
-
-#define WCM_CONST_OBJECT(TYPE, NAME) \
- WCM_OBJECT_OR_WEAK(TYPE, NAME, kID_##NAME, obj->Is##TYPE(), private)
-
-#define WCM_WASM_OBJECT(TYPE, NAME) \
- WCM_OBJECT_OR_WEAK(TYPE, NAME, kID_##NAME, TYPE::Is##TYPE(obj), private)
-
-#define WCM_SMALL_CONST_NUMBER(TYPE, NAME) \
- TYPE WasmCompiledModule::NAME() const { \
- return static_cast<TYPE>(Smi::ToInt(get(kID_##NAME))); \
- } \
- \
- void WasmCompiledModule::set_##NAME(TYPE value) { \
- set(kID_##NAME, Smi::FromInt(value)); \
+#define WCM_OBJECT_OR_WEAK(TYPE, NAME, OFFSET, TYPE_CHECK) \
+ bool WasmCompiledModule::has_##NAME() const { \
+ Object* value = READ_FIELD(this, OFFSET); \
+ return TYPE_CHECK; \
+ } \
+ \
+ void WasmCompiledModule::reset_##NAME() { \
+ WRITE_FIELD(this, OFFSET, GetHeap()->undefined_value()); \
+ } \
+ \
+ ACCESSORS_CHECKED2(WasmCompiledModule, NAME, TYPE, OFFSET, TYPE_CHECK, true)
+
+#define WCM_OBJECT(TYPE, NAME, OFFSET) \
+ WCM_OBJECT_OR_WEAK(TYPE, NAME, OFFSET, value->Is##TYPE())
+
+#define WCM_SMALL_CONST_NUMBER(TYPE, NAME, OFFSET) \
+ TYPE WasmCompiledModule::NAME() const { \
+ return static_cast<TYPE>(Smi::ToInt(READ_FIELD(this, OFFSET))); \
+ } \
+ \
+ void WasmCompiledModule::set_##NAME(TYPE value) { \
+ WRITE_FIELD(this, OFFSET, Smi::FromInt(value)); \
}
-#define WCM_WEAK_LINK(TYPE, NAME) \
- WCM_OBJECT_OR_WEAK(WeakCell, weak_##NAME, kID_##NAME, obj->IsWeakCell(), \
- public) \
- \
- TYPE* WasmCompiledModule::NAME() const { \
- DCHECK(!weak_##NAME()->cleared()); \
- return TYPE::cast(weak_##NAME()->value()); \
+#define WCM_WEAK_LINK(TYPE, NAME, OFFSET) \
+ WCM_OBJECT_OR_WEAK(WeakCell, weak_##NAME, OFFSET, value->IsWeakCell()) \
+ \
+ TYPE* WasmCompiledModule::NAME() const { \
+ DCHECK(!weak_##NAME()->cleared()); \
+ return TYPE::cast(weak_##NAME()->value()); \
}
-#define DEFINITION(KIND, TYPE, NAME) WCM_##KIND(TYPE, NAME)
-WCM_PROPERTY_TABLE(DEFINITION)
-#undef DECLARATION
+// WasmCompiledModule
+WCM_OBJECT(WasmSharedModuleData, shared, kSharedOffset)
+WCM_WEAK_LINK(Context, native_context, kNativeContextOffset)
+WCM_OBJECT(FixedArray, export_wrappers, kExportWrappersOffset)
+WCM_OBJECT(WasmCompiledModule, next_instance, kNextInstanceOffset)
+WCM_OBJECT(WasmCompiledModule, prev_instance, kPrevInstanceOffset)
+WCM_WEAK_LINK(WasmInstanceObject, owning_instance, kOwningInstanceOffset)
+WCM_WEAK_LINK(WasmModuleObject, wasm_module, kWasmModuleOffset)
+WCM_OBJECT(Foreign, native_module, kNativeModuleOffset)
+WCM_SMALL_CONST_NUMBER(bool, use_trap_handler, kUseTrapHandlerOffset)
+ACCESSORS(WasmCompiledModule, raw_next_instance, Object, kNextInstanceOffset);
+ACCESSORS(WasmCompiledModule, raw_prev_instance, Object, kPrevInstanceOffset);
-#undef WCM_CONST_OBJECT
-#undef WCM_LARGE_NUMBER
#undef WCM_OBJECT_OR_WEAK
+#undef WCM_OBJECT
#undef WCM_SMALL_CONST_NUMBER
#undef WCM_WEAK_LINK
+#undef READ_PRIMITIVE_FIELD
+#undef WRITE_PRIMITIVE_FIELD
+#undef PRIMITIVE_ACCESSORS
uint32_t WasmTableObject::current_length() { return functions()->length(); }
-bool WasmTableObject::has_maximum_length() {
- return maximum_length()->Number() >= 0;
-}
-
bool WasmMemoryObject::has_maximum_pages() { return maximum_pages() >= 0; }
-void WasmCompiledModule::ReplaceCodeTableForTesting(
- Handle<FixedArray> testing_table) {
- set_code_table(*testing_table);
+inline bool WasmCompiledModule::has_instance() const {
+ return !weak_owning_instance()->cleared();
}
#include "src/objects/object-macros-undef.h"
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index f06f3240f0..df9c20a8c2 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -27,16 +27,105 @@
if (FLAG_trace_wasm_instances) PrintF(__VA_ARGS__); \
} while (false)
+#define TRACE_IFT(...) \
+ do { \
+ if (false) PrintF(__VA_ARGS__); \
+ } while (false)
+
namespace v8 {
namespace internal {
// Import a few often used types from the wasm namespace.
-using GlobalHandleAddress = wasm::GlobalHandleAddress;
using WasmFunction = wasm::WasmFunction;
using WasmModule = wasm::WasmModule;
namespace {
+// Manages the natively-allocated memory for a WasmInstanceObject. Since
+// an instance finalizer is not guaranteed to run upon isolate shutdown,
+// we must use a Managed<WasmInstanceNativeAllocations> to guarantee
+// it is freed.
+// Native allocations are the signature ids and targets for indirect call
+// targets, as well as the call targets for imported functions.
+class WasmInstanceNativeAllocations {
+ public:
+// Helper macro to set an internal field and the corresponding field
+// on an instance.
+#define SET(instance, field, value) \
+ { \
+ auto v = value; \
+ this->field##_ = v; \
+ instance->set_##field(v); \
+ }
+
+ // Allocates initial native storage for a given instance.
+ WasmInstanceNativeAllocations(Handle<WasmInstanceObject> instance,
+ size_t num_imported_functions) {
+ SET(instance, imported_function_targets,
+ reinterpret_cast<Address*>(
+ calloc(num_imported_functions, sizeof(Address))));
+ }
+ ~WasmInstanceNativeAllocations() { free(); }
+ // Frees natively-allocated storage.
+ void free() {
+ ::free(indirect_function_table_sig_ids_);
+ ::free(indirect_function_table_targets_);
+ ::free(imported_function_targets_);
+ indirect_function_table_sig_ids_ = nullptr;
+ indirect_function_table_targets_ = nullptr;
+ imported_function_targets_ = nullptr;
+ }
+ // Resizes the indirect function table.
+ void resize_indirect_function_table(Isolate* isolate,
+ Handle<WasmInstanceObject> instance,
+ uint32_t new_size) {
+ uint32_t old_size = instance->indirect_function_table_size();
+ void* new_sig_ids = nullptr;
+ void* new_targets = nullptr;
+ Handle<FixedArray> new_instances;
+ if (indirect_function_table_sig_ids_) {
+ // Reallocate the old storage.
+ new_sig_ids = realloc(indirect_function_table_sig_ids_,
+ new_size * sizeof(uint32_t));
+ new_targets =
+ realloc(indirect_function_table_targets_, new_size * sizeof(Address));
+
+ Handle<FixedArray> old(instance->indirect_function_table_instances(),
+ isolate);
+ new_instances = isolate->factory()->CopyFixedArrayAndGrow(
+ old, static_cast<int>(new_size - old_size));
+ } else {
+ // Allocate new storage.
+ new_sig_ids = malloc(new_size * sizeof(uint32_t));
+ new_targets = malloc(new_size * sizeof(Address));
+ new_instances =
+ isolate->factory()->NewFixedArray(static_cast<int>(new_size));
+ }
+ // Initialize new entries.
+ instance->set_indirect_function_table_size(new_size);
+ SET(instance, indirect_function_table_sig_ids,
+ reinterpret_cast<uint32_t*>(new_sig_ids));
+ SET(instance, indirect_function_table_targets,
+ reinterpret_cast<Address*>(new_targets));
+
+ instance->set_indirect_function_table_instances(*new_instances);
+ for (uint32_t j = old_size; j < new_size; j++) {
+ IndirectFunctionTableEntry(*instance, static_cast<int>(j)).clear();
+ }
+ }
+ uint32_t* indirect_function_table_sig_ids_ = nullptr;
+ Address* indirect_function_table_targets_ = nullptr;
+ Address* imported_function_targets_ = nullptr;
+#undef SET
+};
+
+WasmInstanceNativeAllocations* GetNativeAllocations(
+ WasmInstanceObject* instance) {
+ return reinterpret_cast<Managed<WasmInstanceNativeAllocations>*>(
+ instance->managed_native_allocations())
+ ->get();
+}
+
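WasmInstanceNativeAllocations above owns the calloc'd and realloc'd side tables and, through the SET macro, mirrors every raw pointer it hands out back onto the instance so the instance fields always point at the current storage. A simplified standalone sketch of that ownership pattern (no Managed<> and no GC here; Instance, NativeAllocations and the field names are invented, and out-of-memory handling is omitted for brevity):

// Standalone sketch, not V8 code; names invented, realloc failures not handled.
#include <cassert>
#include <cstdint>
#include <cstdlib>

struct Instance {
  // Raw pointers mirrored from the allocation object, like the
  // indirect_function_table_* accessors above.
  uint32_t* sig_ids = nullptr;
  uintptr_t* targets = nullptr;
  uint32_t table_size = 0;
};

class NativeAllocations {
 public:
  explicit NativeAllocations(Instance* instance) : instance_(instance) {}
  ~NativeAllocations() { Free(); }

  // Grows the table with realloc and mirrors the (possibly moved) pointers
  // back onto the instance, which is what the SET macro does above.
  void ResizeTable(uint32_t new_size) {
    sig_ids_ = static_cast<uint32_t*>(
        std::realloc(sig_ids_, new_size * sizeof(uint32_t)));
    targets_ = static_cast<uintptr_t*>(
        std::realloc(targets_, new_size * sizeof(uintptr_t)));
    for (uint32_t i = instance_->table_size; i < new_size; ++i) {
      sig_ids_[i] = static_cast<uint32_t>(-1);  // new entries start cleared
      targets_[i] = 0;
    }
    instance_->sig_ids = sig_ids_;
    instance_->targets = targets_;
    instance_->table_size = new_size;
  }

  void Free() {
    std::free(sig_ids_);
    std::free(targets_);
    sig_ids_ = nullptr;
    targets_ = nullptr;
  }

 private:
  Instance* instance_;
  uint32_t* sig_ids_ = nullptr;
  uintptr_t* targets_ = nullptr;
};

int main() {
  Instance instance;
  NativeAllocations allocations(&instance);  // freed deterministically on scope exit
  allocations.ResizeTable(4);
  assert(instance.table_size == 4);
  assert(instance.sig_ids[3] == static_cast<uint32_t>(-1));
  allocations.ResizeTable(8);  // may move the storage; pointers are re-mirrored
  assert(instance.table_size == 8 && instance.targets[7] == 0);
  return 0;
}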
// An iterator that returns first the module itself, then all modules linked via
// next, then all linked via prev.
class CompiledModulesIterator
@@ -120,9 +209,7 @@ class CompiledModuleInstancesIterator
private:
bool NeedToAdvance() {
- return !it.current_.is_null() &&
- (!it.current_->has_weak_owning_instance() ||
- it.current_->weak_owning_instance()->cleared());
+ return !it.current_.is_null() && !it.current_->has_instance();
}
CompiledModulesIterator it;
};
@@ -155,14 +242,6 @@ bool IsBreakablePosition(WasmSharedModuleData* shared, int func_index,
}
#endif // DEBUG
-void CompiledModuleFinalizer(const v8::WeakCallbackInfo<void>& data) {
- DisallowHeapAllocation no_gc;
- JSObject** p = reinterpret_cast<JSObject**>(data.GetParameter());
- WasmCompiledModule* compiled_module = WasmCompiledModule::cast(*p);
- compiled_module->reset_native_module();
- GlobalHandles::Destroy(reinterpret_cast<Object**>(p));
-}
-
enum DispatchTableElements : int {
kDispatchTableInstanceOffset,
kDispatchTableIndexOffset,
@@ -183,6 +262,8 @@ Handle<WasmModuleObject> WasmModuleObject::New(
Handle<WeakCell> link_to_module =
isolate->factory()->NewWeakCell(module_object);
compiled_module->set_weak_wasm_module(*link_to_module);
+
+ compiled_module->LogWasmCodes(isolate);
return module_object;
}
@@ -194,7 +275,7 @@ void WasmModuleObject::ValidateStateForTesting(
CHECK_EQ(compiled_module->weak_wasm_module()->value(), *module_obj);
CHECK(!compiled_module->has_prev_instance());
CHECK(!compiled_module->has_next_instance());
- CHECK(!compiled_module->has_weak_owning_instance());
+ CHECK(!compiled_module->has_instance());
}
Handle<WasmTableObject> WasmTableObject::New(Isolate* isolate, uint32_t initial,
@@ -222,9 +303,7 @@ Handle<WasmTableObject> WasmTableObject::New(Isolate* isolate, uint32_t initial,
void WasmTableObject::AddDispatchTable(Isolate* isolate,
Handle<WasmTableObject> table_obj,
Handle<WasmInstanceObject> instance,
- int table_index,
- Handle<FixedArray> function_table) {
- DCHECK_EQ(0, function_table->length() % compiler::kFunctionTableEntrySize);
+ int table_index) {
Handle<FixedArray> dispatch_tables(table_obj->dispatch_tables());
int old_length = dispatch_tables->length();
DCHECK_EQ(0, old_length % kDispatchTableNumElements);
@@ -241,8 +320,6 @@ void WasmTableObject::AddDispatchTable(Isolate* isolate,
*instance);
new_dispatch_tables->set(old_length + kDispatchTableIndexOffset,
Smi::FromInt(table_index));
- new_dispatch_tables->set(old_length + kDispatchTableFunctionTableOffset,
- *function_table);
table_obj->set_dispatch_tables(*new_dispatch_tables);
}
@@ -253,76 +330,21 @@ void WasmTableObject::Grow(Isolate* isolate, uint32_t count) {
Handle<FixedArray> dispatch_tables(this->dispatch_tables());
DCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements);
uint32_t old_size = functions()->length();
- constexpr int kInvalidSigIndex = -1;
-
- if (WASM_CONTEXT_TABLES) {
- // If tables are stored in the WASM context, no code patching is
- // necessary. We simply have to grow the raw tables in the WasmContext
- // for each instance that has imported this table.
-
- // TODO(titzer): replace the dispatch table with a weak list of all
- // the instances that import a given table.
- for (int i = 0; i < dispatch_tables->length();
- i += kDispatchTableNumElements) {
- // TODO(titzer): potentially racy update of WasmContext::table
- WasmContext* wasm_context =
- WasmInstanceObject::cast(dispatch_tables->get(i))
- ->wasm_context()
- ->get();
- DCHECK_EQ(old_size, wasm_context->table_size);
- uint32_t new_size = old_size + count;
- wasm_context->table = reinterpret_cast<IndirectFunctionTableEntry*>(
- realloc(wasm_context->table,
- new_size * sizeof(IndirectFunctionTableEntry)));
- for (uint32_t j = old_size; j < new_size; j++) {
- wasm_context->table[j].sig_id = kInvalidSigIndex;
- wasm_context->table[j].context = nullptr;
- wasm_context->table[j].target = nullptr;
- }
- wasm_context->table_size = new_size;
- }
- return;
- }
- // TODO(6792): No longer needed once WebAssembly code is off heap.
- CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
+ // Tables are stored in the instance object, no code patching is
+ // necessary. We simply have to grow the raw tables in each instance
+ // that has imported this table.
- Zone specialization_zone(isolate->allocator(), ZONE_NAME);
+ // TODO(titzer): replace the dispatch table with a weak list of all
+ // the instances that import a given table.
for (int i = 0; i < dispatch_tables->length();
i += kDispatchTableNumElements) {
- Handle<FixedArray> old_function_table(FixedArray::cast(
- dispatch_tables->get(i + kDispatchTableFunctionTableOffset)));
- Handle<FixedArray> new_function_table = isolate->global_handles()->Create(
- *isolate->factory()->CopyFixedArrayAndGrow(
- old_function_table, count * compiler::kFunctionTableEntrySize));
-
- GlobalHandleAddress new_function_table_addr = new_function_table.address();
-
- int table_index =
- Smi::cast(dispatch_tables->get(i + kDispatchTableIndexOffset))->value();
- // Update dispatch tables with new function tables.
- dispatch_tables->set(i + kDispatchTableFunctionTableOffset,
- *new_function_table);
-
- // Patch the code of the respective instance.
- if (!WASM_CONTEXT_TABLES) {
- DisallowHeapAllocation no_gc;
- wasm::CodeSpecialization code_specialization(isolate,
- &specialization_zone);
- WasmInstanceObject* instance =
- WasmInstanceObject::cast(dispatch_tables->get(i));
- WasmCompiledModule* compiled_module = instance->compiled_module();
- GlobalHandleAddress old_function_table_addr =
- WasmCompiledModule::GetTableValue(compiled_module->function_tables(),
- table_index);
- code_specialization.PatchTableSize(old_size, old_size + count);
- code_specialization.RelocatePointer(old_function_table_addr,
- new_function_table_addr);
- code_specialization.ApplyToWholeInstance(instance);
- WasmCompiledModule::UpdateTableValue(compiled_module->function_tables(),
- table_index,
- new_function_table_addr);
- }
+ Handle<WasmInstanceObject> instance(
+ WasmInstanceObject::cast(dispatch_tables->get(i)), isolate);
+ DCHECK_EQ(old_size, instance->indirect_function_table_size());
+ uint32_t new_size = old_size + count;
+ WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(instance,
+ new_size);
}
}
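With function tables now stored per instance, WasmTableObject::Grow above no longer patches code; it walks the dispatch tables and grows each importing instance's raw indirect-function table to the new size. A rough standalone sketch of that loop (illustrative names; std::vector stands in for the realloc'd native storage):

// Standalone sketch, not V8 code; names and storage are illustrative.
#include <cassert>
#include <cstdint>
#include <vector>

struct Instance {
  std::vector<int32_t> sig_ids;  // one slot per indirect-function-table entry
  uint32_t table_size() const { return static_cast<uint32_t>(sig_ids.size()); }
  void EnsureTableWithMinimumSize(uint32_t minimum_size) {
    if (table_size() >= minimum_size) return;  // nothing to do
    sig_ids.resize(minimum_size, -1);          // new entries start cleared
  }
};

// Grow a table shared by several instances: every importer must end up with at
// least old_size + count entries.
void GrowTable(const std::vector<Instance*>& importing_instances,
               uint32_t old_size, uint32_t count) {
  for (Instance* instance : importing_instances) {
    assert(instance->table_size() == old_size);  // mirrors the DCHECK above
    instance->EnsureTableWithMinimumSize(old_size + count);
  }
}

int main() {
  Instance a, b;
  a.sig_ids.assign(2, -1);
  b.sig_ids.assign(2, -1);
  GrowTable({&a, &b}, 2, 3);
  assert(a.table_size() == 5 && b.table_size() == 5 && b.sig_ids[4] == -1);
  return 0;
}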
@@ -336,65 +358,38 @@ void WasmTableObject::Set(Isolate* isolate, Handle<WasmTableObject> table,
}
// TODO(titzer): Change this to MaybeHandle<WasmExportedFunction>
+ DCHECK(WasmExportedFunction::IsWasmExportedFunction(*function));
auto exported_function = Handle<WasmExportedFunction>::cast(function);
- auto* wasm_function = wasm::GetWasmFunctionForExport(isolate, function);
+ Handle<WasmInstanceObject> other_instance(exported_function->instance());
+ int func_index = exported_function->function_index();
+ auto* wasm_function = &other_instance->module()->functions[func_index];
DCHECK_NOT_NULL(wasm_function);
DCHECK_NOT_NULL(wasm_function->sig);
- WasmCodeWrapper wasm_code = exported_function->GetWasmCode();
+ wasm::WasmCode* wasm_code = exported_function->GetWasmCode();
UpdateDispatchTables(isolate, table, table_index, wasm_function->sig,
- handle(exported_function->instance()), wasm_code,
- exported_function->function_index());
+ handle(exported_function->instance()), wasm_code);
array->set(table_index, *function);
}
void WasmTableObject::UpdateDispatchTables(
Isolate* isolate, Handle<WasmTableObject> table, int table_index,
wasm::FunctionSig* sig, Handle<WasmInstanceObject> from_instance,
- WasmCodeWrapper wasm_code, int func_index) {
- if (WASM_CONTEXT_TABLES) {
- // We simply need to update the WASM contexts for each instance
- // that imports this table.
- DisallowHeapAllocation no_gc;
- FixedArray* dispatch_tables = table->dispatch_tables();
- DCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements);
-
- for (int i = 0; i < dispatch_tables->length();
- i += kDispatchTableNumElements) {
- // Note that {SignatureMap::Find} may return {-1} if the signature is
- // not found; it will simply never match any check.
- WasmInstanceObject* to_instance = WasmInstanceObject::cast(
- dispatch_tables->get(i + kDispatchTableInstanceOffset));
- auto sig_id = to_instance->module()->signature_map.Find(sig);
- auto& entry = to_instance->wasm_context()->get()->table[table_index];
- entry.sig_id = sig_id;
- entry.context = from_instance->wasm_context()->get();
- entry.target = wasm_code.instructions().start();
- }
- } else {
- // We may need to compile a new WASM->WASM wrapper for this.
- Handle<Object> code_or_foreign = wasm::GetOrCreateIndirectCallWrapper(
- isolate, from_instance, wasm_code, func_index, sig);
+ wasm::WasmCode* wasm_code) {
+ // We simply need to update the IFTs for each instance that imports
+ // this table.
+ DisallowHeapAllocation no_gc;
+ FixedArray* dispatch_tables = table->dispatch_tables();
+ DCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements);
- DisallowHeapAllocation no_gc;
- FixedArray* dispatch_tables = table->dispatch_tables();
- DCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements);
-
- for (int i = 0; i < dispatch_tables->length();
- i += kDispatchTableNumElements) {
- // Note that {SignatureMap::Find} may return {-1} if the signature is
- // not found; it will simply never match any check.
- WasmInstanceObject* to_instance = WasmInstanceObject::cast(
- dispatch_tables->get(i + kDispatchTableInstanceOffset));
- auto sig_id = to_instance->module()->signature_map.Find(sig);
-
- FixedArray* function_table = FixedArray::cast(
- dispatch_tables->get(i + kDispatchTableFunctionTableOffset));
-
- function_table->set(compiler::FunctionTableSigOffset(table_index),
- Smi::FromInt(sig_id));
- function_table->set(compiler::FunctionTableCodeOffset(table_index),
- *code_or_foreign);
- }
+ for (int i = 0; i < dispatch_tables->length();
+ i += kDispatchTableNumElements) {
+ // Note that {SignatureMap::Find} may return {-1} if the signature is
+ // not found; it will simply never match any check.
+ WasmInstanceObject* to_instance = WasmInstanceObject::cast(
+ dispatch_tables->get(i + kDispatchTableInstanceOffset));
+ auto sig_id = to_instance->module()->signature_map.Find(sig);
+ IndirectFunctionTableEntry(to_instance, table_index)
+ .set(sig_id, *from_instance, wasm_code);
}
}
@@ -405,31 +400,20 @@ void WasmTableObject::ClearDispatchTables(Handle<WasmTableObject> table,
DCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements);
for (int i = 0; i < dispatch_tables->length();
i += kDispatchTableNumElements) {
- if (WASM_CONTEXT_TABLES) {
- constexpr int kInvalidSigIndex = -1; // TODO(titzer): move to header.
- WasmInstanceObject* to_instance = WasmInstanceObject::cast(
- dispatch_tables->get(i + kDispatchTableInstanceOffset));
- DCHECK_LT(index, to_instance->wasm_context()->get()->table_size);
- auto& entry = to_instance->wasm_context()->get()->table[index];
- entry.sig_id = kInvalidSigIndex;
- entry.context = nullptr;
- entry.target = nullptr;
- } else {
- FixedArray* function_table = FixedArray::cast(
- dispatch_tables->get(i + kDispatchTableFunctionTableOffset));
- function_table->set(compiler::FunctionTableSigOffset(index),
- Smi::FromInt(-1));
- function_table->set(compiler::FunctionTableCodeOffset(index), Smi::kZero);
- }
+ WasmInstanceObject* target_instance = WasmInstanceObject::cast(
+ dispatch_tables->get(i + kDispatchTableInstanceOffset));
+ DCHECK_LT(index, target_instance->indirect_function_table_size());
+ IndirectFunctionTableEntry(target_instance, index).clear();
}
}
namespace {
-Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
- Handle<JSArrayBuffer> old_buffer,
- uint32_t pages, uint32_t maximum_pages,
- bool use_trap_handler) {
- if (!old_buffer->is_growable()) return Handle<JSArrayBuffer>::null();
+MaybeHandle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
+ Handle<JSArrayBuffer> old_buffer,
+ uint32_t pages,
+ uint32_t maximum_pages,
+ bool use_trap_handler) {
+ if (!old_buffer->is_growable()) return {};
Address old_mem_start = nullptr;
uint32_t old_size = 0;
if (!old_buffer.is_null()) {
@@ -440,14 +424,12 @@ Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
uint32_t old_pages = old_size / wasm::kWasmPageSize;
DCHECK_GE(std::numeric_limits<uint32_t>::max(),
old_size + pages * wasm::kWasmPageSize);
- if (old_pages > maximum_pages || pages > maximum_pages - old_pages) {
- return Handle<JSArrayBuffer>::null();
- }
+ if (old_pages > maximum_pages || pages > maximum_pages - old_pages) return {};
size_t new_size =
static_cast<size_t>(old_pages + pages) * wasm::kWasmPageSize;
if (new_size > FLAG_wasm_max_mem_pages * wasm::kWasmPageSize ||
new_size > kMaxInt) {
- return Handle<JSArrayBuffer>::null();
+ return {};
}
// Reusing the backing store from externalized buffers causes problems with
// Blink's array buffers. The connection between the two is lost, which can
@@ -457,34 +439,39 @@ Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
((new_size < old_buffer->allocation_length()) || old_size == new_size)) {
DCHECK_NOT_NULL(old_buffer->backing_store());
if (old_size != new_size) {
- CHECK(i::SetPermissions(old_mem_start, new_size,
- PageAllocator::kReadWrite));
+      // If adjusting permissions fails, return failure so that the grow
+      // operation is reported as unsuccessful.
+ DCHECK(!isolate->wasm_engine()->memory_tracker()->IsEmptyBackingStore(
+ old_mem_start));
+ if (!i::SetPermissions(old_mem_start, new_size,
+ PageAllocator::kReadWrite)) {
+ return {};
+ }
reinterpret_cast<v8::Isolate*>(isolate)
->AdjustAmountOfExternalAllocatedMemory(pages * wasm::kWasmPageSize);
}
// NOTE: We must allocate a new array buffer here because the spec
// assumes that ArrayBuffers do not change size.
- void* allocation_base = old_buffer->allocation_base();
- size_t allocation_length = old_buffer->allocation_length();
void* backing_store = old_buffer->backing_store();
- bool has_guard_region = old_buffer->has_guard_region();
bool is_external = old_buffer->is_external();
// Disconnect buffer early so GC won't free it.
i::wasm::DetachMemoryBuffer(isolate, old_buffer, false);
- Handle<JSArrayBuffer> new_buffer = wasm::SetupArrayBuffer(
- isolate, allocation_base, allocation_length, backing_store, new_size,
- is_external, has_guard_region);
+ Handle<JSArrayBuffer> new_buffer =
+ wasm::SetupArrayBuffer(isolate, backing_store, new_size, is_external);
return new_buffer;
} else {
- bool free_memory = false;
+ // We couldn't reuse the old backing store, so create a new one and copy the
+ // old contents in.
Handle<JSArrayBuffer> new_buffer;
- // Allocate a new buffer and memcpy the old contents.
- free_memory = true;
- new_buffer = wasm::NewArrayBuffer(isolate, new_size, use_trap_handler);
- if (new_buffer.is_null() || old_size == 0) return new_buffer;
+ if (!wasm::NewArrayBuffer(isolate, new_size, use_trap_handler)
+ .ToHandle(&new_buffer)) {
+ return {};
+ }
+ if (old_size == 0) return new_buffer;
Address new_mem_start = static_cast<Address>(new_buffer->backing_store());
memcpy(new_mem_start, old_mem_start, old_size);
DCHECK(old_buffer.is_null() || !old_buffer->is_shared());
+ constexpr bool free_memory = true;
i::wasm::DetachMemoryBuffer(isolate, old_buffer, free_memory);
return new_buffer;
}
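GrowMemoryBuffer above now returns a MaybeHandle and has two paths: reuse the existing backing store when the reservation is large enough (only flipping page permissions), otherwise allocate a new buffer, memcpy the old contents and detach the old one. A hedged standalone sketch of the same control flow (plain malloc-based C++; no guard pages and no ArrayBuffer detach semantics):

// Standalone sketch, not V8 code; plain allocator, no page permissions.
#include <cassert>
#include <cstdlib>
#include <cstring>

struct Buffer {
  unsigned char* data = nullptr;
  size_t size = 0;      // currently usable bytes
  size_t reserved = 0;  // bytes reserved up front (the "reuse in place" budget)
};

// Returns true on success, false to signal that the grow failed (the empty
// MaybeHandle case above).
bool GrowBuffer(Buffer* buffer, size_t new_size, size_t max_size) {
  if (new_size > max_size) return false;  // respect the declared maximum
  if (new_size <= buffer->reserved) {
    // In-place path: enough space was reserved up front, so only the usable
    // size changes (V8 additionally flips page permissions here).
    buffer->size = new_size;
    return true;
  }
  // Copy path: allocate a fresh backing store, copy the old contents, and
  // release the old one (DetachMemoryBuffer with free_memory == true above).
  unsigned char* fresh = static_cast<unsigned char*>(std::calloc(new_size, 1));
  if (fresh == nullptr) return false;
  std::memcpy(fresh, buffer->data, buffer->size);
  std::free(buffer->data);
  buffer->data = fresh;
  buffer->size = new_size;
  buffer->reserved = new_size;
  return true;
}

int main() {
  Buffer buffer;
  buffer.data = static_cast<unsigned char*>(std::calloc(64, 1));
  buffer.size = 16;
  buffer.reserved = 64;
  assert(GrowBuffer(&buffer, 48, 1024));     // fits the reservation: grown in place
  assert(GrowBuffer(&buffer, 256, 1024));    // beyond the reservation: copy path
  assert(!GrowBuffer(&buffer, 2048, 1024));  // exceeds the maximum: failure
  std::free(buffer.data);
  return 0;
}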
@@ -493,16 +480,17 @@ Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
// May GC, because SetSpecializationMemInfoFrom may GC
void SetInstanceMemory(Isolate* isolate, Handle<WasmInstanceObject> instance,
Handle<JSArrayBuffer> buffer) {
- auto wasm_context = instance->wasm_context()->get();
- wasm_context->SetRawMemory(reinterpret_cast<byte*>(buffer->backing_store()),
- buffer->byte_length()->Number());
+ instance->SetRawMemory(reinterpret_cast<byte*>(buffer->backing_store()),
+ buffer->byte_length()->Number());
#if DEBUG
// To flush out bugs earlier, in DEBUG mode, check that all pages of the
// memory are accessible by reading and writing one byte on each page.
- for (uint32_t offset = 0; offset < wasm_context->mem_size;
- offset += wasm::kWasmPageSize) {
- byte val = wasm_context->mem_start[offset];
- wasm_context->mem_start[offset] = val;
+ byte* mem_start = instance->memory_start();
+ uintptr_t mem_size = instance->memory_size();
+ for (uint32_t offset = 0; offset < mem_size; offset += wasm::kWasmPageSize) {
+ byte val = mem_start[offset];
+ USE(val);
+ mem_start[offset] = val;
}
#endif
}
@@ -524,11 +512,7 @@ Handle<WasmMemoryObject> WasmMemoryObject::New(
Handle<JSArrayBuffer> buffer;
if (maybe_buffer.is_null()) {
// If no buffer was provided, create a 0-length one.
-
- // TODO(kschimpf): Modify to use argument defining style of
- // memory. (see above).
- buffer = wasm::SetupArrayBuffer(isolate, nullptr, 0, nullptr, 0, false,
- trap_handler::IsTrapHandlerEnabled());
+ buffer = wasm::SetupArrayBuffer(isolate, nullptr, 0, false);
} else {
buffer = maybe_buffer.ToHandleChecked();
// Paranoid check that the buffer size makes sense.
@@ -550,12 +534,12 @@ uint32_t WasmMemoryObject::current_pages() {
void WasmMemoryObject::AddInstance(Isolate* isolate,
Handle<WasmMemoryObject> memory,
Handle<WasmInstanceObject> instance) {
- Handle<WeakFixedArray> old_instances =
+ Handle<FixedArrayOfWeakCells> old_instances =
memory->has_instances()
- ? Handle<WeakFixedArray>(memory->instances(), isolate)
- : Handle<WeakFixedArray>::null();
- Handle<WeakFixedArray> new_instances =
- WeakFixedArray::Add(old_instances, instance);
+ ? Handle<FixedArrayOfWeakCells>(memory->instances(), isolate)
+ : Handle<FixedArrayOfWeakCells>::null();
+ Handle<FixedArrayOfWeakCells> new_instances =
+ FixedArrayOfWeakCells::Add(old_instances, instance);
memory->set_instances(*new_instances);
Handle<JSArrayBuffer> buffer(memory->array_buffer(), isolate);
SetInstanceMemory(isolate, instance, buffer);
@@ -587,12 +571,15 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
}
// TODO(kschimpf): We need to fix this by adding a field to WasmMemoryObject
// that defines the style of memory being used.
- new_buffer = GrowMemoryBuffer(isolate, old_buffer, pages, maximum_pages,
- trap_handler::IsTrapHandlerEnabled());
- if (new_buffer.is_null()) return -1;
+ if (!GrowMemoryBuffer(isolate, old_buffer, pages, maximum_pages,
+ trap_handler::IsTrapHandlerEnabled())
+ .ToHandle(&new_buffer)) {
+ return -1;
+ }
if (memory_object->has_instances()) {
- Handle<WeakFixedArray> instances(memory_object->instances(), isolate);
+ Handle<FixedArrayOfWeakCells> instances(memory_object->instances(),
+ isolate);
for (int i = 0; i < instances->Length(); i++) {
Object* elem = instances->Get(i);
if (!elem->IsWasmInstanceObject()) continue;
@@ -605,6 +592,138 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
return old_size / wasm::kWasmPageSize;
}
+// static
+MaybeHandle<WasmGlobalObject> WasmGlobalObject::New(
+ Isolate* isolate, MaybeHandle<JSArrayBuffer> maybe_buffer,
+ wasm::ValueType type, int32_t offset, bool is_mutable) {
+ Handle<JSFunction> global_ctor(
+ isolate->native_context()->wasm_global_constructor());
+ auto global_obj = Handle<WasmGlobalObject>::cast(
+ isolate->factory()->NewJSObject(global_ctor));
+
+ uint32_t type_size = TypeSize(type);
+
+ Handle<JSArrayBuffer> buffer;
+ if (!maybe_buffer.ToHandle(&buffer)) {
+ // If no buffer was provided, create one long enough for the given type.
+ buffer =
+ isolate->factory()->NewJSArrayBuffer(SharedFlag::kNotShared, TENURED);
+
+ const bool initialize = true;
+ if (!JSArrayBuffer::SetupAllocatingData(buffer, isolate, type_size,
+ initialize)) {
+ return {};
+ }
+ }
+
+ // Check that the offset is in bounds.
+ uint32_t buffer_size = 0;
+ CHECK(buffer->byte_length()->ToUint32(&buffer_size));
+ CHECK(offset + type_size <= buffer_size);
+
+ global_obj->set_array_buffer(*buffer);
+ global_obj->set_flags(0);
+ global_obj->set_type(type);
+ global_obj->set_offset(offset);
+ global_obj->set_is_mutable(is_mutable);
+
+ return global_obj;
+}
+
+void IndirectFunctionTableEntry::clear() {
+ instance_->indirect_function_table_sig_ids()[index_] = -1;
+ instance_->indirect_function_table_targets()[index_] = 0;
+ instance_->indirect_function_table_instances()->set(
+ index_, instance_->GetIsolate()->heap()->undefined_value());
+}
+
+void IndirectFunctionTableEntry::set(int sig_id, WasmInstanceObject* instance,
+ const wasm::WasmCode* wasm_code) {
+ TRACE_IFT("IFT entry %p[%d] = {sig_id=%d, instance=%p, target=%p}\n",
+ instance_, index_, sig_id, instance,
+ wasm_code->instructions().start());
+ instance_->indirect_function_table_sig_ids()[index_] = sig_id;
+ instance_->indirect_function_table_targets()[index_] =
+ wasm_code->instructions().start();
+ instance_->indirect_function_table_instances()->set(index_, instance);
+}
+
+WasmInstanceObject* IndirectFunctionTableEntry::instance() {
+ return WasmInstanceObject::cast(
+ instance_->indirect_function_table_instances()->get(index_));
+}
+
+int IndirectFunctionTableEntry::sig_id() {
+ return instance_->indirect_function_table_sig_ids()[index_];
+}
+
+Address IndirectFunctionTableEntry::target() {
+ return instance_->indirect_function_table_targets()[index_];
+}
+
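IndirectFunctionTableEntry above stores a signature id, a code target and the owning instance per slot; a cleared slot has sig_id -1, so it can never match a caller's expected signature. As an illustrative standalone sketch of what such an entry buys at call_indirect time (function pointers stand in for wasm code targets and exceptions stand in for runtime traps):

// Standalone sketch, not V8 code; traps are modeled with exceptions.
#include <cassert>
#include <cstdint>
#include <stdexcept>

struct IftEntry {
  int32_t sig_id = -1;                  // -1 is the cleared state, matches no caller
  int32_t (*target)(int32_t) = nullptr;
};

int32_t Square(int32_t x) { return x * x; }

int32_t CallIndirect(const IftEntry* table, uint32_t table_size, uint32_t index,
                     int32_t expected_sig_id, int32_t arg) {
  if (index >= table_size) throw std::out_of_range("table index out of bounds");
  const IftEntry& entry = table[index];
  if (entry.sig_id != expected_sig_id) {
    throw std::runtime_error("indirect call signature mismatch");
  }
  return entry.target(arg);
}

int main() {
  IftEntry table[4];        // all slots start cleared, like clear() above
  table[2] = {7, &Square};  // like IndirectFunctionTableEntry::set()
  assert(CallIndirect(table, 4, 2, 7, 6) == 36);

  bool trapped = false;
  try {
    CallIndirect(table, 4, 0, 7, 6);  // cleared slot: the signature never matches
  } catch (const std::runtime_error&) {
    trapped = true;
  }
  assert(trapped);
  return 0;
}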
+void ImportedFunctionEntry::set(JSReceiver* callable,
+ const wasm::WasmCode* wasm_to_js_wrapper) {
+ TRACE_IFT("Import callable %p[%d] = {callable=%p, target=%p}\n", instance_,
+ index_, callable, wasm_to_js_wrapper->instructions().start());
+ DCHECK_EQ(wasm::WasmCode::kWasmToJsWrapper, wasm_to_js_wrapper->kind());
+ instance_->imported_function_instances()->set(index_, instance_);
+ instance_->imported_function_callables()->set(index_, callable);
+ instance_->imported_function_targets()[index_] =
+ wasm_to_js_wrapper->instructions().start();
+}
+
+void ImportedFunctionEntry::set(WasmInstanceObject* instance,
+ const wasm::WasmCode* wasm_code) {
+ TRACE_IFT("Import WASM %p[%d] = {instance=%p, target=%p}\n", instance_,
+ index_, instance, wasm_code->instructions().start());
+ instance_->imported_function_instances()->set(index_, instance);
+ instance_->imported_function_callables()->set(
+ index_, instance_->GetHeap()->undefined_value());
+ instance_->imported_function_targets()[index_] =
+ wasm_code->instructions().start();
+}
+
+WasmInstanceObject* ImportedFunctionEntry::instance() {
+ return WasmInstanceObject::cast(
+ instance_->imported_function_instances()->get(index_));
+}
+
+JSReceiver* ImportedFunctionEntry::callable() {
+ return JSReceiver::cast(
+ instance_->imported_function_callables()->get(index_));
+}
+
+Address ImportedFunctionEntry::target() {
+ return instance_->imported_function_targets()[index_];
+}
+
+bool ImportedFunctionEntry::is_js_receiver_entry() {
+ return instance_->imported_function_callables()->get(index_)->IsJSReceiver();
+}
+
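ImportedFunctionEntry above has two set() flavors: a JS callable reached through a wasm-to-js wrapper, or another instance's wasm code called directly, and is_js_receiver_entry() tells them apart by what was stored. A small standalone sketch of that shape (std::function plays the JS callable, a raw function pointer plays wasm code; names are illustrative):

// Standalone sketch, not V8 code; the two import flavors are modeled directly.
#include <cassert>
#include <functional>

struct ImportEntry {
  std::function<int(int)> callable;   // set for JS-style imports, empty otherwise
  int (*wasm_target)(int) = nullptr;  // set for wasm-to-wasm imports
  bool is_js_entry() const { return static_cast<bool>(callable); }
  int Call(int arg) const {
    return is_js_entry() ? callable(arg) : wasm_target(arg);
  }
};

int Twice(int x) { return 2 * x; }

int main() {
  ImportEntry js_import;
  js_import.callable = [](int x) { return x + 1; };  // the "callable" flavor of set()
  ImportEntry wasm_import;
  wasm_import.wasm_target = &Twice;                   // the "wasm code" flavor of set()

  assert(js_import.is_js_entry() && js_import.Call(4) == 5);
  assert(!wasm_import.is_js_entry() && wasm_import.Call(4) == 8);
  return 0;
}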
+bool WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
+ Handle<WasmInstanceObject> instance, uint32_t minimum_size) {
+ uint32_t old_size = instance->indirect_function_table_size();
+ if (old_size >= minimum_size) return false; // Nothing to do.
+
+ Isolate* isolate = instance->GetIsolate();
+ HandleScope scope(isolate);
+ auto native_allocations = GetNativeAllocations(*instance);
+ native_allocations->resize_indirect_function_table(isolate, instance,
+ minimum_size);
+ return true;
+}
+
+void WasmInstanceObject::SetRawMemory(byte* mem_start, uint32_t mem_size) {
+ DCHECK_LE(mem_size, wasm::kV8MaxWasmMemoryPages * wasm::kWasmPageSize);
+ uint32_t mem_size64 = mem_size;
+ uint32_t mem_mask64 = base::bits::RoundUpToPowerOfTwo32(mem_size) - 1;
+ DCHECK_LE(mem_size, mem_mask64 + 1);
+ set_memory_start(mem_start);
+ set_memory_size(mem_size64);
+ set_memory_mask(mem_mask64);
+}
+
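SetRawMemory above stores, next to the memory start and size, a mask of (smallest power of two >= mem_size) minus one; an index ANDed with that mask cannot address past the power-of-two-rounded region, which is presumably what the masked bounds checks in generated code rely on. A standalone sketch of the mask computation (this is a generic bit-twiddling RoundUpToPowerOfTwo32, not V8's base implementation):

// Standalone sketch, not V8 code; generic power-of-two rounding.
#include <cassert>
#include <cstdint>

uint32_t RoundUpToPowerOfTwo32(uint32_t value) {
  if (value <= 1) return 1;
  value--;
  value |= value >> 1;
  value |= value >> 2;
  value |= value >> 4;
  value |= value >> 8;
  value |= value >> 16;
  return value + 1;
}

int main() {
  uint32_t mem_size = 5 * 65536;  // five wasm pages
  uint32_t mem_mask = RoundUpToPowerOfTwo32(mem_size) - 1;
  assert(mem_mask == 0x7ffff);    // next power of two is 512 KiB

  uint32_t wild_index = 0xdeadbeef;
  // A masked index always stays inside the rounded-up region.
  assert((wild_index & mem_mask) < RoundUpToPowerOfTwo32(mem_size));
  assert(mem_size <= mem_mask + 1);  // the invariant DCHECKed by SetRawMemory
  return 0;
}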
WasmModuleObject* WasmInstanceObject::module_object() {
return compiled_module()->wasm_module();
}
@@ -631,21 +750,30 @@ Handle<WasmInstanceObject> WasmInstanceObject::New(
Handle<WasmInstanceObject> instance(
reinterpret_cast<WasmInstanceObject*>(*instance_object), isolate);
- auto wasm_context = Managed<WasmContext>::Allocate(isolate);
- wasm_context->get()->SetRawMemory(nullptr, 0);
- wasm_context->get()->globals_start = nullptr;
- instance->set_wasm_context(*wasm_context);
+ // Initialize the imported function arrays.
+ auto num_imported_functions =
+ compiled_module->shared()->module()->num_imported_functions;
+ auto native_allocations = Managed<WasmInstanceNativeAllocations>::Allocate(
+ isolate, instance, num_imported_functions);
+ instance->set_managed_native_allocations(*native_allocations);
+
+ Handle<FixedArray> imported_function_instances =
+ isolate->factory()->NewFixedArray(num_imported_functions);
+
+ instance->set_imported_function_instances(*imported_function_instances);
+ Handle<FixedArray> imported_function_callables =
+ isolate->factory()->NewFixedArray(num_imported_functions);
+
+ instance->set_imported_function_callables(*imported_function_callables);
+ instance->SetRawMemory(nullptr, 0);
+ instance->set_globals_start(nullptr);
+ instance->set_indirect_function_table_size(0);
+ instance->set_indirect_function_table_sig_ids(nullptr);
+ instance->set_indirect_function_table_targets(nullptr);
instance->set_compiled_module(*compiled_module);
- return instance;
-}
-int32_t WasmInstanceObject::GrowMemory(Isolate* isolate,
- Handle<WasmInstanceObject> instance,
- uint32_t pages) {
- DCHECK(instance->has_memory_object());
- return WasmMemoryObject::Grow(
- isolate, handle(instance->memory_object(), isolate), pages);
+ return instance;
}
WasmInstanceObject* WasmInstanceObject::GetOwningInstance(
@@ -654,21 +782,7 @@ WasmInstanceObject* WasmInstanceObject::GetOwningInstance(
Object* weak_link = nullptr;
DCHECK(code->kind() == wasm::WasmCode::kFunction ||
code->kind() == wasm::WasmCode::kInterpreterStub);
- weak_link = code->owner()->compiled_module()->weak_owning_instance();
- DCHECK(weak_link->IsWeakCell());
- WeakCell* cell = WeakCell::cast(weak_link);
- if (cell->cleared()) return nullptr;
- return WasmInstanceObject::cast(cell->value());
-}
-
-WasmInstanceObject* WasmInstanceObject::GetOwningInstanceGC(Code* code) {
- DisallowHeapAllocation no_gc;
- DCHECK(code->kind() == Code::WASM_FUNCTION ||
- code->kind() == Code::WASM_INTERPRETER_ENTRY);
- FixedArray* deopt_data = code->deoptimization_data();
- DCHECK_EQ(code->kind() == Code::WASM_INTERPRETER_ENTRY ? 1 : 2,
- deopt_data->length());
- Object* weak_link = deopt_data->get(0);
+ weak_link = code->native_module()->compiled_module()->weak_owning_instance();
DCHECK(weak_link->IsWeakCell());
WeakCell* cell = WeakCell::cast(weak_link);
if (cell->cleared()) return nullptr;
@@ -683,7 +797,7 @@ void WasmInstanceObject::ValidateInstancesChainForTesting(
CHECK_EQ(JSObject::cast(compiled_module->weak_wasm_module()->value()),
*module_obj);
Object* prev = nullptr;
- int found_instances = compiled_module->has_weak_owning_instance() ? 1 : 0;
+ int found_instances = compiled_module->has_instance() ? 1 : 0;
WasmCompiledModule* current_instance = compiled_module;
while (current_instance->has_next_instance()) {
CHECK((prev == nullptr && !current_instance->has_prev_instance()) ||
@@ -713,36 +827,16 @@ namespace {
void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
DisallowHeapAllocation no_gc;
JSObject** p = reinterpret_cast<JSObject**>(data.GetParameter());
- WasmInstanceObject* owner = reinterpret_cast<WasmInstanceObject*>(*p);
+ WasmInstanceObject* instance = reinterpret_cast<WasmInstanceObject*>(*p);
Isolate* isolate = reinterpret_cast<Isolate*>(data.GetIsolate());
// If a link to shared memory instances exists, update the list of memory
// instances before the instance is destroyed.
- WasmCompiledModule* compiled_module = owner->compiled_module();
+ WasmCompiledModule* compiled_module = instance->compiled_module();
wasm::NativeModule* native_module = compiled_module->GetNativeModule();
- if (FLAG_wasm_jit_to_native) {
- if (native_module) {
- TRACE("Finalizing %zu {\n", native_module->instance_id);
- } else {
- TRACE("Finalized already cleaned up compiled module\n");
- }
+ if (native_module) {
+ TRACE("Finalizing %zu {\n", native_module->instance_id);
} else {
- TRACE("Finalizing %d {\n", compiled_module->instance_id());
-
- if (compiled_module->use_trap_handler()) {
- // TODO(6792): No longer needed once WebAssembly code is off heap.
- CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
- DisallowHeapAllocation no_gc;
- FixedArray* code_table = compiled_module->code_table();
- for (int i = 0; i < code_table->length(); ++i) {
- Code* code = Code::cast(code_table->get(i));
- int index = code->trap_handler_index()->value();
- if (index >= 0) {
- trap_handler::ReleaseHandlerData(index);
- code->set_trap_handler_index(
- Smi::FromInt(trap_handler::kInvalidIndex));
- }
- }
- }
+ TRACE("Finalized already cleaned up compiled module\n");
}
WeakCell* weak_wasm_module = compiled_module->weak_wasm_module();
@@ -752,10 +846,9 @@ void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
// Weak references to this instance won't be cleared until
// the next GC cycle, so we need to manually break some links (such as
// the weak references from {WasmMemoryObject::instances}.
- if (owner->has_memory_object()) {
- Handle<WasmMemoryObject> memory(owner->memory_object(), isolate);
- Handle<WasmInstanceObject> instance(owner, isolate);
- WasmMemoryObject::RemoveInstance(isolate, memory, instance);
+ if (instance->has_memory_object()) {
+ WasmMemoryObject::RemoveInstance(isolate, handle(instance->memory_object()),
+ handle(instance));
}
// weak_wasm_module may have been cleared, meaning the module object
@@ -778,9 +871,11 @@ void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
}
}
+ // Free raw C++ memory associated with the instance.
+ GetNativeAllocations(instance)->free();
+
compiled_module->RemoveFromChain();
- compiled_module->reset_weak_owning_instance();
GlobalHandles::Destroy(reinterpret_cast<Object**>(p));
TRACE("}\n");
}
@@ -799,13 +894,17 @@ bool WasmExportedFunction::IsWasmExportedFunction(Object* object) {
Handle<JSFunction> js_function(JSFunction::cast(object));
if (Code::JS_TO_WASM_FUNCTION != js_function->code()->kind()) return false;
+#ifdef DEBUG
+ // Any function having code of {JS_TO_WASM_FUNCTION} kind must be an exported
+ // function and hence will have a property holding the instance object.
Handle<Symbol> symbol(
js_function->GetIsolate()->factory()->wasm_instance_symbol());
- MaybeHandle<Object> maybe_result =
+ MaybeHandle<Object> result =
JSObject::GetPropertyOrElement(js_function, symbol);
- Handle<Object> result;
- if (!maybe_result.ToHandle(&result)) return false;
- return result->IsWasmInstanceObject();
+ DCHECK(result.ToHandleChecked()->IsWasmInstanceObject());
+#endif
+
+ return true;
}
WasmExportedFunction* WasmExportedFunction::cast(Object* object) {
@@ -843,18 +942,15 @@ Handle<WasmExportedFunction> WasmExportedFunction::New(
Vector<uint8_t>::cast(buffer.SubVector(0, length)))
.ToHandleChecked();
}
- Handle<SharedFunctionInfo> shared =
- isolate->factory()->NewSharedFunctionInfo(name, export_wrapper, false);
- shared->set_length(arity);
- shared->set_internal_formal_parameter_count(arity);
NewFunctionArgs args = NewFunctionArgs::ForWasm(
name, export_wrapper, isolate->sloppy_function_without_prototype_map());
Handle<JSFunction> js_function = isolate->factory()->NewFunction(args);
// According to the spec, exported functions should not have a [[Construct]]
// method.
DCHECK(!js_function->IsConstructor());
+ js_function->shared()->set_length(arity);
+ js_function->shared()->set_internal_formal_parameter_count(arity);
- js_function->set_shared(*shared);
Handle<Symbol> instance_symbol(isolate->factory()->wasm_instance_symbol());
JSObject::AddProperty(js_function, instance_symbol, instance, DONT_ENUM);
@@ -866,103 +962,51 @@ Handle<WasmExportedFunction> WasmExportedFunction::New(
return Handle<WasmExportedFunction>::cast(js_function);
}
-WasmCodeWrapper WasmExportedFunction::GetWasmCode() {
+wasm::WasmCode* WasmExportedFunction::GetWasmCode() {
DisallowHeapAllocation no_gc;
Handle<Code> export_wrapper_code = handle(this->code());
DCHECK_EQ(export_wrapper_code->kind(), Code::JS_TO_WASM_FUNCTION);
- int mask =
- RelocInfo::ModeMask(FLAG_wasm_jit_to_native ? RelocInfo::JS_TO_WASM_CALL
- : RelocInfo::CODE_TARGET);
- auto IsWasmFunctionCode = [](Code* code) {
- return code->kind() == Code::WASM_FUNCTION ||
- code->kind() == Code::WASM_TO_JS_FUNCTION ||
- code->kind() == Code::WASM_TO_WASM_FUNCTION ||
- code->kind() == Code::WASM_INTERPRETER_ENTRY ||
- code->builtin_index() == Builtins::kWasmCompileLazy;
- };
-
- for (RelocIterator it(*export_wrapper_code, mask);; it.next()) {
- DCHECK(!it.done());
- WasmCodeWrapper target;
- if (FLAG_wasm_jit_to_native) {
- target = WasmCodeWrapper(
- GetIsolate()->wasm_engine()->code_manager()->LookupCode(
- it.rinfo()->js_to_wasm_address()));
- } else {
- Code* code = Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
- if (!IsWasmFunctionCode(code)) continue;
- target = WasmCodeWrapper(handle(code));
- }
-// There should only be this one call to wasm code.
+ int mask = RelocInfo::ModeMask(RelocInfo::JS_TO_WASM_CALL);
+ RelocIterator it(*export_wrapper_code, mask);
+ DCHECK(!it.done());
+ wasm::WasmCode* target =
+ GetIsolate()->wasm_engine()->code_manager()->LookupCode(
+ it.rinfo()->js_to_wasm_address());
#ifdef DEBUG
- for (it.next(); !it.done(); it.next()) {
- if (FLAG_wasm_jit_to_native) {
- UNREACHABLE();
- } else {
- Code* code =
- Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
- DCHECK(!IsWasmFunctionCode(code));
- }
- }
+ // There should only be this one call to wasm code.
+ it.next();
+ DCHECK(it.done());
#endif
- return target;
- }
- UNREACHABLE();
-}
-
-bool WasmSharedModuleData::IsWasmSharedModuleData(Object* object) {
- if (!object->IsFixedArray()) return false;
- FixedArray* arr = FixedArray::cast(object);
- if (arr->length() != kFieldCount) return false;
- Isolate* isolate = arr->GetIsolate();
- if (!arr->get(kModuleWrapperIndex)->IsForeign()) return false;
- if (!arr->get(kModuleBytesIndex)->IsUndefined(isolate) &&
- !arr->get(kModuleBytesIndex)->IsSeqOneByteString())
- return false;
- if (!arr->get(kScriptIndex)->IsScript()) return false;
- if (!arr->get(kAsmJsOffsetTableIndex)->IsUndefined(isolate) &&
- !arr->get(kAsmJsOffsetTableIndex)->IsByteArray())
- return false;
- if (!arr->get(kBreakPointInfosIndex)->IsUndefined(isolate) &&
- !arr->get(kBreakPointInfosIndex)->IsFixedArray())
- return false;
- return true;
-}
-
-WasmSharedModuleData* WasmSharedModuleData::cast(Object* object) {
- DCHECK(IsWasmSharedModuleData(object));
- return reinterpret_cast<WasmSharedModuleData*>(object);
+ return target;
}
-WasmModule* WasmSharedModuleData::module() {
+WasmModule* WasmSharedModuleData::module() const {
// We populate the kModuleWrapper field with a Foreign holding the
// address to the address of a WasmModule. This is because we can
// handle both cases when the WasmModule's lifetime is managed through
// a Managed<WasmModule> object, as well as cases when it's managed
// by the embedder. CcTests fall into the latter case.
return *(reinterpret_cast<WasmModule**>(
- Foreign::cast(get(kModuleWrapperIndex))->foreign_address()));
+ Foreign::cast(module_wrapper())->foreign_address()));
}
Handle<WasmSharedModuleData> WasmSharedModuleData::New(
Isolate* isolate, Handle<Foreign> module_wrapper,
Handle<SeqOneByteString> module_bytes, Handle<Script> script,
Handle<ByteArray> asm_js_offset_table) {
- Handle<FixedArray> arr =
- isolate->factory()->NewFixedArray(kFieldCount, TENURED);
- arr->set(kModuleWrapperIndex, *module_wrapper);
+ Handle<WasmSharedModuleData> data = Handle<WasmSharedModuleData>::cast(
+ isolate->factory()->NewStruct(WASM_SHARED_MODULE_DATA_TYPE, TENURED));
+ data->set_module_wrapper(*module_wrapper);
if (!module_bytes.is_null()) {
- arr->set(kModuleBytesIndex, *module_bytes);
+ data->set_module_bytes(*module_bytes);
}
if (!script.is_null()) {
- arr->set(kScriptIndex, *script);
+ data->set_script(*script);
}
if (!asm_js_offset_table.is_null()) {
- arr->set(kAsmJsOffsetTableIndex, *asm_js_offset_table);
+ data->set_asm_js_offset_table(*asm_js_offset_table);
}
-
- DCHECK(WasmSharedModuleData::IsWasmSharedModuleData(*arr));
- return Handle<WasmSharedModuleData>::cast(arr);
+ return data;
}
bool WasmSharedModuleData::is_asm_js() {
@@ -972,47 +1016,6 @@ bool WasmSharedModuleData::is_asm_js() {
return asm_js;
}
-void WasmSharedModuleData::ReinitializeAfterDeserialization(
- Isolate* isolate, Handle<WasmSharedModuleData> shared) {
- DCHECK(shared->get(kModuleWrapperIndex)->IsUndefined(isolate));
-#ifdef DEBUG
- // No BreakpointInfo objects should survive deserialization.
- if (shared->has_breakpoint_infos()) {
- for (int i = 0, e = shared->breakpoint_infos()->length(); i < e; ++i) {
- DCHECK(shared->breakpoint_infos()->get(i)->IsUndefined(isolate));
- }
- }
-#endif
-
- shared->set(kBreakPointInfosIndex, isolate->heap()->undefined_value());
-
- WasmModule* module = nullptr;
- {
- // We parse the module again directly from the module bytes, so
- // the underlying storage must not be moved meanwhile.
- DisallowHeapAllocation no_allocation;
- SeqOneByteString* module_bytes = shared->module_bytes();
- const byte* start =
- reinterpret_cast<const byte*>(module_bytes->GetCharsAddress());
- const byte* end = start + module_bytes->length();
- // TODO(titzer): remember the module origin in the compiled_module
- // For now, we assume serialized modules did not originate from asm.js.
- wasm::ModuleResult result =
- SyncDecodeWasmModule(isolate, start, end, false, wasm::kWasmOrigin);
- CHECK(result.ok());
- CHECK_NOT_NULL(result.val);
- // Take ownership of the WasmModule and immediately transfer it to the
- // WasmModuleWrapper below.
- module = result.val.release();
- }
-
- Handle<wasm::WasmModuleWrapper> module_wrapper =
- wasm::WasmModuleWrapper::From(isolate, module);
-
- shared->set(kModuleWrapperIndex, *module_wrapper);
- DCHECK(WasmSharedModuleData::IsWasmSharedModuleData(*shared));
-}
-
namespace {
int GetBreakpointPos(Isolate* isolate, Object* break_point_info_or_undef) {
@@ -1054,7 +1057,7 @@ void WasmSharedModuleData::AddBreakpoint(Handle<WasmSharedModuleData> shared,
breakpoint_infos = handle(shared->breakpoint_infos(), isolate);
} else {
breakpoint_infos = isolate->factory()->NewFixedArray(4, TENURED);
- shared->set(kBreakPointInfosIndex, *breakpoint_infos);
+ shared->set_breakpoint_infos(*breakpoint_infos);
}
int insert_pos =
@@ -1078,7 +1081,7 @@ void WasmSharedModuleData::AddBreakpoint(Handle<WasmSharedModuleData> shared,
if (need_realloc) {
new_breakpoint_infos = isolate->factory()->NewFixedArray(
2 * breakpoint_infos->length(), TENURED);
- shared->set(kBreakPointInfosIndex, *new_breakpoint_infos);
+ shared->set_breakpoint_infos(*new_breakpoint_infos);
// Copy over the entries [0, insert_pos).
for (int i = 0; i < insert_pos; ++i)
new_breakpoint_infos->set(i, breakpoint_infos->get(i));
@@ -1131,16 +1134,6 @@ void WasmSharedModuleData::SetBreakpointsOnNewInstance(
}
}
-void WasmSharedModuleData::PrepareForLazyCompilation(
- Handle<WasmSharedModuleData> shared) {
- if (shared->has_lazy_compilation_orchestrator()) return;
- Isolate* isolate = shared->GetIsolate();
- // TODO(titzer): remove dependency on module-compiler.h
- auto orch_handle =
- Managed<wasm::LazyCompilationOrchestrator>::Allocate(isolate);
- shared->set_lazy_compilation_orchestrator(*orch_handle);
-}
-
namespace {
enum AsmJsOffsetTableEntryLayout {
@@ -1366,78 +1359,41 @@ MaybeHandle<FixedArray> WasmSharedModuleData::CheckBreakPoints(
Handle<BreakPointInfo>::cast(maybe_breakpoint_info);
if (breakpoint_info->source_position() != position) return {};
+ // There is no support for conditional break points. Just assume that every
+ // break point always hits.
Handle<Object> break_points(breakpoint_info->break_points(), isolate);
- return isolate->debug()->GetHitBreakPoints(break_points);
+ if (break_points->IsFixedArray()) {
+ return Handle<FixedArray>::cast(break_points);
+ }
+ Handle<FixedArray> break_points_hit = isolate->factory()->NewFixedArray(1);
+ break_points_hit->set(0, *break_points);
+ return break_points_hit;
}
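The rewritten CheckBreakPoints above no longer asks the debugger which break points hit: the stored value is either a single break point or a FixedArray of them, and both shapes are normalized to an array that is reported as fully hit. A hedged standalone sketch of that normalization (std::variant models the two storage shapes; the types are illustrative):

// Standalone sketch, not V8 code; the heap field's two shapes become a variant.
#include <cassert>
#include <variant>
#include <vector>

struct BreakPoint { int id; };
using StoredBreakPoints = std::variant<BreakPoint, std::vector<BreakPoint>>;

// Without conditional break points, every break point stored at the position
// is reported as hit; the only work is normalizing to an array.
std::vector<BreakPoint> CheckBreakPoints(const StoredBreakPoints& stored) {
  if (const auto* many = std::get_if<std::vector<BreakPoint>>(&stored)) {
    return *many;                         // already an array, return it as-is
  }
  return {std::get<BreakPoint>(stored)};  // wrap the single break point
}

int main() {
  StoredBreakPoints single = BreakPoint{1};
  StoredBreakPoints many = std::vector<BreakPoint>{{2}, {3}};
  assert(CheckBreakPoints(single).size() == 1);
  assert(CheckBreakPoints(many).size() == 2);
  return 0;
}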
Handle<WasmCompiledModule> WasmCompiledModule::New(
- Isolate* isolate, WasmModule* module, Handle<FixedArray> code_table,
- Handle<FixedArray> export_wrappers,
- const std::vector<GlobalHandleAddress>& function_tables,
+ Isolate* isolate, WasmModule* module, Handle<FixedArray> export_wrappers,
bool use_trap_handler) {
- Handle<FixedArray> ret =
- isolate->factory()->NewFixedArray(PropertyIndices::Count, TENURED);
- // WasmCompiledModule::cast would fail since fields are not set yet.
- Handle<WasmCompiledModule> compiled_module(
- reinterpret_cast<WasmCompiledModule*>(*ret), isolate);
+ Handle<WasmCompiledModule> compiled_module = Handle<WasmCompiledModule>::cast(
+ isolate->factory()->NewStruct(WASM_COMPILED_MODULE_TYPE, TENURED));
Handle<WeakCell> weak_native_context =
isolate->factory()->NewWeakCell(isolate->native_context());
compiled_module->set_weak_native_context(*weak_native_context);
compiled_module->set_use_trap_handler(use_trap_handler);
- if (!FLAG_wasm_jit_to_native) {
- compiled_module->InitId();
- compiled_module->set_code_table(*code_table);
+ if (!export_wrappers.is_null()) {
compiled_module->set_export_wrappers(*export_wrappers);
- // TODO(mtrofin): we copy these because the order of finalization isn't
- // reliable, and we need these at Reset (which is called at
- // finalization). If the order were reliable, and top-down, we could instead
- // just get them from shared().
- compiled_module->set_num_imported_functions(module->num_imported_functions);
-
- int num_function_tables = static_cast<int>(function_tables.size());
- if (num_function_tables > 0) {
- Handle<FixedArray> ft =
- isolate->factory()->NewFixedArray(num_function_tables, TENURED);
- for (int i = 0; i < num_function_tables; ++i) {
- SetTableValue(isolate, ft, i, function_tables[i]);
- }
- // TODO(wasm): setting the empty tables here this way is OK under the
- // assumption that we compile and then instantiate. It needs rework if we
- // do direct instantiation. The empty tables are used as a default when
- // resetting the compiled module.
- compiled_module->set_function_tables(*ft);
- compiled_module->set_empty_function_tables(*ft);
- }
- } else {
- if (!export_wrappers.is_null()) {
- compiled_module->set_export_wrappers(*export_wrappers);
- }
- wasm::NativeModule* native_module = nullptr;
- {
- std::unique_ptr<wasm::NativeModule> native_module_ptr =
- isolate->wasm_engine()->code_manager()->NewNativeModule(*module);
- native_module = native_module_ptr.release();
- Handle<Foreign> native_module_wrapper =
- Managed<wasm::NativeModule>::From(isolate, native_module);
- compiled_module->set_native_module(*native_module_wrapper);
- Handle<WasmCompiledModule> weak_link =
- isolate->global_handles()->Create(*compiled_module);
- GlobalHandles::MakeWeak(Handle<Object>::cast(weak_link).location(),
- Handle<Object>::cast(weak_link).location(),
- &CompiledModuleFinalizer,
- v8::WeakCallbackType::kFinalizer);
- compiled_module->GetNativeModule()->SetCompiledModule(weak_link);
- }
- // This is here just because it's easier for APIs that need to work with
- // either code_table or native_module. Otherwise we need to check if
- // has_code_table and pass undefined.
- compiled_module->set_code_table(*code_table);
-
- int function_count = static_cast<int>(module->functions.size());
- Handle<FixedArray> source_positions =
- isolate->factory()->NewFixedArray(function_count, TENURED);
- compiled_module->set_source_positions(*source_positions);
}
+ compiled_module->set_weak_owning_instance(isolate->heap()->empty_weak_cell());
+ wasm::NativeModule* native_module = nullptr;
+ {
+ std::unique_ptr<wasm::NativeModule> native_module_ptr =
+ isolate->wasm_engine()->code_manager()->NewNativeModule(*module);
+ native_module = native_module_ptr.release();
+ Handle<Foreign> native_module_wrapper =
+ Managed<wasm::NativeModule>::From(isolate, native_module);
+ compiled_module->set_native_module(*native_module_wrapper);
+ compiled_module->GetNativeModule()->SetCompiledModule(compiled_module);
+ }
+
// TODO(mtrofin): copy the rest of the specialization parameters over.
// We're currently OK because we're only using defaults.
return compiled_module;
@@ -1446,21 +1402,15 @@ Handle<WasmCompiledModule> WasmCompiledModule::New(
Handle<WasmCompiledModule> WasmCompiledModule::Clone(
Isolate* isolate, Handle<WasmCompiledModule> module) {
Handle<FixedArray> code_copy;
- if (!FLAG_wasm_jit_to_native) {
- code_copy = isolate->factory()->CopyFixedArray(
- handle(module->code_table(), isolate));
- }
Handle<WasmCompiledModule> ret = Handle<WasmCompiledModule>::cast(
- isolate->factory()->CopyFixedArray(module));
- ret->reset_weak_owning_instance();
- ret->reset_next_instance();
- ret->reset_prev_instance();
- ret->reset_weak_exported_functions();
- if (!FLAG_wasm_jit_to_native) {
- ret->InitId();
- ret->set_code_table(*code_copy);
- return ret;
- }
+ isolate->factory()->NewStruct(WASM_COMPILED_MODULE_TYPE, TENURED));
+ ret->set_shared(module->shared());
+ ret->set_weak_native_context(module->weak_native_context());
+ ret->set_export_wrappers(module->export_wrappers());
+ ret->set_weak_wasm_module(module->weak_wasm_module());
+ ret->set_weak_owning_instance(isolate->heap()->empty_weak_cell());
+ ret->set_native_module(module->native_module());
+ ret->set_use_trap_handler(module->use_trap_handler());
Handle<FixedArray> export_copy = isolate->factory()->CopyFixedArray(
handle(module->export_wrappers(), isolate));
@@ -1473,41 +1423,9 @@ Handle<WasmCompiledModule> WasmCompiledModule::Clone(
Handle<Foreign> native_module_wrapper =
Managed<wasm::NativeModule>::From(isolate, native_module.release());
ret->set_native_module(*native_module_wrapper);
- Handle<WasmCompiledModule> weak_link =
- isolate->global_handles()->Create(*ret);
- GlobalHandles::MakeWeak(Handle<Object>::cast(weak_link).location(),
- Handle<Object>::cast(weak_link).location(),
- &CompiledModuleFinalizer,
- v8::WeakCallbackType::kFinalizer);
- ret->GetNativeModule()->SetCompiledModule(weak_link);
-
- if (module->has_lazy_compile_data()) {
- Handle<FixedArray> lazy_comp_data = isolate->factory()->NewFixedArray(
- module->lazy_compile_data()->length(), TENURED);
- ret->set_lazy_compile_data(*lazy_comp_data);
- }
- return ret;
-}
-
-void WasmCompiledModule::SetTableValue(Isolate* isolate,
- Handle<FixedArray> table, int index,
- Address value) {
- Handle<HeapNumber> number = isolate->factory()->NewHeapNumber(
- static_cast<double>(reinterpret_cast<size_t>(value)), MUTABLE, TENURED);
- table->set(index, *number);
-}
-
-void WasmCompiledModule::UpdateTableValue(FixedArray* table, int index,
- Address value) {
- DisallowHeapAllocation no_gc;
- HeapNumber::cast(table->get(index))
- ->set_value(static_cast<double>(reinterpret_cast<size_t>(value)));
-}
+ ret->GetNativeModule()->SetCompiledModule(ret);
-Address WasmCompiledModule::GetTableValue(FixedArray* table, int index) {
- DisallowHeapAllocation no_gc;
- double value = HeapNumber::cast(table->get(index))->value();
- return reinterpret_cast<Address>(static_cast<size_t>(value));
+ return ret;
}
wasm::NativeModule* WasmCompiledModule::GetNativeModule() const {
@@ -1515,14 +1433,6 @@ wasm::NativeModule* WasmCompiledModule::GetNativeModule() const {
return Managed<wasm::NativeModule>::cast(native_module())->get();
}
-void WasmCompiledModule::InitId() {
-#if DEBUG
- static uint32_t instance_id_counter = 0;
- set(kID_instance_id, Smi::FromInt(instance_id_counter++));
- TRACE("New compiled module id: %d\n", instance_id());
-#endif
-}
-
void WasmCompiledModule::Reset(Isolate* isolate,
WasmCompiledModule* compiled_module) {
DisallowHeapAllocation no_gc;
@@ -1534,32 +1444,12 @@ void WasmCompiledModule::Reset(Isolate* isolate,
TRACE("Resetting %zu\n", native_module->instance_id);
if (compiled_module->use_trap_handler()) {
- for (uint32_t i = native_module->num_imported_functions(),
- e = native_module->FunctionCount();
- i < e; ++i) {
- wasm::WasmCode* wasm_code = native_module->GetCode(i);
- if (wasm_code->HasTrapHandlerIndex()) {
- CHECK_LT(wasm_code->trap_handler_index(),
- static_cast<size_t>(std::numeric_limits<int>::max()));
- trap_handler::ReleaseHandlerData(
- static_cast<int>(wasm_code->trap_handler_index()));
- wasm_code->ResetTrapHandlerIndex();
- }
- }
+ native_module->ReleaseProtectedInstructions();
}
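
    [Annotation] The per-code trap-handler cleanup deleted above now lives behind
    NativeModule::ReleaseProtectedInstructions(). A sketch reconstructed from the removed loop; only the
    method name appears in this diff, so the body below is an assumption about its shape:

    // Assumed shape of the hoisted method, mirroring the deleted loop.
    void NativeModule::ReleaseProtectedInstructions() {
      for (uint32_t i = num_imported_functions(), e = FunctionCount(); i < e; ++i) {
        WasmCode* code = GetCode(i);
        if (code == nullptr || !code->HasTrapHandlerIndex()) continue;
        CHECK_LT(code->trap_handler_index(),
                 static_cast<size_t>(std::numeric_limits<int>::max()));
        trap_handler::ReleaseHandlerData(
            static_cast<int>(code->trap_handler_index()));
        code->ResetTrapHandlerIndex();
      }
    }
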
// Patch code to update memory references, global references, and function
// table references.
- Zone specialization_zone(isolate->allocator(), ZONE_NAME);
- wasm::CodeSpecialization code_specialization(isolate, &specialization_zone);
-
- if (compiled_module->has_lazy_compile_data()) {
- for (int i = 0, e = compiled_module->lazy_compile_data()->length(); i < e;
- ++i) {
- compiled_module->lazy_compile_data()->set(
- i, isolate->heap()->undefined_value());
- }
- }
+ wasm::CodeSpecialization code_specialization;
for (uint32_t i = native_module->num_imported_functions(),
end = native_module->FunctionCount();
@@ -1567,8 +1457,7 @@ void WasmCompiledModule::Reset(Isolate* isolate,
wasm::WasmCode* code = native_module->GetCode(i);
// Skip lazy compile stubs.
if (code == nullptr || code->kind() != wasm::WasmCode::kFunction) continue;
- bool changed = code_specialization.ApplyToWasmCode(WasmCodeWrapper(code),
- SKIP_ICACHE_FLUSH);
+ bool changed = code_specialization.ApplyToWasmCode(code, SKIP_ICACHE_FLUSH);
// TODO(wasm): Check if this is faster than passing FLUSH_ICACHE_IF_NEEDED
// above.
if (changed) {
@@ -1602,54 +1491,11 @@ MaybeHandle<String> WasmSharedModuleData::ExtractUtf8StringFromModuleBytes(
static_cast<int>(ref.length()));
}
-bool WasmCompiledModule::IsWasmCompiledModule(Object* obj) {
- if (!obj->IsFixedArray()) return false;
- FixedArray* arr = FixedArray::cast(obj);
- if (arr->length() != PropertyIndices::Count) return false;
-#define WCM_CHECK_TYPE(NAME, TYPE_CHECK) \
- do { \
- Object* obj = arr->get(kID_##NAME); \
- if (!(TYPE_CHECK)) return false; \
- } while (false);
-// We're OK with undefined, generally, because maybe we don't
-// have a value for that item. For example, we may not have a
-// memory, or globals.
-// We're not OK with the const numbers being undefined. They are
-// expected to be initialized at construction.
-#define WCM_CHECK_OBJECT(TYPE, NAME) \
- WCM_CHECK_TYPE(NAME, obj->IsUndefined(isolate) || obj->Is##TYPE())
-#define WCM_CHECK_CONST_OBJECT(TYPE, NAME) \
- WCM_CHECK_TYPE(NAME, obj->IsUndefined(isolate) || obj->Is##TYPE())
-#define WCM_CHECK_WASM_OBJECT(TYPE, NAME) \
- WCM_CHECK_TYPE(NAME, obj->IsFixedArray() || obj->IsUndefined(isolate))
-#define WCM_CHECK_WEAK_LINK(TYPE, NAME) WCM_CHECK_OBJECT(WeakCell, NAME)
-#define WCM_CHECK_SMALL_NUMBER(TYPE, NAME) \
- WCM_CHECK_TYPE(NAME, obj->IsUndefined(isolate) || obj->IsSmi())
-#define WCM_CHECK(KIND, TYPE, NAME) WCM_CHECK_##KIND(TYPE, NAME)
-#define WCM_CHECK_SMALL_CONST_NUMBER(TYPE, NAME) \
- WCM_CHECK_TYPE(NAME, obj->IsSmi())
-#undef WCM_CHECK_TYPE
-#undef WCM_CHECK_OBJECT
-#undef WCM_CHECK_CONST_OBJECT
-#undef WCM_CHECK_WASM_OBJECT
-#undef WCM_CHECK_WEAK_LINK
-#undef WCM_CHECK_SMALL_NUMBER
-#undef WCM_CHECK
-#undef WCM_CHECK_SMALL_CONST_NUMBER
-
- // All checks passed.
- return true;
-}
-
void WasmCompiledModule::PrintInstancesChain() {
#if DEBUG
if (!FLAG_trace_wasm_instances) return;
for (WasmCompiledModule* current = this; current != nullptr;) {
- if (FLAG_wasm_jit_to_native) {
- PrintF("->%zu", current->GetNativeModule()->instance_id);
- } else {
- PrintF("->%d", current->instance_id());
- }
+ PrintF("->%zu", current->GetNativeModule()->instance_id);
if (!current->has_next_instance()) break;
current = current->next_instance();
}
@@ -1669,61 +1515,22 @@ void WasmCompiledModule::RemoveFromChain() {
DisallowHeapAllocation no_gc;
Isolate* isolate = GetIsolate();
- Object* next = get(kID_next_instance);
- Object* prev = get(kID_prev_instance);
+ Object* next = raw_next_instance();
+ Object* prev = raw_prev_instance();
if (!prev->IsUndefined(isolate)) {
- WasmCompiledModule::cast(prev)->set(kID_next_instance, next);
+ WasmCompiledModule::cast(prev)->set_raw_next_instance(next);
}
if (!next->IsUndefined(isolate)) {
- WasmCompiledModule::cast(next)->set(kID_prev_instance, prev);
+ WasmCompiledModule::cast(next)->set_raw_prev_instance(prev);
}
}
-void WasmCompiledModule::OnWasmModuleDecodingComplete(
- Handle<WasmSharedModuleData> shared) {
- set_shared(*shared);
-}
-
void WasmCompiledModule::ReinitializeAfterDeserialization(
Isolate* isolate, Handle<WasmCompiledModule> compiled_module) {
- // This method must only be called immediately after deserialization.
- // At this point, no module wrapper exists, so the shared module data is
- // incomplete.
- Handle<WasmSharedModuleData> shared(
- static_cast<WasmSharedModuleData*>(compiled_module->get(kID_shared)),
- isolate);
- if (!FLAG_wasm_jit_to_native) {
- DCHECK(!WasmSharedModuleData::IsWasmSharedModuleData(*shared));
- WasmSharedModuleData::ReinitializeAfterDeserialization(isolate, shared);
- }
- size_t function_table_count =
- compiled_module->shared()->module()->function_tables.size();
-
- if (function_table_count > 0) {
- // The tables are of the right size, but contain bogus global handle
- // addresses. Produce new global handles for the empty tables, then reset,
- // which will relocate the code. We end up with a WasmCompiledModule as-if
- // it were just compiled.
- if (!WASM_CONTEXT_TABLES) {
- DCHECK(compiled_module->has_function_tables());
- Handle<FixedArray> function_tables(
- compiled_module->empty_function_tables(), isolate);
- for (size_t i = 0; i < function_table_count; ++i) {
- Handle<Object> global_func_table_handle =
- isolate->global_handles()->Create(
- isolate->heap()->undefined_value());
- GlobalHandleAddress new_func_table = global_func_table_handle.address();
- SetTableValue(isolate, function_tables, static_cast<int>(i),
- new_func_table);
- }
- }
- }
-
// Reset, but don't delete any global handles, because their owning instance
// may still be active.
WasmCompiledModule::Reset(isolate, *compiled_module);
- DCHECK(WasmSharedModuleData::IsWasmSharedModuleData(*shared));
}
MaybeHandle<String> WasmSharedModuleData::GetModuleNameOrNull(
@@ -1737,9 +1544,10 @@ MaybeHandle<String> WasmSharedModuleData::GetFunctionNameOrNull(
Isolate* isolate, Handle<WasmSharedModuleData> shared,
uint32_t func_index) {
DCHECK_LT(func_index, shared->module()->functions.size());
- WasmFunction& function = shared->module()->functions[func_index];
- if (!function.name.is_set()) return {};
- return ExtractUtf8StringFromModuleBytes(isolate, shared, function.name);
+ wasm::WireBytesRef name =
+ shared->module()->LookupName(shared->module_bytes(), func_index);
+ if (!name.is_set()) return {};
+ return ExtractUtf8StringFromModuleBytes(isolate, shared, name);
}
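
    [Annotation] Function names are now resolved through module()->LookupName(), which hands back a
    WireBytesRef, i.e. an (offset, length) window into the module's wire bytes rather than a field stored on
    each WasmFunction. A standalone illustration of that idea; the struct below loosely mimics the interface
    used above and is not the V8 type:

    #include <cstdint>
    #include <string>
    #include <vector>

    struct WireBytesRef {
      uint32_t offset = 0;
      uint32_t length = 0;
      bool is_set() const { return length != 0; }        // loose mimic
      uint32_t end_offset() const { return offset + length; }
    };

    // Materialize a name only when a caller actually asks for it.
    std::string Materialize(const std::vector<uint8_t>& wire_bytes,
                            WireBytesRef ref) {
      if (!ref.is_set() || ref.end_offset() > wire_bytes.size()) return {};
      return std::string(wire_bytes.begin() + ref.offset,
                         wire_bytes.begin() + ref.end_offset());
    }
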
Handle<String> WasmSharedModuleData::GetFunctionName(
@@ -1753,12 +1561,11 @@ Handle<String> WasmSharedModuleData::GetFunctionName(
Vector<const uint8_t> WasmSharedModuleData::GetRawFunctionName(
uint32_t func_index) {
DCHECK_GT(module()->functions.size(), func_index);
- WasmFunction& function = module()->functions[func_index];
SeqOneByteString* bytes = module_bytes();
- DCHECK_GE(bytes->length(), function.name.end_offset());
- return Vector<const uint8_t>(
- bytes->GetCharsAddress() + function.name.offset(),
- function.name.length());
+ wasm::WireBytesRef name = module()->LookupName(bytes, func_index);
+ DCHECK_GE(bytes->length(), name.end_offset());
+ return Vector<const uint8_t>(bytes->GetCharsAddress() + name.offset(),
+ name.length());
}
int WasmSharedModuleData::GetFunctionOffset(uint32_t func_index) {
@@ -1839,6 +1646,8 @@ bool WasmCompiledModule::SetBreakPoint(
}
void WasmCompiledModule::LogWasmCodes(Isolate* isolate) {
+ if (!wasm::WasmCode::ShouldBeLogged(isolate)) return;
+
wasm::NativeModule* native_module = GetNativeModule();
if (native_module == nullptr) return;
const uint32_t number_of_codes = native_module->FunctionCount();
@@ -1847,60 +1656,12 @@ void WasmCompiledModule::LogWasmCodes(Isolate* isolate) {
for (uint32_t i = 0; i < number_of_codes; i++) {
wasm::WasmCode* code = native_module->GetCode(i);
if (code == nullptr) continue;
- int name_length;
- Handle<String> name(
- WasmSharedModuleData::GetFunctionName(isolate, shared_handle, i));
- auto cname = name->ToCString(AllowNullsFlag::DISALLOW_NULLS,
- RobustnessFlag::ROBUST_STRING_TRAVERSAL,
- &name_length);
- wasm::WasmName wasm_name(cname.get(), name_length);
- PROFILE(isolate, CodeCreateEvent(CodeEventListener::FUNCTION_TAG, code,
- wasm_name));
+ code->LogCode(isolate);
}
}
}
-void AttachWasmFunctionInfo(Isolate* isolate, Handle<Code> code,
- MaybeHandle<WeakCell> weak_instance,
- int func_index) {
- DCHECK(weak_instance.is_null() ||
- weak_instance.ToHandleChecked()->value()->IsWasmInstanceObject());
- Handle<FixedArray> deopt_data = isolate->factory()->NewFixedArray(2, TENURED);
- if (!weak_instance.is_null()) {
- // TODO(wasm): Introduce constants for the indexes in wasm deopt data.
- deopt_data->set(0, *weak_instance.ToHandleChecked());
- }
- deopt_data->set(1, Smi::FromInt(func_index));
-
- code->set_deoptimization_data(*deopt_data);
-}
-
-void AttachWasmFunctionInfo(Isolate* isolate, Handle<Code> code,
- MaybeHandle<WasmInstanceObject> instance,
- int func_index) {
- MaybeHandle<WeakCell> weak_instance;
- if (!instance.is_null()) {
- weak_instance = isolate->factory()->NewWeakCell(instance.ToHandleChecked());
- }
- AttachWasmFunctionInfo(isolate, code, weak_instance, func_index);
-}
-
-WasmFunctionInfo GetWasmFunctionInfo(Isolate* isolate, Handle<Code> code) {
- FixedArray* deopt_data = code->deoptimization_data();
- DCHECK_LE(2, deopt_data->length());
- MaybeHandle<WasmInstanceObject> instance;
- Object* maybe_weak_instance = deopt_data->get(0);
- if (maybe_weak_instance->IsWeakCell()) {
- Object* maybe_instance = WeakCell::cast(maybe_weak_instance)->value();
- if (maybe_instance) {
- instance = handle(WasmInstanceObject::cast(maybe_instance), isolate);
- }
- }
- int func_index = Smi::ToInt(deopt_data->get(1));
- return {instance, func_index};
-}
-
#undef TRACE
-
+#undef TRACE_IFT
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index fe2ed419db..68b3c74063 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -1,5 +1,5 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
+// Copyright 2016 the V8 project authors. All rights reserved. Use of
+// this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_WASM_WASM_OBJECTS_H_
@@ -29,7 +29,6 @@ class NativeModule;
class WasmCode;
struct WasmModule;
class SignatureMap;
-typedef Address GlobalHandleAddress;
using ValueType = MachineRepresentation;
using FunctionSig = Signature<ValueType>;
} // namespace wasm
@@ -38,67 +37,67 @@ class WasmCompiledModule;
class WasmDebugInfo;
class WasmInstanceObject;
-#define WASM_CONTEXT_TABLES FLAG_wasm_jit_to_native
-
-#define DECL_OOL_QUERY(type) static bool Is##type(Object* object);
-#define DECL_OOL_CAST(type) static type* cast(Object* object);
-
-#define DECL_GETTER(name, type) type* name();
-
#define DECL_OPTIONAL_ACCESSORS(name, type) \
INLINE(bool has_##name()); \
DECL_ACCESSORS(name, type)
-#define DEF_SIZE(parent) \
- static const int kSize = parent::kHeaderSize + kFieldCount * kPointerSize; \
- static const int kParentSize = parent::kHeaderSize; \
- static const int kHeaderSize = kSize;
-#define DEF_OFFSET(name) \
- static const int k##name##Offset = \
- kSize + (k##name##Index - kFieldCount) * kPointerSize;
-
-// An entry in an indirect dispatch table.
-struct IndirectFunctionTableEntry {
- int32_t sig_id = 0;
- WasmContext* context = nullptr;
- Address target = nullptr;
-
- MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(IndirectFunctionTableEntry)
+// An entry in an indirect function table (IFT).
+// Each entry in the IFT has the following fields:
+// - instance = target instance
+// - sig_id = signature id of function
+// - target = entrypoint to wasm code for the function, or wasm-to-js wrapper
+class IndirectFunctionTableEntry {
+ public:
+ inline IndirectFunctionTableEntry(WasmInstanceObject*, int index);
+
+ void clear();
+ void set(int sig_id, WasmInstanceObject* instance,
+ const wasm::WasmCode* wasm_code);
+
+ WasmInstanceObject* instance();
+ int sig_id();
+ Address target();
+
+ private:
+#ifdef DEBUG
+ DisallowHeapAllocation no_gc;
+#endif
+ WasmInstanceObject* const instance_;
+ int const index_;
};
-// Wasm context used to store the mem_size and mem_start address of the linear
-// memory. These variables can be accessed at C++ level at graph build time
-// (e.g., initialized during instance building / changed at runtime by
-// grow_memory). The address of the WasmContext is provided to the wasm entry
-// functions using a RelocatableIntPtrConstant, then the address is passed as
-// parameter to the other wasm functions.
-// Note that generated code can directly read from instances of this struct.
-struct WasmContext {
- byte* mem_start = nullptr;
- uint32_t mem_size = 0; // TODO(titzer): uintptr_t?
- uint32_t mem_mask = 0; // TODO(titzer): uintptr_t?
- byte* globals_start = nullptr;
- // TODO(wasm): pad these entries to a power of two.
- IndirectFunctionTableEntry* table = nullptr;
- uint32_t table_size = 0;
-
- void SetRawMemory(void* mem_start, size_t mem_size) {
- DCHECK_LE(mem_size, wasm::kV8MaxWasmMemoryPages * wasm::kWasmPageSize);
- this->mem_start = static_cast<byte*>(mem_start);
- this->mem_size = static_cast<uint32_t>(mem_size);
- this->mem_mask = base::bits::RoundUpToPowerOfTwo32(this->mem_size) - 1;
- DCHECK_LE(mem_size, this->mem_mask + 1);
- }
-
- ~WasmContext() {
- if (table) free(table);
- mem_start = nullptr;
- mem_size = 0;
- mem_mask = 0;
- globals_start = nullptr;
- table = nullptr;
- table_size = 0;
- }
+// An entry for an imported function.
+// (note this is not called a "table" since it is not dynamically indexed).
+// The imported function entries are used to call imported functions.
+// For each imported function there is an entry which is either:
+// - an imported JSReceiver, which has fields
+// - instance = importing instance
+// - receiver = JSReceiver, either a JS function or other callable
+// - target = pointer to wasm-to-js wrapper code entrypoint
+// - an imported wasm function from another instance, which has fields
+// - instance = target instance
+// - target = entrypoint to wasm code of the function
+class ImportedFunctionEntry {
+ public:
+ inline ImportedFunctionEntry(WasmInstanceObject*, int index);
+
+ // Initialize this entry as a {JSReceiver} call.
+ void set(JSReceiver* callable, const wasm::WasmCode* wasm_to_js_wrapper);
+ // Initialize this entry as a WASM to WASM call.
+ void set(WasmInstanceObject* target_instance,
+ const wasm::WasmCode* wasm_function);
+
+ WasmInstanceObject* instance();
+ JSReceiver* callable();
+ Address target();
+ bool is_js_receiver_entry();
+
+ private:
+#ifdef DEBUG
+ DisallowHeapAllocation no_gc;
+#endif
+ WasmInstanceObject* const instance_;
+ int const index_;
};
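
    [Annotation] A standalone sketch of the two entry flavors described in the comment above: a JS import
    keeps the callable alive and targets a wasm-to-js wrapper, while a wasm-to-wasm import targets another
    instance's code directly. Types and field names below are stand-ins, not the V8 ones:

    #include <cstdint>

    using Address = uintptr_t;
    struct Instance;
    struct JSCallable;

    struct ImportedFunctionSlot {
      Instance* instance = nullptr;    // importing or target instance
      JSCallable* callable = nullptr;  // only set for JS imports
      Address target = 0;              // wrapper entrypoint or wasm entrypoint

      void SetJsImport(Instance* importing, JSCallable* js, Address wrapper) {
        instance = importing; callable = js; target = wrapper;
      }
      void SetWasmImport(Instance* target_instance, Address wasm_entry) {
        instance = target_instance; callable = nullptr; target = wasm_entry;
      }
      bool is_js_entry() const { return callable != nullptr; }
    };
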
// Representation of a WebAssembly.Module JavaScript-level object.
@@ -109,13 +108,14 @@ class WasmModuleObject : public JSObject {
// Shared compiled code between multiple WebAssembly.Module objects.
DECL_ACCESSORS(compiled_module, WasmCompiledModule)
- enum { // --
- kCompiledModuleIndex,
- kFieldCount
- };
+// Layout description.
+#define WASM_MODULE_OBJECT_FIELDS(V) \
+ V(kCompiledModuleOffset, kPointerSize) \
+ V(kSize, 0)
- DEF_SIZE(JSObject)
- DEF_OFFSET(CompiledModule)
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ WASM_MODULE_OBJECT_FIELDS)
+#undef WASM_MODULE_OBJECT_FIELDS
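
    [Annotation] The DEFINE_FIELD_OFFSET_CONSTANTS pattern used here turns each (name, size) pair into
    consecutive offset constants starting at the parent's header size. A standalone mimic of that expansion,
    assuming the usual enum trick; the demo macro names below are made up and the real helper lives in V8's
    utility headers:

    constexpr int kHeaderSize = 8;    // pretend JSObject::kHeaderSize
    constexpr int kPointerSize = 8;

    #define WASM_MODULE_OBJECT_FIELDS_DEMO(V) \
      V(kCompiledModuleOffset, kPointerSize)  \
      V(kSize, 0)

    #define DEMO_FIELD(Name, Size) Name, Name##End = Name + (Size)-1,

    enum WasmModuleObjectOffsets {
      kDemoStartMinusOne = kHeaderSize - 1,
      WASM_MODULE_OBJECT_FIELDS_DEMO(DEMO_FIELD)
    };
    #undef DEMO_FIELD
    #undef WASM_MODULE_OBJECT_FIELDS_DEMO

    static_assert(kCompiledModuleOffset == kHeaderSize,
                  "first field starts right after the header");
    static_assert(kSize == kHeaderSize + kPointerSize,
                  "kSize is the total object size");
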
static Handle<WasmModuleObject> New(
Isolate* isolate, Handle<WasmCompiledModule> compiled_module);
@@ -134,20 +134,17 @@ class WasmTableObject : public JSObject {
DECL_ACCESSORS(maximum_length, Object)
DECL_ACCESSORS(dispatch_tables, FixedArray)
- enum { // --
- kFunctionsIndex,
- kMaximumLengthIndex,
- kDispatchTablesIndex,
- kFieldCount
- };
+// Layout description.
+#define WASM_TABLE_OBJECT_FIELDS(V) \
+ V(kFunctionsOffset, kPointerSize) \
+ V(kMaximumLengthOffset, kPointerSize) \
+ V(kDispatchTablesOffset, kPointerSize) \
+ V(kSize, 0)
- DEF_SIZE(JSObject)
- DEF_OFFSET(Functions)
- DEF_OFFSET(MaximumLength)
- DEF_OFFSET(DispatchTables)
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, WASM_TABLE_OBJECT_FIELDS)
+#undef WASM_TABLE_OBJECT_FIELDS
inline uint32_t current_length();
- inline bool has_maximum_length();
void Grow(Isolate* isolate, uint32_t count);
static Handle<WasmTableObject> New(Isolate* isolate, uint32_t initial,
@@ -155,8 +152,7 @@ class WasmTableObject : public JSObject {
Handle<FixedArray>* js_functions);
static void AddDispatchTable(Isolate* isolate, Handle<WasmTableObject> table,
Handle<WasmInstanceObject> instance,
- int table_index,
- Handle<FixedArray> function_table);
+ int table_index);
static void Set(Isolate* isolate, Handle<WasmTableObject> table,
int32_t index, Handle<JSFunction> function);
@@ -165,7 +161,7 @@ class WasmTableObject : public JSObject {
Handle<WasmTableObject> table,
int table_index, wasm::FunctionSig* sig,
Handle<WasmInstanceObject> from_instance,
- WasmCodeWrapper wasm_code, int func_index);
+ wasm::WasmCode* wasm_code);
static void ClearDispatchTables(Handle<WasmTableObject> table, int index);
};
@@ -177,27 +173,23 @@ class WasmMemoryObject : public JSObject {
DECL_ACCESSORS(array_buffer, JSArrayBuffer)
DECL_INT_ACCESSORS(maximum_pages)
- DECL_OPTIONAL_ACCESSORS(instances, WeakFixedArray)
- DECL_ACCESSORS(wasm_context, Managed<WasmContext>)
-
- enum { // --
- kArrayBufferIndex,
- kMaximumPagesIndex,
- kInstancesIndex,
- kWasmContextIndex,
- kFieldCount
- };
-
- DEF_SIZE(JSObject)
- DEF_OFFSET(ArrayBuffer)
- DEF_OFFSET(MaximumPages)
- DEF_OFFSET(Instances)
- DEF_OFFSET(WasmContext)
-
- // Add an instance to the internal (weak) list. amortized O(n).
+ DECL_OPTIONAL_ACCESSORS(instances, FixedArrayOfWeakCells)
+
+// Layout description.
+#define WASM_MEMORY_OBJECT_FIELDS(V) \
+ V(kArrayBufferOffset, kPointerSize) \
+ V(kMaximumPagesOffset, kPointerSize) \
+ V(kInstancesOffset, kPointerSize) \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ WASM_MEMORY_OBJECT_FIELDS)
+#undef WASM_MEMORY_OBJECT_FIELDS
+
+ // Add an instance to the internal (weak) list.
static void AddInstance(Isolate* isolate, Handle<WasmMemoryObject> memory,
Handle<WasmInstanceObject> object);
- // Remove an instance from the internal (weak) list. O(n).
+ // Remove an instance from the internal (weak) list.
static void RemoveInstance(Isolate* isolate, Handle<WasmMemoryObject> memory,
Handle<WasmInstanceObject> object);
uint32_t current_pages();
@@ -209,67 +201,133 @@ class WasmMemoryObject : public JSObject {
static int32_t Grow(Isolate*, Handle<WasmMemoryObject>, uint32_t pages);
};
+// Representation of a WebAssembly.Global JavaScript-level object.
+class WasmGlobalObject : public JSObject {
+ public:
+ DECL_CAST(WasmGlobalObject)
+
+ DECL_ACCESSORS(array_buffer, JSArrayBuffer)
+ DECL_INT32_ACCESSORS(offset)
+ DECL_INT_ACCESSORS(flags)
+ DECL_PRIMITIVE_ACCESSORS(type, wasm::ValueType)
+ DECL_BOOLEAN_ACCESSORS(is_mutable)
+
+#define WASM_GLOBAL_OBJECT_FLAGS_BIT_FIELDS(V, _) \
+ V(TypeBits, wasm::ValueType, 8, _) \
+ V(IsMutableBit, bool, 1, _)
+
+ DEFINE_BIT_FIELDS(WASM_GLOBAL_OBJECT_FLAGS_BIT_FIELDS)
+
+#undef WASM_GLOBAL_OBJECT_FLAGS_BIT_FIELDS
+
+// Layout description.
+#define WASM_GLOBAL_OBJECT_FIELDS(V) \
+ V(kArrayBufferOffset, kPointerSize) \
+ V(kOffsetOffset, kPointerSize) \
+ V(kFlagsOffset, kPointerSize) \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ WASM_GLOBAL_OBJECT_FIELDS)
+#undef WASM_GLOBAL_OBJECT_FIELDS
+
+ V8_EXPORT_PRIVATE static MaybeHandle<WasmGlobalObject> New(
+ Isolate* isolate, MaybeHandle<JSArrayBuffer> buffer, wasm::ValueType type,
+ int32_t offset, bool is_mutable);
+
+ static inline uint32_t TypeSize(wasm::ValueType);
+ inline uint32_t type_size() const;
+
+ inline int32_t GetI32();
+ inline float GetF32();
+ inline double GetF64();
+
+ inline void SetI32(int32_t value);
+ inline void SetF32(float value);
+ inline void SetF64(double value);
+
+ private:
+ // This function returns the address of the global's data in the
+ // JSArrayBuffer. This buffer may be allocated on-heap, in which case it may
+ // not have a fixed address.
+ inline Address address() const;
+};
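
    [Annotation] Per the comment on address(), a WasmGlobalObject stores its value inside a backing buffer at
    a byte offset, with the type and mutability packed into the flags bit field. A standalone mimic of the
    typed getters and setters; field names below are illustrative and the real accessors go through the
    JSArrayBuffer backing store:

    #include <cstdint>
    #include <cstring>
    #include <vector>

    enum class ValueType { kI32, kI64, kF32, kF64 };

    struct GlobalSlot {
      std::vector<uint8_t>* buffer;  // stand-in for the array buffer backing store
      uint32_t offset;               // byte offset of this global in the buffer
      ValueType type;
      bool is_mutable;

      template <typename T>
      T Get() const {                // e.g. Get<double>() ~ GetF64()
        T value;
        std::memcpy(&value, buffer->data() + offset, sizeof(T));
        return value;
      }
      template <typename T>
      void Set(T value) {            // e.g. Set<float>(1.0f) ~ SetF32()
        std::memcpy(buffer->data() + offset, &value, sizeof(T));
      }
    };
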
+
// A WebAssembly.Instance JavaScript-level object.
class WasmInstanceObject : public JSObject {
public:
DECL_CAST(WasmInstanceObject)
- DECL_ACCESSORS(wasm_context, Managed<WasmContext>)
DECL_ACCESSORS(compiled_module, WasmCompiledModule)
DECL_ACCESSORS(exports_object, JSObject)
DECL_OPTIONAL_ACCESSORS(memory_object, WasmMemoryObject)
DECL_OPTIONAL_ACCESSORS(globals_buffer, JSArrayBuffer)
DECL_OPTIONAL_ACCESSORS(debug_info, WasmDebugInfo)
DECL_OPTIONAL_ACCESSORS(table_object, WasmTableObject)
- DECL_OPTIONAL_ACCESSORS(function_tables, FixedArray)
-
- // FixedArray of all instances whose code was imported
- DECL_OPTIONAL_ACCESSORS(directly_called_instances, FixedArray)
- DECL_ACCESSORS(js_imports_table, FixedArray)
-
- enum { // --
- kWasmContextIndex,
- kCompiledModuleIndex,
- kExportsObjectIndex,
- kMemoryObjectIndex,
- kGlobalsBufferIndex,
- kDebugInfoIndex,
- kTableObjectIndex,
- kFunctionTablesIndex,
- kDirectlyCalledInstancesIndex,
- kJsImportsTableIndex,
- kFieldCount
- };
-
- DEF_SIZE(JSObject)
- DEF_OFFSET(WasmContext)
- DEF_OFFSET(CompiledModule)
- DEF_OFFSET(ExportsObject)
- DEF_OFFSET(MemoryObject)
- DEF_OFFSET(GlobalsBuffer)
- DEF_OFFSET(DebugInfo)
- DEF_OFFSET(TableObject)
- DEF_OFFSET(FunctionTables)
- DEF_OFFSET(DirectlyCalledInstances)
- DEF_OFFSET(JsImportsTable)
+ DECL_ACCESSORS(imported_function_instances, FixedArray)
+ DECL_ACCESSORS(imported_function_callables, FixedArray)
+ DECL_OPTIONAL_ACCESSORS(indirect_function_table_instances, FixedArray)
+ DECL_OPTIONAL_ACCESSORS(managed_native_allocations, Foreign)
+ DECL_OPTIONAL_ACCESSORS(managed_indirect_patcher, Foreign)
+ DECL_PRIMITIVE_ACCESSORS(memory_start, byte*)
+ DECL_PRIMITIVE_ACCESSORS(memory_size, uint32_t)
+ DECL_PRIMITIVE_ACCESSORS(memory_mask, uint32_t)
+ DECL_PRIMITIVE_ACCESSORS(imported_function_targets, Address*)
+ DECL_PRIMITIVE_ACCESSORS(globals_start, byte*)
+ DECL_PRIMITIVE_ACCESSORS(indirect_function_table_size, uint32_t)
+ DECL_PRIMITIVE_ACCESSORS(indirect_function_table_sig_ids, uint32_t*)
+ DECL_PRIMITIVE_ACCESSORS(indirect_function_table_targets, Address*)
+
+// Layout description.
+#define WASM_INSTANCE_OBJECT_FIELDS(V) \
+ V(kCompiledModuleOffset, kPointerSize) \
+ V(kExportsObjectOffset, kPointerSize) \
+ V(kMemoryObjectOffset, kPointerSize) \
+ V(kGlobalsBufferOffset, kPointerSize) \
+ V(kDebugInfoOffset, kPointerSize) \
+ V(kTableObjectOffset, kPointerSize) \
+ V(kFunctionTablesOffset, kPointerSize) \
+ V(kImportedFunctionInstancesOffset, kPointerSize) \
+ V(kImportedFunctionCallablesOffset, kPointerSize) \
+ V(kIndirectFunctionTableInstancesOffset, kPointerSize) \
+ V(kManagedNativeAllocationsOffset, kPointerSize) \
+ V(kManagedIndirectPatcherOffset, kPointerSize) \
+ V(kFirstUntaggedOffset, 0) /* marker */ \
+ V(kMemoryStartOffset, kPointerSize) /* untagged */ \
+ V(kMemorySizeOffset, kUInt32Size) /* untagged */ \
+ V(kMemoryMaskOffset, kUInt32Size) /* untagged */ \
+ V(kImportedFunctionTargetsOffset, kPointerSize) /* untagged */ \
+ V(kGlobalsStartOffset, kPointerSize) /* untagged */ \
+ V(kIndirectFunctionTableSigIdsOffset, kPointerSize) /* untagged */ \
+ V(kIndirectFunctionTableTargetsOffset, kPointerSize) /* untagged */ \
+ V(kIndirectFunctionTableSizeOffset, kUInt32Size) /* untagged */ \
+ V(k64BitArchPaddingOffset, kPointerSize - kUInt32Size) /* padding */ \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ WASM_INSTANCE_OBJECT_FIELDS)
+#undef WASM_INSTANCE_OBJECT_FIELDS
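
    [Annotation] The layout above splits the instance into tagged fields (visited by the BodyDescriptor) and
    untagged raw fields such as memory_start, memory_size and memory_mask that generated code reads directly.
    A standalone sketch of the raw-memory bookkeeping, mirroring the removed WasmContext::SetRawMemory; the
    helper below is an illustrative replacement for base::bits::RoundUpToPowerOfTwo32:

    #include <cstdint>

    static uint32_t RoundUpToPowerOfTwo32(uint32_t value) {
      if (value <= 1) return 1;
      value--;
      value |= value >> 1;  value |= value >> 2;  value |= value >> 4;
      value |= value >> 8;  value |= value >> 16;
      return value + 1;
    }

    struct RawMemory {
      uint8_t* mem_start = nullptr;
      uint32_t mem_size = 0;
      uint32_t mem_mask = 0;   // power-of-two mask usable for cheap clamping

      void SetRawMemory(uint8_t* start, uint32_t size) {
        mem_start = start;
        mem_size = size;
        mem_mask = RoundUpToPowerOfTwo32(size) - 1;  // size <= mem_mask + 1
      }
    };
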
WasmModuleObject* module_object();
V8_EXPORT_PRIVATE wasm::WasmModule* module();
+ static bool EnsureIndirectFunctionTableWithMinimumSize(
+ Handle<WasmInstanceObject> instance, uint32_t minimum_size);
+
+ bool has_indirect_function_table();
+
+ void SetRawMemory(byte* mem_start, uint32_t mem_size);
+
// Get the debug info associated with the given wasm object.
// If no debug info exists yet, it is created automatically.
static Handle<WasmDebugInfo> GetOrCreateDebugInfo(Handle<WasmInstanceObject>);
static Handle<WasmInstanceObject> New(Isolate*, Handle<WasmCompiledModule>);
- static int32_t GrowMemory(Isolate*, Handle<WasmInstanceObject>,
- uint32_t pages);
-
// Assumed to be called with a code object associated to a wasm module
// instance. Intended to be called from runtime functions. Returns nullptr on
// failing to get owning instance.
static WasmInstanceObject* GetOwningInstance(const wasm::WasmCode* code);
- static WasmInstanceObject* GetOwningInstanceGC(Code* code);
static void ValidateInstancesChainForTesting(
Isolate* isolate, Handle<WasmModuleObject> module_obj,
@@ -280,6 +338,11 @@ class WasmInstanceObject : public JSObject {
static void InstallFinalizer(Isolate* isolate,
Handle<WasmInstanceObject> instance);
+
+ // Iterates all fields in the object except the untagged fields.
+ class BodyDescriptor;
+ // No weak fields.
+ typedef BodyDescriptor BodyDescriptorWeak;
};
// A WASM function that is wrapped and exported to JavaScript.
@@ -297,53 +360,48 @@ class WasmExportedFunction : public JSFunction {
int func_index, int arity,
Handle<Code> export_wrapper);
- WasmCodeWrapper GetWasmCode();
+ wasm::WasmCode* GetWasmCode();
};
// Information shared by all WasmCompiledModule objects for the same module.
-class WasmSharedModuleData : public FixedArray {
+class WasmSharedModuleData : public Struct {
public:
- DECL_OOL_QUERY(WasmSharedModuleData)
- DECL_OOL_CAST(WasmSharedModuleData)
-
- DECL_GETTER(module, wasm::WasmModule)
- DECL_OPTIONAL_ACCESSORS(module_bytes, SeqOneByteString)
+ DECL_ACCESSORS(module_wrapper, Object)
+ wasm::WasmModule* module() const;
+ DECL_ACCESSORS(module_bytes, SeqOneByteString)
DECL_ACCESSORS(script, Script)
DECL_OPTIONAL_ACCESSORS(asm_js_offset_table, ByteArray)
DECL_OPTIONAL_ACCESSORS(breakpoint_infos, FixedArray)
+ inline void reset_breakpoint_infos();
+
+ DECL_CAST(WasmSharedModuleData)
- enum { // --
- kModuleWrapperIndex,
- kModuleBytesIndex,
- kScriptIndex,
- kAsmJsOffsetTableIndex,
- kBreakPointInfosIndex,
- kLazyCompilationOrchestratorIndex,
- kFieldCount
- };
-
- DEF_SIZE(FixedArray)
- DEF_OFFSET(ModuleWrapper)
- DEF_OFFSET(ModuleBytes)
- DEF_OFFSET(Script)
- DEF_OFFSET(AsmJsOffsetTable)
- DEF_OFFSET(BreakPointInfos)
- DEF_OFFSET(LazyCompilationOrchestrator)
+ // Dispatched behavior.
+ DECL_PRINTER(WasmSharedModuleData)
+ DECL_VERIFIER(WasmSharedModuleData)
+
+// Layout description.
+#define WASM_SHARED_MODULE_DATA_FIELDS(V) \
+ V(kModuleWrapperOffset, kPointerSize) \
+ V(kModuleBytesOffset, kPointerSize) \
+ V(kScriptOffset, kPointerSize) \
+ V(kAsmJsOffsetTableOffset, kPointerSize) \
+ V(kBreakPointInfosOffset, kPointerSize) \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
+ WASM_SHARED_MODULE_DATA_FIELDS)
+#undef WASM_SHARED_MODULE_DATA_FIELDS
// Check whether this module was generated from asm.js source.
bool is_asm_js();
- static void ReinitializeAfterDeserialization(Isolate*,
- Handle<WasmSharedModuleData>);
-
static void AddBreakpoint(Handle<WasmSharedModuleData>, int position,
Handle<BreakPoint> break_point);
static void SetBreakpointsOnNewInstance(Handle<WasmSharedModuleData>,
Handle<WasmInstanceObject>);
- static void PrepareForLazyCompilation(Handle<WasmSharedModuleData>);
-
static Handle<WasmSharedModuleData> New(
Isolate* isolate, Handle<Foreign> module_wrapper,
Handle<SeqOneByteString> module_bytes, Handle<Script> script,
@@ -419,58 +477,49 @@ class WasmSharedModuleData : public FixedArray {
static MaybeHandle<FixedArray> CheckBreakPoints(Isolate*,
Handle<WasmSharedModuleData>,
int position);
-
- DECL_OPTIONAL_ACCESSORS(lazy_compilation_orchestrator, Foreign)
};
// This represents the set of wasm compiled functions, together
// with all the information necessary for re-specializing them.
-//
-// We specialize wasm functions to their instance by embedding:
-// - raw pointer to the wasm_context, that contains the size of the
-// memory and the pointer to the backing store of the array buffer
-// used as memory of a particular WebAssembly.Instance object. This
-// information are then used at runtime to access memory / verify bounds
-// check limits.
-// - the objects representing the function tables and signature tables
-//
-// Even without instantiating, we need values for all of these parameters.
-// We need to track these values to be able to create new instances and
-// to be able to serialize/deserialize.
-// The design decisions for how we track these values is not too immediate,
-// and it deserves a summary. The "tricky" ones are: memory, globals, and
-// the tables (signature and functions).
-// For tables, we need to hold a reference to the JS Heap object, because
-// we embed them as objects, and they may move.
-class WasmCompiledModule : public FixedArray {
+class WasmCompiledModule : public Struct {
public:
- enum { // --
- kFieldCount
- };
-
- static WasmCompiledModule* cast(Object* fixed_array) {
- SLOW_DCHECK(IsWasmCompiledModule(fixed_array));
- return reinterpret_cast<WasmCompiledModule*>(fixed_array);
- }
-
-#define WCM_OBJECT_OR_WEAK(TYPE, NAME, ID, TYPE_CHECK, SETTER_MODIFIER) \
- public: \
- inline TYPE* maybe_##NAME() const; \
- inline TYPE* NAME() const; \
- inline bool has_##NAME() const; \
- inline void reset_##NAME(); \
- \
- SETTER_MODIFIER: \
- inline void set_##NAME(TYPE* value);
-
-#define WCM_OBJECT(TYPE, NAME) \
- WCM_OBJECT_OR_WEAK(TYPE, NAME, kID_##NAME, obj->Is##TYPE(), public)
-
-#define WCM_CONST_OBJECT(TYPE, NAME) \
- WCM_OBJECT_OR_WEAK(TYPE, NAME, kID_##NAME, obj->Is##TYPE(), private)
-
-#define WCM_WASM_OBJECT(TYPE, NAME) \
- WCM_OBJECT_OR_WEAK(TYPE, NAME, kID_##NAME, TYPE::Is##TYPE(obj), private)
+ DECL_CAST(WasmCompiledModule)
+
+ // Dispatched behavior.
+ DECL_PRINTER(WasmCompiledModule)
+ DECL_VERIFIER(WasmCompiledModule)
+
+// Layout description.
+#define WASM_COMPILED_MODULE_FIELDS(V) \
+ V(kSharedOffset, kPointerSize) \
+ V(kNativeContextOffset, kPointerSize) \
+ V(kExportWrappersOffset, kPointerSize) \
+ V(kNextInstanceOffset, kPointerSize) \
+ V(kPrevInstanceOffset, kPointerSize) \
+ V(kOwningInstanceOffset, kPointerSize) \
+ V(kWasmModuleOffset, kPointerSize) \
+ V(kNativeModuleOffset, kPointerSize) \
+ V(kLazyCompileDataOffset, kPointerSize) \
+ V(kUseTrapHandlerOffset, kPointerSize) \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
+ WASM_COMPILED_MODULE_FIELDS)
+#undef WASM_COMPILED_MODULE_FIELDS
+
+#define WCM_OBJECT_OR_WEAK(TYPE, NAME, SETTER_MODIFIER) \
+ public: \
+ inline TYPE* NAME() const; \
+ inline bool has_##NAME() const; \
+ inline void reset_##NAME(); \
+ \
+ SETTER_MODIFIER: \
+ inline void set_##NAME(TYPE* value, \
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
+#define WCM_OBJECT(TYPE, NAME) WCM_OBJECT_OR_WEAK(TYPE, NAME, public)
+
+#define WCM_CONST_OBJECT(TYPE, NAME) WCM_OBJECT_OR_WEAK(TYPE, NAME, private)
#define WCM_SMALL_CONST_NUMBER(TYPE, NAME) \
public: \
@@ -479,82 +528,45 @@ class WasmCompiledModule : public FixedArray {
private: \
inline void set_##NAME(TYPE value);
-#define WCM_WEAK_LINK(TYPE, NAME) \
- WCM_OBJECT_OR_WEAK(WeakCell, weak_##NAME, kID_##NAME, obj->IsWeakCell(), \
- public) \
- \
- public: \
+#define WCM_WEAK_LINK(TYPE, NAME) \
+ WCM_OBJECT_OR_WEAK(WeakCell, weak_##NAME, public) \
+ \
+ public: \
inline TYPE* NAME() const;
-// Add values here if they are required for creating new instances or
-// for deserialization, and if they are serializable.
-// By default, instance values go to WasmInstanceObject, however, if
-// we embed the generated code with a value, then we track that value here.
-#define CORE_WCM_PROPERTY_TABLE(MACRO) \
- MACRO(WASM_OBJECT, WasmSharedModuleData, shared) \
- MACRO(WEAK_LINK, Context, native_context) \
- MACRO(CONST_OBJECT, FixedArray, export_wrappers) \
- MACRO(OBJECT, FixedArray, weak_exported_functions) \
- MACRO(WASM_OBJECT, WasmCompiledModule, next_instance) \
- MACRO(WASM_OBJECT, WasmCompiledModule, prev_instance) \
- MACRO(WEAK_LINK, WasmInstanceObject, owning_instance) \
- MACRO(WEAK_LINK, WasmModuleObject, wasm_module) \
- MACRO(OBJECT, FixedArray, source_positions) \
- MACRO(OBJECT, Foreign, native_module) \
- MACRO(OBJECT, FixedArray, lazy_compile_data) \
- MACRO(SMALL_CONST_NUMBER, bool, use_trap_handler)
-
-#define GC_WCM_PROPERTY_TABLE(MACRO) \
- MACRO(SMALL_CONST_NUMBER, uint32_t, num_imported_functions) \
- MACRO(CONST_OBJECT, FixedArray, code_table) \
- MACRO(OBJECT, FixedArray, function_tables) \
- MACRO(CONST_OBJECT, FixedArray, empty_function_tables)
-
-// TODO(mtrofin): this is unnecessary when we stop needing
-// FLAG_wasm_jit_to_native, because we have instance_id on NativeModule.
-#if DEBUG
-#define DEBUG_ONLY_TABLE(MACRO) MACRO(SMALL_CONST_NUMBER, uint32_t, instance_id)
-#else
-#define DEBUG_ONLY_TABLE(IGNORE)
-
- public:
- uint32_t instance_id() const { return static_cast<uint32_t>(-1); }
-#endif
-
-#define WCM_PROPERTY_TABLE(MACRO) \
- CORE_WCM_PROPERTY_TABLE(MACRO) \
- GC_WCM_PROPERTY_TABLE(MACRO) \
- DEBUG_ONLY_TABLE(MACRO)
-
- private:
- enum PropertyIndices {
-#define INDICES(IGNORE1, IGNORE2, NAME) kID_##NAME,
- WCM_PROPERTY_TABLE(INDICES) Count
-#undef INDICES
- };
+ // Add values here if they are required for creating new instances or
+ // for deserialization, and if they are serializable.
+ // By default, instance values go to WasmInstanceObject, however, if
+ // we embed the generated code with a value, then we track that value here.
+ WCM_OBJECT(WasmSharedModuleData, shared)
+ WCM_WEAK_LINK(Context, native_context)
+ WCM_OBJECT(FixedArray, export_wrappers)
+ WCM_CONST_OBJECT(WasmCompiledModule, next_instance)
+ WCM_CONST_OBJECT(WasmCompiledModule, prev_instance)
+ WCM_WEAK_LINK(WasmInstanceObject, owning_instance)
+ WCM_WEAK_LINK(WasmModuleObject, wasm_module)
+ WCM_OBJECT(Foreign, native_module)
+ // TODO(mstarzinger): Make {use_trap_handler} smaller.
+ WCM_SMALL_CONST_NUMBER(bool, use_trap_handler)
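
    [Annotation] For readers new to the WCM_* helpers, each property line above expands into a small accessor
    bundle. Manually expanding WCM_OBJECT(Foreign, native_module) with the WCM_OBJECT_OR_WEAK macro defined
    earlier gives approximately the following class-body fragment (whitespace added):

     public:
      inline Foreign* native_module() const;
      inline bool has_native_module() const;
      inline void reset_native_module();

     public:  // SETTER_MODIFIER is "public" for WCM_OBJECT
      inline void set_native_module(Foreign* value,
                                    WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
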
public:
static Handle<WasmCompiledModule> New(
- Isolate* isolate, wasm::WasmModule* module, Handle<FixedArray> code_table,
+ Isolate* isolate, wasm::WasmModule* module,
Handle<FixedArray> export_wrappers,
- const std::vector<wasm::GlobalHandleAddress>& function_tables,
bool use_trap_hander);
static Handle<WasmCompiledModule> Clone(Isolate* isolate,
Handle<WasmCompiledModule> module);
static void Reset(Isolate* isolate, WasmCompiledModule* module);
+ bool has_instance() const;
+
wasm::NativeModule* GetNativeModule() const;
void InsertInChain(WasmModuleObject*);
void RemoveFromChain();
- void OnWasmModuleDecodingComplete(Handle<WasmSharedModuleData>);
-
-#define DECLARATION(KIND, TYPE, NAME) WCM_##KIND(TYPE, NAME)
- WCM_PROPERTY_TABLE(DECLARATION)
-#undef DECLARATION
- public:
- static bool IsWasmCompiledModule(Object* obj);
+ DECL_ACCESSORS(raw_next_instance, Object);
+ DECL_ACCESSORS(raw_prev_instance, Object);
void PrintInstancesChain();
@@ -570,52 +582,39 @@ class WasmCompiledModule : public FixedArray {
static bool SetBreakPoint(Handle<WasmCompiledModule>, int* position,
Handle<BreakPoint> break_point);
- inline void ReplaceCodeTableForTesting(
- std::vector<wasm::WasmCode*>&& testing_table);
-
- // TODO(mtrofin): following 4 unnecessary after we're done with
- // FLAG_wasm_jit_to_native
- static void SetTableValue(Isolate* isolate, Handle<FixedArray> table,
- int index, Address value);
- static void UpdateTableValue(FixedArray* table, int index, Address value);
- static Address GetTableValue(FixedArray* table, int index);
- inline void ReplaceCodeTableForTesting(Handle<FixedArray> testing_table);
-
void LogWasmCodes(Isolate* isolate);
private:
- void InitId();
-
DISALLOW_IMPLICIT_CONSTRUCTORS(WasmCompiledModule);
};
-class WasmDebugInfo : public FixedArray {
+class WasmDebugInfo : public Struct {
public:
- DECL_OOL_QUERY(WasmDebugInfo)
- DECL_OOL_CAST(WasmDebugInfo)
-
- DECL_GETTER(wasm_instance, WasmInstanceObject)
+ DECL_ACCESSORS(wasm_instance, WasmInstanceObject)
+ DECL_ACCESSORS(interpreter_handle, Object);
+ DECL_ACCESSORS(interpreted_functions, Object);
DECL_OPTIONAL_ACCESSORS(locals_names, FixedArray)
DECL_OPTIONAL_ACCESSORS(c_wasm_entries, FixedArray)
DECL_OPTIONAL_ACCESSORS(c_wasm_entry_map, Managed<wasm::SignatureMap>)
- enum {
- kInstanceIndex, // instance object.
- kInterpreterHandleIndex, // managed object containing the interpreter.
- kInterpretedFunctionsIndex, // array of interpreter entry code objects.
- kLocalsNamesIndex, // array of array of local names.
- kCWasmEntriesIndex, // array of C_WASM_ENTRY stubs.
- kCWasmEntryMapIndex, // maps signature to index into CWasmEntries.
- kFieldCount
- };
-
- DEF_SIZE(FixedArray)
- DEF_OFFSET(Instance)
- DEF_OFFSET(InterpreterHandle)
- DEF_OFFSET(InterpretedFunctions)
- DEF_OFFSET(LocalsNames)
- DEF_OFFSET(CWasmEntries)
- DEF_OFFSET(CWasmEntryMap)
+ DECL_CAST(WasmDebugInfo)
+
+ // Dispatched behavior.
+ DECL_PRINTER(WasmDebugInfo)
+ DECL_VERIFIER(WasmDebugInfo)
+
+// Layout description.
+#define WASM_DEBUG_INFO_FIELDS(V) \
+ V(kInstanceOffset, kPointerSize) \
+ V(kInterpreterHandleOffset, kPointerSize) \
+ V(kInterpretedFunctionsOffset, kPointerSize) \
+ V(kLocalsNamesOffset, kPointerSize) \
+ V(kCWasmEntriesOffset, kPointerSize) \
+ V(kCWasmEntryMapOffset, kPointerSize) \
+ V(kSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, WASM_DEBUG_INFO_FIELDS)
+#undef WASM_DEBUG_INFO_FIELDS
static Handle<WasmDebugInfo> New(Handle<WasmInstanceObject>);
@@ -683,33 +682,10 @@ class WasmDebugInfo : public FixedArray {
wasm::FunctionSig*);
};
-// Attach function information in the form of deoptimization data to the given
-// code object. This information will be used for generating stack traces,
-// calling imported functions in the interpreter, knowing which function to
-// compile in a lazy compile stub, and more. The deopt data will be a newly
-// allocated FixedArray of length 2, where the first element is a WeakCell
-// containing the WasmInstanceObject, and the second element is the function
-// index.
-// If calling this method repeatedly for the same instance, pass a WeakCell
-// directly in order to avoid creating many cells pointing to the same instance.
-void AttachWasmFunctionInfo(Isolate*, Handle<Code>,
- MaybeHandle<WeakCell> weak_instance,
- int func_index);
-void AttachWasmFunctionInfo(Isolate*, Handle<Code>,
- MaybeHandle<WasmInstanceObject>, int func_index);
-
-struct WasmFunctionInfo {
- MaybeHandle<WasmInstanceObject> instance;
- int func_index;
-};
-WasmFunctionInfo GetWasmFunctionInfo(Isolate*, Handle<Code>);
-
-#undef DECL_OOL_QUERY
-#undef DECL_OOL_CAST
-#undef DECL_GETTER
#undef DECL_OPTIONAL_ACCESSORS
#undef WCM_CONST_OBJECT
#undef WCM_LARGE_NUMBER
+#undef WCM_OBJECT
#undef WCM_OBJECT_OR_WEAK
#undef WCM_SMALL_CONST_NUMBER
#undef WCM_WEAK_LINK
diff --git a/deps/v8/src/wasm/wasm-opcodes.cc b/deps/v8/src/wasm/wasm-opcodes.cc
index ac02b549a0..6e3b3e4daf 100644
--- a/deps/v8/src/wasm/wasm-opcodes.cc
+++ b/deps/v8/src/wasm/wasm-opcodes.cc
@@ -22,6 +22,7 @@ namespace wasm {
#define CASE_I64_OP(name, str) CASE_OP(I64##name, "i64." str)
#define CASE_F32_OP(name, str) CASE_OP(F32##name, "f32." str)
#define CASE_F64_OP(name, str) CASE_OP(F64##name, "f64." str)
+#define CASE_REF_OP(name, str) CASE_OP(Ref##name, "ref." str)
#define CASE_F32x4_OP(name, str) CASE_OP(F32x4##name, "f32x4." str)
#define CASE_I32x4_OP(name, str) CASE_OP(I32x4##name, "i32x4." str)
#define CASE_I16x8_OP(name, str) CASE_OP(I16x8##name, "i16x8." str)
@@ -60,6 +61,12 @@ namespace wasm {
CASE_I32_OP(name, str "32") \
CASE_UNSIGNED_OP(I32, name##8, str "8") \
CASE_UNSIGNED_OP(I32, name##16, str "16")
+#define CASE_UNSIGNED_ALL_OP(name, str) \
+ CASE_U32_OP(name, str) \
+ CASE_I64_OP(name, str "64") \
+ CASE_UNSIGNED_OP(I64, name##8, str "8") \
+ CASE_UNSIGNED_OP(I64, name##16, str "16") \
+ CASE_UNSIGNED_OP(I64, name##32, str "32")
const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
switch (opcode) {
@@ -98,6 +105,9 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_FLOAT_OP(Min, "min")
CASE_FLOAT_OP(Max, "max")
CASE_FLOAT_OP(CopySign, "copysign")
+ CASE_REF_OP(Null, "null")
+ CASE_REF_OP(IsNull, "is_null")
+ CASE_REF_OP(Eq, "eq")
CASE_I32_OP(ConvertI64, "wrap/i64")
CASE_CONVERT_OP(Convert, INT, F32, "f32", "trunc")
CASE_CONVERT_OP(Convert, INT, F64, "f64", "trunc")
@@ -247,15 +257,15 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_S1x16_OP(AllTrue, "all_true")
// Atomic operations.
- CASE_U32_OP(AtomicLoad, "atomic_load")
- CASE_U32_OP(AtomicStore, "atomic_store")
- CASE_U32_OP(AtomicAdd, "atomic_add")
- CASE_U32_OP(AtomicSub, "atomic_sub")
- CASE_U32_OP(AtomicAnd, "atomic_and")
- CASE_U32_OP(AtomicOr, "atomic_or")
- CASE_U32_OP(AtomicXor, "atomic_xor")
- CASE_U32_OP(AtomicExchange, "atomic_xchng")
- CASE_U32_OP(AtomicCompareExchange, "atomic_cmpxchng")
+ CASE_UNSIGNED_ALL_OP(AtomicLoad, "atomic_load")
+ CASE_UNSIGNED_ALL_OP(AtomicStore, "atomic_store")
+ CASE_UNSIGNED_ALL_OP(AtomicAdd, "atomic_add")
+ CASE_UNSIGNED_ALL_OP(AtomicSub, "atomic_sub")
+ CASE_UNSIGNED_ALL_OP(AtomicAnd, "atomic_and")
+ CASE_UNSIGNED_ALL_OP(AtomicOr, "atomic_or")
+ CASE_UNSIGNED_ALL_OP(AtomicXor, "atomic_xor")
+ CASE_UNSIGNED_ALL_OP(AtomicExchange, "atomic_xchng")
+ CASE_UNSIGNED_ALL_OP(AtomicCompareExchange, "atomic_cmpxchng")
default : return "unknown";
// clang-format on
@@ -267,6 +277,7 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
#undef CASE_I64_OP
#undef CASE_F32_OP
#undef CASE_F64_OP
+#undef CASE_REF_OP
#undef CASE_F32x4_OP
#undef CASE_I32x4_OP
#undef CASE_I16x8_OP
@@ -285,6 +296,7 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
#undef CASE_SIMDI_OP
#undef CASE_SIGN_OP
#undef CASE_UNSIGNED_OP
+#undef CASE_UNSIGNED_ALL_OP
#undef CASE_ALL_SIGN_OP
#undef CASE_CONVERT_OP
#undef CASE_CONVERT_SAT_OP
@@ -338,6 +350,16 @@ bool WasmOpcodes::IsSignExtensionOpcode(WasmOpcode opcode) {
}
}
+bool WasmOpcodes::IsAnyRefOpcode(WasmOpcode opcode) {
+ switch (opcode) {
+ case kExprRefNull:
+ case kExprRefIsNull:
+ case kExprRefEq:
+ return true;
+ default:
+ return false;
+ }
+}
std::ostream& operator<<(std::ostream& os, const FunctionSig& sig) {
if (sig.return_count() == 0) os << "v";
for (auto ret : sig.returns()) {
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index c6b87f0556..daf3481daf 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -24,6 +24,7 @@ constexpr ValueType kWasmI64 = MachineRepresentation::kWord64;
constexpr ValueType kWasmF32 = MachineRepresentation::kFloat32;
constexpr ValueType kWasmF64 = MachineRepresentation::kFloat64;
constexpr ValueType kWasmS128 = MachineRepresentation::kSimd128;
+constexpr ValueType kWasmAnyRef = MachineRepresentation::kTaggedPointer;
constexpr ValueType kWasmVar = MachineRepresentation::kTagged;
using FunctionSig = Signature<ValueType>;
@@ -65,7 +66,8 @@ using WasmName = Vector<const char>;
V(I32Const, 0x41, _) \
V(I64Const, 0x42, _) \
V(F32Const, 0x43, _) \
- V(F64Const, 0x44, _)
+ V(F64Const, 0x44, _) \
+ V(RefNull, 0xd0, _)
// Load memory expressions.
#define FOREACH_LOAD_MEM_OPCODE(V) \
@@ -230,7 +232,9 @@ using WasmName = Vector<const char>;
V(I32SExtendI16, 0xc1, i_i) \
V(I64SExtendI8, 0xc2, l_l) \
V(I64SExtendI16, 0xc3, l_l) \
- V(I64SExtendI32, 0xc4, l_l)
+ V(I64SExtendI32, 0xc4, l_l) \
+ V(RefIsNull, 0xd1, i_r) \
+ V(RefEq, 0xd2, i_rr)
// For compatibility with Asm.js.
#define FOREACH_ASMJS_COMPAT_OPCODE(V) \
@@ -245,26 +249,26 @@ using WasmName = Vector<const char>;
V(F64Atan2, 0xcd, d_dd) \
V(F64Pow, 0xce, d_dd) \
V(F64Mod, 0xcf, d_dd) \
- V(I32AsmjsDivS, 0xd0, i_ii) \
- V(I32AsmjsDivU, 0xd1, i_ii) \
- V(I32AsmjsRemS, 0xd2, i_ii) \
- V(I32AsmjsRemU, 0xd3, i_ii) \
- V(I32AsmjsLoadMem8S, 0xd4, i_i) \
- V(I32AsmjsLoadMem8U, 0xd5, i_i) \
- V(I32AsmjsLoadMem16S, 0xd6, i_i) \
- V(I32AsmjsLoadMem16U, 0xd7, i_i) \
- V(I32AsmjsLoadMem, 0xd8, i_i) \
- V(F32AsmjsLoadMem, 0xd9, f_i) \
- V(F64AsmjsLoadMem, 0xda, d_i) \
- V(I32AsmjsStoreMem8, 0xdb, i_ii) \
- V(I32AsmjsStoreMem16, 0xdc, i_ii) \
- V(I32AsmjsStoreMem, 0xdd, i_ii) \
- V(F32AsmjsStoreMem, 0xde, f_if) \
- V(F64AsmjsStoreMem, 0xdf, d_id) \
- V(I32AsmjsSConvertF32, 0xe0, i_f) \
- V(I32AsmjsUConvertF32, 0xe1, i_f) \
- V(I32AsmjsSConvertF64, 0xe2, i_d) \
- V(I32AsmjsUConvertF64, 0xe3, i_d)
+ V(I32AsmjsDivS, 0xd3, i_ii) \
+ V(I32AsmjsDivU, 0xd4, i_ii) \
+ V(I32AsmjsRemS, 0xd5, i_ii) \
+ V(I32AsmjsRemU, 0xd6, i_ii) \
+ V(I32AsmjsLoadMem8S, 0xd7, i_i) \
+ V(I32AsmjsLoadMem8U, 0xd8, i_i) \
+ V(I32AsmjsLoadMem16S, 0xd9, i_i) \
+ V(I32AsmjsLoadMem16U, 0xda, i_i) \
+ V(I32AsmjsLoadMem, 0xdb, i_i) \
+ V(F32AsmjsLoadMem, 0xdc, f_i) \
+ V(F64AsmjsLoadMem, 0xdd, d_i) \
+ V(I32AsmjsStoreMem8, 0xde, i_ii) \
+ V(I32AsmjsStoreMem16, 0xdf, i_ii) \
+ V(I32AsmjsStoreMem, 0xe0, i_ii) \
+ V(F32AsmjsStoreMem, 0xe1, f_if) \
+ V(F64AsmjsStoreMem, 0xe2, d_id) \
+ V(I32AsmjsSConvertF32, 0xe3, i_f) \
+ V(I32AsmjsUConvertF32, 0xe4, i_f) \
+ V(I32AsmjsSConvertF64, 0xe5, i_d) \
+ V(I32AsmjsUConvertF64, 0xe6, i_d)
#define FOREACH_SIMD_0_OPERAND_OPCODE(V) \
V(F32x4Splat, 0xfd00, s_f) \
@@ -414,34 +418,70 @@ using WasmName = Vector<const char>;
V(I64SConvertSatF64, 0xfc06, l_d) \
V(I64UConvertSatF64, 0xfc07, l_d)
-#define FOREACH_ATOMIC_OPCODE(V) \
- V(I32AtomicLoad, 0xfe10, i_i) \
- V(I32AtomicLoad8U, 0xfe12, i_i) \
- V(I32AtomicLoad16U, 0xfe13, i_i) \
- V(I32AtomicStore, 0xfe17, v_ii) \
- V(I32AtomicStore8U, 0xfe19, v_ii) \
- V(I32AtomicStore16U, 0xfe1a, v_ii) \
- V(I32AtomicAdd, 0xfe1e, i_ii) \
- V(I32AtomicAdd8U, 0xfe20, i_ii) \
- V(I32AtomicAdd16U, 0xfe21, i_ii) \
- V(I32AtomicSub, 0xfe25, i_ii) \
- V(I32AtomicSub8U, 0xfe27, i_ii) \
- V(I32AtomicSub16U, 0xfe28, i_ii) \
- V(I32AtomicAnd, 0xfe2c, i_ii) \
- V(I32AtomicAnd8U, 0xfe2e, i_ii) \
- V(I32AtomicAnd16U, 0xfe2f, i_ii) \
- V(I32AtomicOr, 0xfe33, i_ii) \
- V(I32AtomicOr8U, 0xfe35, i_ii) \
- V(I32AtomicOr16U, 0xfe36, i_ii) \
- V(I32AtomicXor, 0xfe3a, i_ii) \
- V(I32AtomicXor8U, 0xfe3c, i_ii) \
- V(I32AtomicXor16U, 0xfe3d, i_ii) \
- V(I32AtomicExchange, 0xfe41, i_ii) \
- V(I32AtomicExchange8U, 0xfe43, i_ii) \
- V(I32AtomicExchange16U, 0xfe44, i_ii) \
- V(I32AtomicCompareExchange, 0xfe48, i_iii) \
- V(I32AtomicCompareExchange8U, 0xfe4a, i_iii) \
- V(I32AtomicCompareExchange16U, 0xfe4b, i_iii)
+#define FOREACH_ATOMIC_OPCODE(V) \
+ V(I32AtomicLoad, 0xfe10, i_i) \
+ V(I64AtomicLoad, 0xfe11, l_i) \
+ V(I32AtomicLoad8U, 0xfe12, i_i) \
+ V(I32AtomicLoad16U, 0xfe13, i_i) \
+ V(I64AtomicLoad8U, 0xfe14, l_i) \
+ V(I64AtomicLoad16U, 0xfe15, l_i) \
+ V(I64AtomicLoad32U, 0xfe16, l_i) \
+ V(I32AtomicStore, 0xfe17, v_ii) \
+ V(I64AtomicStore, 0xfe18, v_il) \
+ V(I32AtomicStore8U, 0xfe19, v_ii) \
+ V(I32AtomicStore16U, 0xfe1a, v_ii) \
+ V(I64AtomicStore8U, 0xfe1b, v_il) \
+ V(I64AtomicStore16U, 0xfe1c, v_il) \
+ V(I64AtomicStore32U, 0xfe1d, v_il) \
+ V(I32AtomicAdd, 0xfe1e, i_ii) \
+ V(I64AtomicAdd, 0xfe1f, l_il) \
+ V(I32AtomicAdd8U, 0xfe20, i_ii) \
+ V(I32AtomicAdd16U, 0xfe21, i_ii) \
+ V(I64AtomicAdd8U, 0xfe22, l_il) \
+ V(I64AtomicAdd16U, 0xfe23, l_il) \
+ V(I64AtomicAdd32U, 0xfe24, l_il) \
+ V(I32AtomicSub, 0xfe25, i_ii) \
+ V(I64AtomicSub, 0xfe26, l_il) \
+ V(I32AtomicSub8U, 0xfe27, i_ii) \
+ V(I32AtomicSub16U, 0xfe28, i_ii) \
+ V(I64AtomicSub8U, 0xfe29, l_il) \
+ V(I64AtomicSub16U, 0xfe2a, l_il) \
+ V(I64AtomicSub32U, 0xfe2b, l_il) \
+ V(I32AtomicAnd, 0xfe2c, i_ii) \
+ V(I64AtomicAnd, 0xfe2d, l_il) \
+ V(I32AtomicAnd8U, 0xfe2e, i_ii) \
+ V(I32AtomicAnd16U, 0xfe2f, i_ii) \
+ V(I64AtomicAnd8U, 0xfe30, l_il) \
+ V(I64AtomicAnd16U, 0xfe31, l_il) \
+ V(I64AtomicAnd32U, 0xfe32, l_il) \
+ V(I32AtomicOr, 0xfe33, i_ii) \
+ V(I64AtomicOr, 0xfe34, l_il) \
+ V(I32AtomicOr8U, 0xfe35, i_ii) \
+ V(I32AtomicOr16U, 0xfe36, i_ii) \
+ V(I64AtomicOr8U, 0xfe37, l_il) \
+ V(I64AtomicOr16U, 0xfe38, l_il) \
+ V(I64AtomicOr32U, 0xfe39, l_il) \
+ V(I32AtomicXor, 0xfe3a, i_ii) \
+ V(I64AtomicXor, 0xfe3b, l_il) \
+ V(I32AtomicXor8U, 0xfe3c, i_ii) \
+ V(I32AtomicXor16U, 0xfe3d, i_ii) \
+ V(I64AtomicXor8U, 0xfe3e, l_il) \
+ V(I64AtomicXor16U, 0xfe3f, l_il) \
+ V(I64AtomicXor32U, 0xfe40, l_il) \
+ V(I32AtomicExchange, 0xfe41, i_ii) \
+ V(I64AtomicExchange, 0xfe42, l_il) \
+ V(I32AtomicExchange8U, 0xfe43, i_ii) \
+ V(I32AtomicExchange16U, 0xfe44, i_ii) \
+ V(I64AtomicExchange8U, 0xfe45, l_il) \
+ V(I64AtomicExchange16U, 0xfe46, l_il) \
+ V(I64AtomicExchange32U, 0xfe47, l_il) \
+ V(I32AtomicCompareExchange, 0xfe48, i_iii) \
+ V(I64AtomicCompareExchange, 0xfe49, l_ill) \
+ V(I32AtomicCompareExchange8U, 0xfe4a, i_iii) \
+ V(I32AtomicCompareExchange16U, 0xfe4b, i_iii) \
+ V(I64AtomicCompareExchange8U, 0xfe4c, l_ill) \
+ V(I64AtomicCompareExchange16U, 0xfe4d, l_ill) \
+ V(I64AtomicCompareExchange32U, 0xfe4e, l_ill)
// All opcodes.
#define FOREACH_OPCODE(V) \
@@ -460,39 +500,43 @@ using WasmName = Vector<const char>;
FOREACH_NUMERIC_OPCODE(V)
// All signatures.
-#define FOREACH_SIGNATURE(V) \
- FOREACH_SIMD_SIGNATURE(V) \
- V(i_ii, kWasmI32, kWasmI32, kWasmI32) \
- V(i_i, kWasmI32, kWasmI32) \
- V(i_v, kWasmI32) \
- V(i_ff, kWasmI32, kWasmF32, kWasmF32) \
- V(i_f, kWasmI32, kWasmF32) \
- V(i_dd, kWasmI32, kWasmF64, kWasmF64) \
- V(i_d, kWasmI32, kWasmF64) \
- V(i_l, kWasmI32, kWasmI64) \
- V(l_ll, kWasmI64, kWasmI64, kWasmI64) \
- V(i_ll, kWasmI32, kWasmI64, kWasmI64) \
- V(l_l, kWasmI64, kWasmI64) \
- V(l_i, kWasmI64, kWasmI32) \
- V(l_f, kWasmI64, kWasmF32) \
- V(l_d, kWasmI64, kWasmF64) \
- V(f_ff, kWasmF32, kWasmF32, kWasmF32) \
- V(f_f, kWasmF32, kWasmF32) \
- V(f_d, kWasmF32, kWasmF64) \
- V(f_i, kWasmF32, kWasmI32) \
- V(f_l, kWasmF32, kWasmI64) \
- V(d_dd, kWasmF64, kWasmF64, kWasmF64) \
- V(d_d, kWasmF64, kWasmF64) \
- V(d_f, kWasmF64, kWasmF32) \
- V(d_i, kWasmF64, kWasmI32) \
- V(d_l, kWasmF64, kWasmI64) \
- V(v_ii, kWasmStmt, kWasmI32, kWasmI32) \
- V(v_id, kWasmStmt, kWasmI32, kWasmF64) \
- V(d_id, kWasmF64, kWasmI32, kWasmF64) \
- V(v_if, kWasmStmt, kWasmI32, kWasmF32) \
- V(f_if, kWasmF32, kWasmI32, kWasmF32) \
- V(v_il, kWasmI64, kWasmI32, kWasmI64) \
- V(i_iii, kWasmI32, kWasmI32, kWasmI32, kWasmI32)
+#define FOREACH_SIGNATURE(V) \
+ FOREACH_SIMD_SIGNATURE(V) \
+ V(i_ii, kWasmI32, kWasmI32, kWasmI32) \
+ V(i_i, kWasmI32, kWasmI32) \
+ V(i_v, kWasmI32) \
+ V(i_ff, kWasmI32, kWasmF32, kWasmF32) \
+ V(i_f, kWasmI32, kWasmF32) \
+ V(i_dd, kWasmI32, kWasmF64, kWasmF64) \
+ V(i_d, kWasmI32, kWasmF64) \
+ V(i_l, kWasmI32, kWasmI64) \
+ V(l_ll, kWasmI64, kWasmI64, kWasmI64) \
+ V(i_ll, kWasmI32, kWasmI64, kWasmI64) \
+ V(l_l, kWasmI64, kWasmI64) \
+ V(l_i, kWasmI64, kWasmI32) \
+ V(l_f, kWasmI64, kWasmF32) \
+ V(l_d, kWasmI64, kWasmF64) \
+ V(f_ff, kWasmF32, kWasmF32, kWasmF32) \
+ V(f_f, kWasmF32, kWasmF32) \
+ V(f_d, kWasmF32, kWasmF64) \
+ V(f_i, kWasmF32, kWasmI32) \
+ V(f_l, kWasmF32, kWasmI64) \
+ V(d_dd, kWasmF64, kWasmF64, kWasmF64) \
+ V(d_d, kWasmF64, kWasmF64) \
+ V(d_f, kWasmF64, kWasmF32) \
+ V(d_i, kWasmF64, kWasmI32) \
+ V(d_l, kWasmF64, kWasmI64) \
+ V(v_ii, kWasmStmt, kWasmI32, kWasmI32) \
+ V(v_id, kWasmStmt, kWasmI32, kWasmF64) \
+ V(d_id, kWasmF64, kWasmI32, kWasmF64) \
+ V(v_if, kWasmStmt, kWasmI32, kWasmF32) \
+ V(f_if, kWasmF32, kWasmI32, kWasmF32) \
+ V(v_il, kWasmStmt, kWasmI32, kWasmI64) \
+ V(l_il, kWasmI64, kWasmI32, kWasmI64) \
+ V(i_iii, kWasmI32, kWasmI32, kWasmI32, kWasmI32) \
+ V(l_ill, kWasmI64, kWasmI32, kWasmI64, kWasmI64) \
+ V(i_r, kWasmI32, kWasmAnyRef) \
+ V(i_rr, kWasmI32, kWasmAnyRef, kWasmAnyRef)
#define FOREACH_SIMD_SIGNATURE(V) \
V(s_s, kWasmS128, kWasmS128) \
@@ -573,6 +617,21 @@ class LoadType {
constexpr ValueType value_type() const { return kValueType[val_]; }
constexpr MachineType mem_type() const { return kMemType[val_]; }
+ static LoadType ForValueType(ValueType type) {
+ switch (type) {
+ case kWasmI32:
+ return kI32Load;
+ case kWasmI64:
+ return kI64Load;
+ case kWasmF32:
+ return kF32Load;
+ case kWasmF64:
+ return kF64Load;
+ default:
+ UNREACHABLE();
+ }
+ }
+
private:
const LoadTypeValue val_;
@@ -625,6 +684,21 @@ class StoreType {
constexpr ValueType value_type() const { return kValueType[val_]; }
constexpr ValueType mem_rep() const { return kMemRep[val_]; }
+ static StoreType ForValueType(ValueType type) {
+ switch (type) {
+ case kWasmI32:
+ return kI32Store;
+ case kWasmI64:
+ return kI64Store;
+ case kWasmF32:
+ return kF32Store;
+ case kWasmF64:
+ return kF64Store;
+ default:
+ UNREACHABLE();
+ }
+ }
+
private:
const StoreTypeValue val_;
@@ -656,6 +730,7 @@ class V8_EXPORT_PRIVATE WasmOpcodes {
static bool IsPrefixOpcode(WasmOpcode opcode);
static bool IsControlOpcode(WasmOpcode opcode);
static bool IsSignExtensionOpcode(WasmOpcode opcode);
+ static bool IsAnyRefOpcode(WasmOpcode opcode);
// Check whether the given opcode always jumps, i.e. all instructions after
// this one in the current block are dead. Returns false for |end|.
static bool IsUnconditionalJump(WasmOpcode opcode);
@@ -681,6 +756,8 @@ class V8_EXPORT_PRIVATE WasmOpcodes {
return kLocalF64;
case kWasmS128:
return kLocalS128;
+ case kWasmAnyRef:
+ return kLocalAnyRef;
case kWasmStmt:
return kLocalVoid;
default:
@@ -698,6 +775,8 @@ class V8_EXPORT_PRIVATE WasmOpcodes {
return MachineType::Float32();
case kWasmF64:
return MachineType::Float64();
+ case kWasmAnyRef:
+ return MachineType::TaggedPointer();
case kWasmS128:
return MachineType::Simd128();
case kWasmStmt:
@@ -719,6 +798,8 @@ class V8_EXPORT_PRIVATE WasmOpcodes {
return kWasmF32;
case MachineRepresentation::kFloat64:
return kWasmF64;
+ case MachineRepresentation::kTaggedPointer:
+ return kWasmAnyRef;
case MachineRepresentation::kSimd128:
return kWasmS128;
default:
@@ -736,6 +817,8 @@ class V8_EXPORT_PRIVATE WasmOpcodes {
return 'f';
case kWasmF64:
return 'd';
+ case kWasmAnyRef:
+ return 'r';
case kWasmS128:
return 's';
case kWasmStmt:
@@ -757,6 +840,8 @@ class V8_EXPORT_PRIVATE WasmOpcodes {
return "f32";
case kWasmF64:
return "f64";
+ case kWasmAnyRef:
+ return "ref";
case kWasmS128:
return "s128";
case kWasmStmt:
@@ -777,7 +862,8 @@ struct WasmInitExpr {
kI32Const,
kI64Const,
kF32Const,
- kF64Const
+ kF64Const,
+ kAnyRefConst,
} kind;
union {
diff --git a/deps/v8/src/wasm/wasm-result.cc b/deps/v8/src/wasm/wasm-result.cc
index 6deccae6dc..3fe63b9c71 100644
--- a/deps/v8/src/wasm/wasm-result.cc
+++ b/deps/v8/src/wasm/wasm-result.cc
@@ -4,7 +4,7 @@
#include "src/wasm/wasm-result.h"
-#include "src/factory.h"
+#include "src/heap/factory.h"
#include "src/heap/heap.h"
#include "src/isolate-inl.h"
#include "src/objects.h"
diff --git a/deps/v8/src/wasm/wasm-result.h b/deps/v8/src/wasm/wasm-result.h
index 8250db9040..a1e5a885af 100644
--- a/deps/v8/src/wasm/wasm-result.h
+++ b/deps/v8/src/wasm/wasm-result.h
@@ -116,7 +116,7 @@ class V8_EXPORT_PRIVATE ErrorThrower {
}
// Create and return exception object.
- MUST_USE_RESULT Handle<Object> Reify();
+ V8_WARN_UNUSED_RESULT Handle<Object> Reify();
// Reset any error which was set on this thrower.
void Reset();
diff --git a/deps/v8/src/wasm/wasm-serialization.cc b/deps/v8/src/wasm/wasm-serialization.cc
index 240ffbca3d..a069823194 100644
--- a/deps/v8/src/wasm/wasm-serialization.cc
+++ b/deps/v8/src/wasm/wasm-serialization.cc
@@ -11,6 +11,7 @@
#include "src/objects.h"
#include "src/snapshot/code-serializer.h"
#include "src/snapshot/serializer-common.h"
+#include "src/utils.h"
#include "src/version.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
@@ -23,6 +24,7 @@
namespace v8 {
namespace internal {
namespace wasm {
+
namespace {
class Writer {
@@ -35,7 +37,7 @@ class Writer {
os << "wrote: " << (size_t)value << " sized: " << sizeof(T) << std::endl;
}
DCHECK_GE(buffer_.size(), sizeof(T));
- memcpy(buffer_.start(), reinterpret_cast<const byte*>(&value), sizeof(T));
+ WriteUnalignedValue(buffer_.start(), value);
buffer_ = buffer_ + sizeof(T);
}
@@ -63,8 +65,7 @@ class Reader {
template <typename T>
T Read() {
DCHECK_GE(buffer_.size(), sizeof(T));
- T ret;
- memcpy(reinterpret_cast<byte*>(&ret), buffer_.start(), sizeof(T));
+ T ret = ReadUnalignedValue<T>(buffer_.start());
buffer_ = buffer_ + sizeof(T);
if (FLAG_wasm_trace_serialization) {
OFStream os(stdout);
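
The Write() and Read() bodies above now defer to the {Write,Read}UnalignedValue helpers pulled in through src/utils.h instead of open-coded memcpy. Such helpers are conventionally thin memcpy wrappers that avoid undefined behaviour on unaligned addresses; the sketch below shows the pattern with illustrative names, not V8's exact declarations.

#include <cassert>
#include <cstdint>
#include <cstring>

// Read/write a value of type T at a possibly unaligned address without
// invoking undefined behaviour; memcpy is the portable way to do this.
template <typename T>
T ReadUnaligned(const void* p) {
  T result;
  std::memcpy(&result, p, sizeof(T));
  return result;
}

template <typename T>
void WriteUnaligned(void* p, T value) {
  std::memcpy(p, &value, sizeof(T));
}

int main() {
  unsigned char buffer[sizeof(uint64_t) + 1];
  // Deliberately misaligned slot inside the buffer.
  WriteUnaligned<uint64_t>(buffer + 1, 0x1122334455667788ull);
  assert(ReadUnaligned<uint64_t>(buffer + 1) == 0x1122334455667788ull);
  return 0;
}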
@@ -99,11 +100,14 @@ class Reader {
constexpr size_t kVersionSize = 4 * sizeof(uint32_t);
+// Start from 1 so an encoded stub id is not confused with an encoded builtin.
+constexpr int kFirstStubId = 1;
+
void WriteVersion(Isolate* isolate, Vector<byte> buffer) {
DCHECK_GE(buffer.size(), kVersionSize);
Writer writer(buffer);
writer.Write(SerializedData::ComputeMagicNumber(
- ExternalReferenceTable::instance(isolate)));
+ isolate->heap()->external_reference_table()));
writer.Write(Version::Hash());
writer.Write(static_cast<uint32_t>(CpuFeatures::SupportedFeatures()));
writer.Write(FlagList::Hash());
@@ -117,6 +121,35 @@ bool IsSupportedVersion(Isolate* isolate, const Vector<const byte> buffer) {
return false;
}
+// On Intel, call sites are encoded as a displacement. For linking
+// and for serialization/deserialization, we want to store/retrieve
+// a tag (the function index). On Intel, that means accessing the
+// raw displacement. Everywhere else, that simply means accessing
+// the target address.
+void SetWasmCalleeTag(RelocInfo* rinfo, uint32_t tag) {
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
+ *(reinterpret_cast<uint32_t*>(rinfo->target_address_address())) = tag;
+#else
+ Address addr = reinterpret_cast<Address>(tag);
+ if (rinfo->rmode() == RelocInfo::EXTERNAL_REFERENCE) {
+ rinfo->set_target_external_reference(addr, SKIP_ICACHE_FLUSH);
+ } else {
+ rinfo->set_target_address(addr, SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
+ }
+#endif
+}
+
+uint32_t GetWasmCalleeTag(RelocInfo* rinfo) {
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
+ return *(reinterpret_cast<uint32_t*>(rinfo->target_address_address()));
+#else
+ Address addr = rinfo->rmode() == RelocInfo::EXTERNAL_REFERENCE
+ ? rinfo->target_external_reference()
+ : rinfo->target_address();
+ return static_cast<uint32_t>(reinterpret_cast<size_t>(addr));
+#endif
+}
+
} // namespace
enum SerializationSection { Init, Metadata, Stubs, CodeSection, Done };
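
The comment above explains the trick behind SetWasmCalleeTag/GetWasmCalleeTag: on x64/ia32 the 32-bit displacement of a call site doubles as scratch storage for the serialized tag, while other targets patch the full target address through RelocInfo. A standalone sketch of that displacement round-trip, using a hypothetical raw byte buffer rather than the RelocInfo API:

#include <cassert>
#include <cstdint>
#include <cstring>

// On Intel, a near call is encoded as E8 <imm32 displacement>. During
// serialization the displacement bytes are overwritten with a tag and
// recovered again on deserialization.
void SetTag(uint8_t* displacement_slot, uint32_t tag) {
  std::memcpy(displacement_slot, &tag, sizeof(tag));
}

uint32_t GetTag(const uint8_t* displacement_slot) {
  uint32_t tag;
  std::memcpy(&tag, displacement_slot, sizeof(tag));
  return tag;
}

int main() {
  uint8_t call_site[5] = {0xE8, 0, 0, 0, 0};  // call rel32
  SetTag(call_site + 1, 42);                  // stash function index 42
  assert(GetTag(call_site + 1) == 42);        // ...and read it back
  return 0;
}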
@@ -133,7 +166,6 @@ class V8_EXPORT_PRIVATE NativeModuleSerializer {
static size_t GetCodeHeaderSize();
size_t MeasureCode(const WasmCode*) const;
size_t MeasureCopiedStubs() const;
- ByteArray* GetSourcePositions(const WasmCode*) const;
void BufferHeader();
// we buffer all the stubs because they are small
@@ -188,7 +220,7 @@ NativeModuleSerializer::NativeModuleSerializer(Isolate* isolate,
DCHECK_NOT_NULL(native_module_);
// TODO(mtrofin): persist the export wrappers. Ideally, we'd only persist
// the unique ones, i.e. the cache.
- ExternalReferenceTable* table = ExternalReferenceTable::instance(isolate_);
+ ExternalReferenceTable* table = isolate_->heap()->external_reference_table();
for (uint32_t i = 0; i < table->size(); ++i) {
Address addr = table->address(i);
reference_table_lookup_.insert(std::make_pair(addr, i));
@@ -221,27 +253,23 @@ void NativeModuleSerializer::BufferHeader() {
}
size_t NativeModuleSerializer::GetCodeHeaderSize() {
- return sizeof(size_t) + // size of this section
- sizeof(size_t) + // offset of constant pool
- sizeof(size_t) + // offset of safepoint table
- sizeof(size_t) + // offset of handler table
- sizeof(uint32_t) + // stack slots
- sizeof(size_t) + // code size
- sizeof(size_t) + // reloc size
- sizeof(uint32_t) + // source positions size
- sizeof(size_t) + // protected instructions size
- sizeof(bool); // is_liftoff
+ return sizeof(size_t) + // size of this section
+ sizeof(size_t) + // offset of constant pool
+ sizeof(size_t) + // offset of safepoint table
+ sizeof(size_t) + // offset of handler table
+ sizeof(uint32_t) + // stack slots
+ sizeof(size_t) + // code size
+ sizeof(size_t) + // reloc size
+ sizeof(size_t) + // source positions size
+ sizeof(size_t) + // protected instructions size
+ sizeof(WasmCode::Tier); // tier
}
size_t NativeModuleSerializer::MeasureCode(const WasmCode* code) const {
- ByteArray* source_positions = GetSourcePositions(code);
return GetCodeHeaderSize() + code->instructions().size() + // code
code->reloc_info().size() + // reloc info
- (source_positions == nullptr
- ? 0
- : static_cast<uint32_t>(
- source_positions->length())) + // source positions
- code->protected_instructions().size() *
+ code->source_positions().size() + // source pos.
+ code->protected_instructions().size() * // protected inst.
sizeof(trap_handler::ProtectedInstructionData);
}
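
GetCodeHeaderSize() and MeasureCode() above now account for a size_t source-position length and a WasmCode::Tier field in place of the old is_liftoff bool. Written out as a struct purely for illustration (the serializer emits these fields one by one through Writer::Write(), never as a packed struct, and Tier below is a stand-in for the V8-internal enum), the per-code header looks roughly like:

#include <cstddef>
#include <cstdint>

// Placeholder for wasm::WasmCode::Tier; the real type is V8-internal.
enum class Tier : int { kLiftoff, kTurbofan };

// Illustrative layout only; field order follows the size computation above.
struct CodeHeader {
  size_t total_size;                   // MeasureCode(code)
  size_t constant_pool_offset;
  size_t safepoint_table_offset;
  size_t handler_table_offset;
  uint32_t stack_slots;
  size_t code_size;                    // instructions().size()
  size_t reloc_size;                   // reloc_info().size()
  size_t source_positions_size;        // now size_t, was uint32_t
  size_t protected_instructions_size;
  Tier tier;                           // replaces the old is_liftoff bool
};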
@@ -285,7 +313,7 @@ void NativeModuleSerializer::BufferCopiedStubs() {
Writer writer(remaining_);
writer.Write(
static_cast<uint32_t>((buff_size - sizeof(uint32_t)) / sizeof(uint32_t)));
- uint32_t stub_id = 0;
+ uint32_t stub_id = kFirstStubId;
for (auto pair : native_module_->stubs_) {
uint32_t key = pair.first;
@@ -306,19 +334,6 @@ void NativeModuleSerializer::BufferCopiedStubs() {
}
}
-ByteArray* NativeModuleSerializer::GetSourcePositions(
- const WasmCode* code) const {
- if (code->kind() != WasmCode::kFunction) return nullptr;
- uint32_t index = code->index();
- Object* source_positions_entry =
- native_module_->compiled_module()->source_positions()->get(
- static_cast<int>(index));
- if (source_positions_entry->IsByteArray()) {
- return ByteArray::cast(source_positions_entry);
- }
- return nullptr;
-}
-
void NativeModuleSerializer::BufferCurrentWasmCode() {
const WasmCode* code = native_module_->GetCode(index_);
size_t size = MeasureCode(code);
@@ -331,14 +346,6 @@ void NativeModuleSerializer::BufferCodeInAllocatedScratch(
const WasmCode* code) {
// We write the address, the size, and then copy the code as-is, followed
// by reloc info, followed by source positions.
- ByteArray* source_positions_entry = GetSourcePositions(code);
- Address source_positions = nullptr;
- uint32_t source_positions_size = 0;
- if (source_positions_entry != nullptr) {
- source_positions = source_positions_entry->GetDataStartAddress();
- source_positions_size =
- static_cast<uint32_t>(source_positions_entry->length());
- }
Writer writer(remaining_);
// write the header
writer.Write(MeasureCode(code));
@@ -348,15 +355,15 @@ void NativeModuleSerializer::BufferCodeInAllocatedScratch(
writer.Write(code->stack_slots());
writer.Write(code->instructions().size());
writer.Write(code->reloc_info().size());
- writer.Write(source_positions_size);
+ writer.Write(code->source_positions().size());
writer.Write(code->protected_instructions().size());
- writer.Write(code->is_liftoff());
+ writer.Write(code->tier());
// next is the code, which we have to reloc.
Address serialized_code_start = writer.current_buffer().start();
// write the code and everything else
writer.WriteVector(code->instructions());
writer.WriteVector(code->reloc_info());
- writer.WriteVector({source_positions, source_positions_size});
+ writer.WriteVector(code->source_positions());
writer.WriteVector(
{reinterpret_cast<const byte*>(code->protected_instructions().data()),
sizeof(trap_handler::ProtectedInstructionData) *
@@ -364,7 +371,8 @@ void NativeModuleSerializer::BufferCodeInAllocatedScratch(
// now relocate the code
int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
- RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+ RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
+ RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE);
RelocIterator orig_iter(code->instructions(), code->reloc_info(),
code->constant_pool(), mask);
for (RelocIterator
@@ -386,7 +394,16 @@ void NativeModuleSerializer::BufferCodeInAllocatedScratch(
} break;
case RelocInfo::RUNTIME_ENTRY: {
Address orig_target = orig_iter.rinfo()->target_address();
- uint32_t tag = reference_table_lookup_[orig_target];
+ auto ref_iter = reference_table_lookup_.find(orig_target);
+ DCHECK(ref_iter != reference_table_lookup_.end());
+ uint32_t tag = ref_iter->second;
+ SetWasmCalleeTag(iter.rinfo(), tag);
+ } break;
+ case RelocInfo::EXTERNAL_REFERENCE: {
+ Address orig_target = orig_iter.rinfo()->target_external_reference();
+ auto ref_iter = reference_table_lookup_.find(orig_target);
+ DCHECK(ref_iter != reference_table_lookup_.end());
+ uint32_t tag = ref_iter->second;
SetWasmCalleeTag(iter.rinfo(), tag);
} break;
default:
@@ -460,14 +477,6 @@ size_t NativeModuleSerializer::Write(Vector<byte> dest) {
// static
std::pair<std::unique_ptr<const byte[]>, size_t> SerializeNativeModule(
Isolate* isolate, Handle<WasmCompiledModule> compiled_module) {
- if (!FLAG_wasm_jit_to_native) {
- std::unique_ptr<ScriptData> script_data =
- WasmCompiledModuleSerializer::SerializeWasmModule(isolate,
- compiled_module);
- script_data->ReleaseDataOwnership();
- size_t size = static_cast<size_t>(script_data->length());
- return {std::unique_ptr<const byte[]>(script_data->data()), size};
- }
NativeModule* native_module = compiled_module->GetNativeModule();
NativeModuleSerializer serializer(isolate, native_module);
size_t version_size = kVersionSize;
@@ -500,7 +509,6 @@ bool NativeModuleDeserializer::Read(Vector<const byte> data) {
for (; index_ < native_module_->FunctionCount(); ++index_) {
if (!ReadCode()) return false;
}
- native_module_->LinkAll();
return data.size() - unread_.size();
}
@@ -543,9 +551,10 @@ bool NativeModuleDeserializer::ReadCode() {
uint32_t stack_slot_count = reader.Read<uint32_t>();
size_t code_size = reader.Read<size_t>();
size_t reloc_size = reader.Read<size_t>();
- uint32_t source_position_size = reader.Read<uint32_t>();
+ size_t source_position_size = reader.Read<size_t>();
size_t protected_instructions_size = reader.Read<size_t>();
- bool is_liftoff = reader.Read<bool>();
+ WasmCode::Tier tier = reader.Read<WasmCode::Tier>();
+
std::shared_ptr<ProtectedInstructions> protected_instructions(
new ProtectedInstructions(protected_instructions_size));
DCHECK_EQ(protected_instructions_size, protected_instructions->size());
@@ -556,18 +565,24 @@ bool NativeModuleDeserializer::ReadCode() {
reloc_info.reset(new byte[reloc_size]);
reader.ReadIntoVector({reloc_info.get(), reloc_size});
}
+ std::unique_ptr<byte[]> source_pos;
+ if (source_position_size > 0) {
+ source_pos.reset(new byte[source_position_size]);
+ reader.ReadIntoVector({source_pos.get(), source_position_size});
+ }
WasmCode* ret = native_module_->AddOwnedCode(
- code_buffer, std::move(reloc_info), reloc_size, Just(index_),
- WasmCode::kFunction, constant_pool_offset, stack_slot_count,
- safepoint_table_offset, handler_table_offset, protected_instructions,
- is_liftoff);
- if (ret == nullptr) return false;
+ code_buffer, std::move(reloc_info), reloc_size, std::move(source_pos),
+ source_position_size, Just(index_), WasmCode::kFunction,
+ constant_pool_offset, stack_slot_count, safepoint_table_offset,
+ handler_table_offset, protected_instructions, tier,
+ WasmCode::kNoFlushICache);
native_module_->code_table_[index_] = ret;
// now relocate the code
int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
- RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+ RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
+ RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE);
for (RelocIterator iter(ret->instructions(), ret->reloc_info(),
ret->constant_pool(), mask);
!iter.done(); iter.next()) {
@@ -576,7 +591,7 @@ bool NativeModuleDeserializer::ReadCode() {
case RelocInfo::EMBEDDED_OBJECT: {
// We only expect {undefined}. We check for that when we add code.
iter.rinfo()->set_target_object(isolate_->heap()->undefined_value(),
- SKIP_WRITE_BARRIER);
+ SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
break;
}
case RelocInfo::CODE_TARGET: {
@@ -587,26 +602,29 @@ bool NativeModuleDeserializer::ReadCode() {
break;
}
case RelocInfo::RUNTIME_ENTRY: {
- uint32_t orig_target = static_cast<uint32_t>(
- reinterpret_cast<intptr_t>(iter.rinfo()->target_address()));
+ uint32_t tag = GetWasmCalleeTag(iter.rinfo());
Address address =
- ExternalReferenceTable::instance(isolate_)->address(orig_target);
+ isolate_->heap()->external_reference_table()->address(tag);
iter.rinfo()->set_target_runtime_entry(address, SKIP_WRITE_BARRIER,
SKIP_ICACHE_FLUSH);
break;
}
- default:
+ case RelocInfo::EXTERNAL_REFERENCE: {
+ uint32_t tag = GetWasmCalleeTag(iter.rinfo());
+ Address address =
+ isolate_->heap()->external_reference_table()->address(tag);
+ iter.rinfo()->set_target_external_reference(address, SKIP_ICACHE_FLUSH);
break;
+ }
+ default:
+ UNREACHABLE();
}
}
- if (source_position_size > 0) {
- Handle<ByteArray> source_positions = isolate_->factory()->NewByteArray(
- static_cast<int>(source_position_size), TENURED);
- reader.ReadIntoVector(
- {source_positions->GetDataStartAddress(), source_position_size});
- native_module_->compiled_module()->source_positions()->set(
- static_cast<int>(index_), *source_positions);
- }
+ // Flush the i-cache here instead of in AddOwnedCode, to include the changes
+ // made while iterating over the RelocInfo above.
+ Assembler::FlushICache(ret->instructions().start(),
+ ret->instructions().size());
+
if (protected_instructions_size > 0) {
reader.ReadIntoVector(
{reinterpret_cast<byte*>(protected_instructions->data()),
@@ -624,22 +642,12 @@ Address NativeModuleDeserializer::GetTrampolineOrStubFromTag(uint32_t tag) {
return native_module_->GetLocalAddressFor(handle(builtin));
} else {
DCHECK_EQ(tag & 0xFFFF0000, 0);
- return stubs_[tag];
+ return stubs_[tag - kFirstStubId];
}
}
MaybeHandle<WasmCompiledModule> DeserializeNativeModule(
Isolate* isolate, Vector<const byte> data, Vector<const byte> wire_bytes) {
- if (!FLAG_wasm_jit_to_native) {
- ScriptData script_data(data.start(), data.length());
- Handle<FixedArray> compiled_module;
- if (!WasmCompiledModuleSerializer::DeserializeWasmModule(
- isolate, &script_data, wire_bytes)
- .ToHandle(&compiled_module)) {
- return {};
- }
- return Handle<WasmCompiledModule>::cast(compiled_module);
- }
if (!IsWasmCodegenAllowed(isolate, isolate->native_context())) {
return {};
}
@@ -672,11 +680,10 @@ MaybeHandle<WasmCompiledModule> DeserializeNativeModule(
Handle<FixedArray> export_wrappers = isolate->factory()->NewFixedArray(
static_cast<int>(export_wrappers_size), TENURED);
- Handle<WasmCompiledModule> compiled_module = WasmCompiledModule::New(
- isolate, shared->module(), isolate->factory()->empty_fixed_array(),
- export_wrappers, std::vector<wasm::GlobalHandleAddress>(),
- trap_handler::IsTrapHandlerEnabled());
- compiled_module->OnWasmModuleDecodingComplete(shared);
+ Handle<WasmCompiledModule> compiled_module =
+ WasmCompiledModule::New(isolate, shared->module(), export_wrappers,
+ trap_handler::IsTrapHandlerEnabled());
+ compiled_module->set_shared(*shared);
script->set_wasm_compiled_module(*compiled_module);
NativeModuleDeserializer deserializer(isolate,
compiled_module->GetNativeModule());
diff --git a/deps/v8/src/wasm/wasm-text.cc b/deps/v8/src/wasm/wasm-text.cc
index 1619241332..15ee678f93 100644
--- a/deps/v8/src/wasm/wasm-text.cc
+++ b/deps/v8/src/wasm/wasm-text.cc
@@ -46,7 +46,7 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
// Print the function signature.
os << "func";
- WasmName fun_name = wire_bytes.GetNameOrNull(fun);
+ WasmName fun_name = wire_bytes.GetNameOrNull(fun, module);
if (IsValidFunctionName(fun_name)) {
os << " $";
os.write(fun_name.start(), fun_name.length());
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index eef4158f53..3d3608cea7 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -303,7 +303,8 @@ Address RelocInfo::target_address() {
Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_) ||
- rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE);
+ IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
+ IsOffHeapTarget(rmode_));
return reinterpret_cast<Address>(pc_);
}
@@ -335,12 +336,28 @@ Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
}
}
+void RelocInfo::set_wasm_code_table_entry(Address target,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(rmode_ == RelocInfo::WASM_CODE_TABLE_ENTRY);
+ Memory::Address_at(pc_) = target;
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ Assembler::FlushICache(pc_, sizeof(Address));
+ }
+}
Address RelocInfo::target_external_reference() {
DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
return Memory::Address_at(pc_);
}
+void RelocInfo::set_target_external_reference(
+ Address target, ICacheFlushMode icache_flush_mode) {
+ DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
+ Memory::Address_at(pc_) = target;
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ Assembler::FlushICache(pc_, sizeof(Address));
+ }
+}
Address RelocInfo::target_internal_reference() {
DCHECK(rmode_ == INTERNAL_REFERENCE);
@@ -383,6 +400,11 @@ void RelocInfo::set_target_runtime_entry(Address target,
}
}
+Address RelocInfo::target_off_heap_target() {
+ DCHECK(IsOffHeapTarget(rmode_));
+ return Memory::Address_at(pc_);
+}
+
void RelocInfo::WipeOut() {
if (IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_)) {
@@ -410,6 +432,8 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
visitor->VisitInternalReference(host(), this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(host(), this);
+ } else if (RelocInfo::IsOffHeapTarget(mode)) {
+ visitor->VisitOffHeapTarget(host(), this);
}
}
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 0ec50147fd..6f1eaab9ed 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -542,7 +542,7 @@ void Assembler::GrowBuffer() {
// Some internal data structures overflow for very large buffers,
// they must ensure that kMaximalBufferSize is not too large.
if (desc.buffer_size > kMaximalBufferSize) {
- V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
+ V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
}
// Set up new buffer.
@@ -3949,6 +3949,7 @@ void Assembler::sqrtsd(XMMRegister dst, Operand src) {
}
void Assembler::haddps(XMMRegister dst, XMMRegister src) {
+ DCHECK(IsEnabled(SSE3));
EnsureSpace ensure_space(this);
emit(0xF2);
emit_optional_rex_32(dst, src);
@@ -3958,6 +3959,7 @@ void Assembler::haddps(XMMRegister dst, XMMRegister src) {
}
void Assembler::haddps(XMMRegister dst, Operand src) {
+ DCHECK(IsEnabled(SSE3));
EnsureSpace ensure_space(this);
emit(0xF2);
emit_optional_rex_32(dst, src);
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index a532729d15..9e00e682e5 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -103,8 +103,8 @@ class Register : public RegisterBase<Register, kRegAfterLast> {
explicit constexpr Register(int code) : RegisterBase(code) {}
};
-static_assert(IS_TRIVIALLY_COPYABLE(Register) &&
- sizeof(Register) == sizeof(int),
+ASSERT_TRIVIALLY_COPYABLE(Register);
+static_assert(sizeof(Register) == sizeof(int),
"Register can efficiently be passed by value");
#define DECLARE_REGISTER(R) \
@@ -204,8 +204,8 @@ class XMMRegister : public RegisterBase<XMMRegister, kDoubleAfterLast> {
explicit constexpr XMMRegister(int code) : RegisterBase(code) {}
};
-static_assert(IS_TRIVIALLY_COPYABLE(XMMRegister) &&
- sizeof(XMMRegister) == sizeof(int),
+ASSERT_TRIVIALLY_COPYABLE(XMMRegister);
+static_assert(sizeof(XMMRegister) == sizeof(int),
"XMMRegister can efficiently be passed by value");
typedef XMMRegister FloatRegister;
@@ -301,23 +301,25 @@ enum RoundingMode {
// -----------------------------------------------------------------------------
// Machine instruction Immediates
-class Immediate BASE_EMBEDDED {
+class Immediate {
public:
- explicit Immediate(int32_t value) : value_(value) {}
- explicit Immediate(int32_t value, RelocInfo::Mode rmode)
+ explicit constexpr Immediate(int32_t value) : value_(value) {}
+ explicit constexpr Immediate(int32_t value, RelocInfo::Mode rmode)
: value_(value), rmode_(rmode) {}
- explicit Immediate(Smi* value) {
+ explicit Immediate(Smi* value)
+ : value_(static_cast<int32_t>(reinterpret_cast<intptr_t>(value))) {
DCHECK(SmiValuesAre31Bits()); // Only available for 31-bit SMI.
- value_ = static_cast<int32_t>(reinterpret_cast<intptr_t>(value));
}
private:
- int32_t value_;
- RelocInfo::Mode rmode_ = RelocInfo::NONE;
+ const int32_t value_;
+ const RelocInfo::Mode rmode_ = RelocInfo::NONE;
friend class Assembler;
};
-
+ASSERT_TRIVIALLY_COPYABLE(Immediate);
+static_assert(sizeof(Immediate) <= kPointerSize,
+ "Immediate must be small enough to pass it by value");
// -----------------------------------------------------------------------------
// Machine instruction Operands
@@ -380,19 +382,9 @@ class Operand {
private:
const Data data_;
};
+ASSERT_TRIVIALLY_COPYABLE(Operand);
static_assert(sizeof(Operand) <= 2 * kPointerSize,
"Operand must be small enough to pass it by value");
-// Unfortunately, MSVC 2015 is broken in that both is_trivially_destructible and
-// is_trivially_copy_constructible are true, but is_trivially_copyable is false.
-// (status at 2018-02-26, observed on the msvc waterfall bot).
-#if V8_CC_MSVC
-static_assert(std::is_trivially_copy_constructible<Operand>::value &&
- std::is_trivially_destructible<Operand>::value,
- "Operand must be trivially copyable to pass it by value");
-#else
-static_assert(IS_TRIVIALLY_COPYABLE(Operand),
- "Operand must be trivially copyable to pass it by value");
-#endif
#define ASSEMBLER_INSTRUCTION_LIST(V) \
V(add) \
@@ -1460,6 +1452,7 @@ class Assembler : public AssemblerBase {
AVX_P_3(vor, 0x56);
AVX_P_3(vxor, 0x57);
AVX_3(vcvtsd2ss, 0x5a, vsd);
+ AVX_3(vhaddps, 0x7c, vsd);
#undef AVX_3
#undef AVX_S_3
@@ -1636,85 +1629,85 @@ class Assembler : public AssemblerBase {
void vlddqu(XMMRegister dst, Operand src) {
vinstr(0xF0, dst, xmm0, src, kF2, k0F, kWIG);
}
- void vpsllw(XMMRegister dst, XMMRegister src, int8_t imm8) {
+ void vpsllw(XMMRegister dst, XMMRegister src, uint8_t imm8) {
vinstr(0x71, xmm6, dst, src, k66, k0F, kWIG);
emit(imm8);
}
- void vpsrlw(XMMRegister dst, XMMRegister src, int8_t imm8) {
+ void vpsrlw(XMMRegister dst, XMMRegister src, uint8_t imm8) {
vinstr(0x71, xmm2, dst, src, k66, k0F, kWIG);
emit(imm8);
}
- void vpsraw(XMMRegister dst, XMMRegister src, int8_t imm8) {
+ void vpsraw(XMMRegister dst, XMMRegister src, uint8_t imm8) {
vinstr(0x71, xmm4, dst, src, k66, k0F, kWIG);
emit(imm8);
}
- void vpslld(XMMRegister dst, XMMRegister src, int8_t imm8) {
+ void vpslld(XMMRegister dst, XMMRegister src, uint8_t imm8) {
vinstr(0x72, xmm6, dst, src, k66, k0F, kWIG);
emit(imm8);
}
- void vpsrld(XMMRegister dst, XMMRegister src, int8_t imm8) {
+ void vpsrld(XMMRegister dst, XMMRegister src, uint8_t imm8) {
vinstr(0x72, xmm2, dst, src, k66, k0F, kWIG);
emit(imm8);
}
- void vpsrad(XMMRegister dst, XMMRegister src, int8_t imm8) {
+ void vpsrad(XMMRegister dst, XMMRegister src, uint8_t imm8) {
vinstr(0x72, xmm4, dst, src, k66, k0F, kWIG);
emit(imm8);
}
- void vpextrb(Register dst, XMMRegister src, int8_t imm8) {
+ void vpextrb(Register dst, XMMRegister src, uint8_t imm8) {
XMMRegister idst = XMMRegister::from_code(dst.code());
vinstr(0x14, src, xmm0, idst, k66, k0F3A, kW0);
emit(imm8);
}
- void vpextrb(Operand dst, XMMRegister src, int8_t imm8) {
+ void vpextrb(Operand dst, XMMRegister src, uint8_t imm8) {
vinstr(0x14, src, xmm0, dst, k66, k0F3A, kW0);
emit(imm8);
}
- void vpextrw(Register dst, XMMRegister src, int8_t imm8) {
+ void vpextrw(Register dst, XMMRegister src, uint8_t imm8) {
XMMRegister idst = XMMRegister::from_code(dst.code());
vinstr(0xc5, idst, xmm0, src, k66, k0F, kW0);
emit(imm8);
}
- void vpextrw(Operand dst, XMMRegister src, int8_t imm8) {
+ void vpextrw(Operand dst, XMMRegister src, uint8_t imm8) {
vinstr(0x15, src, xmm0, dst, k66, k0F3A, kW0);
emit(imm8);
}
- void vpextrd(Register dst, XMMRegister src, int8_t imm8) {
+ void vpextrd(Register dst, XMMRegister src, uint8_t imm8) {
XMMRegister idst = XMMRegister::from_code(dst.code());
vinstr(0x16, src, xmm0, idst, k66, k0F3A, kW0);
emit(imm8);
}
- void vpextrd(Operand dst, XMMRegister src, int8_t imm8) {
+ void vpextrd(Operand dst, XMMRegister src, uint8_t imm8) {
vinstr(0x16, src, xmm0, dst, k66, k0F3A, kW0);
emit(imm8);
}
- void vpinsrb(XMMRegister dst, XMMRegister src1, Register src2, int8_t imm8) {
+ void vpinsrb(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8) {
XMMRegister isrc = XMMRegister::from_code(src2.code());
vinstr(0x20, dst, src1, isrc, k66, k0F3A, kW0);
emit(imm8);
}
- void vpinsrb(XMMRegister dst, XMMRegister src1, Operand src2, int8_t imm8) {
+ void vpinsrb(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8) {
vinstr(0x20, dst, src1, src2, k66, k0F3A, kW0);
emit(imm8);
}
- void vpinsrw(XMMRegister dst, XMMRegister src1, Register src2, int8_t imm8) {
+ void vpinsrw(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8) {
XMMRegister isrc = XMMRegister::from_code(src2.code());
vinstr(0xc4, dst, src1, isrc, k66, k0F, kW0);
emit(imm8);
}
- void vpinsrw(XMMRegister dst, XMMRegister src1, Operand src2, int8_t imm8) {
+ void vpinsrw(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8) {
vinstr(0xc4, dst, src1, src2, k66, k0F, kW0);
emit(imm8);
}
- void vpinsrd(XMMRegister dst, XMMRegister src1, Register src2, int8_t imm8) {
+ void vpinsrd(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8) {
XMMRegister isrc = XMMRegister::from_code(src2.code());
vinstr(0x22, dst, src1, isrc, k66, k0F3A, kW0);
emit(imm8);
}
- void vpinsrd(XMMRegister dst, XMMRegister src1, Operand src2, int8_t imm8) {
+ void vpinsrd(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8) {
vinstr(0x22, dst, src1, src2, k66, k0F3A, kW0);
emit(imm8);
}
- void vpshufd(XMMRegister dst, XMMRegister src, int8_t imm8) {
+ void vpshufd(XMMRegister dst, XMMRegister src, uint8_t imm8) {
vinstr(0x70, dst, xmm0, src, k66, k0F, kWIG);
emit(imm8);
}
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 2ff00f0402..fc98006f78 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -122,69 +122,57 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ movp(scratch, Immediate(1));
__ Cvtlsi2sd(double_result, scratch);
- if (exponent_type() == TAGGED) {
- __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
- __ SmiToInteger32(exponent, exponent);
- __ jmp(&int_exponent);
-
- __ bind(&exponent_not_smi);
- __ Movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
- }
-
- if (exponent_type() != INTEGER) {
- Label fast_power, try_arithmetic_simplification;
- // Detect integer exponents stored as double.
- __ DoubleToI(exponent, double_exponent, double_scratch,
- &try_arithmetic_simplification,
- &try_arithmetic_simplification);
- __ jmp(&int_exponent);
-
- __ bind(&try_arithmetic_simplification);
- __ Cvttsd2si(exponent, double_exponent);
- // Skip to runtime if possibly NaN (indicated by the indefinite integer).
- __ cmpl(exponent, Immediate(0x1));
- __ j(overflow, &call_runtime);
-
- // Using FPU instructions to calculate power.
- Label fast_power_failed;
- __ bind(&fast_power);
- __ fnclex(); // Clear flags to catch exceptions later.
- // Transfer (B)ase and (E)xponent onto the FPU register stack.
- __ subp(rsp, Immediate(kDoubleSize));
- __ Movsd(Operand(rsp, 0), double_exponent);
- __ fld_d(Operand(rsp, 0)); // E
- __ Movsd(Operand(rsp, 0), double_base);
- __ fld_d(Operand(rsp, 0)); // B, E
-
- // Exponent is in st(1) and base is in st(0)
- // B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
- // FYL2X calculates st(1) * log2(st(0))
- __ fyl2x(); // X
- __ fld(0); // X, X
- __ frndint(); // rnd(X), X
- __ fsub(1); // rnd(X), X-rnd(X)
- __ fxch(1); // X - rnd(X), rnd(X)
- // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
- __ f2xm1(); // 2^(X-rnd(X)) - 1, rnd(X)
- __ fld1(); // 1, 2^(X-rnd(X)) - 1, rnd(X)
- __ faddp(1); // 2^(X-rnd(X)), rnd(X)
- // FSCALE calculates st(0) * 2^st(1)
- __ fscale(); // 2^X, rnd(X)
- __ fstp(1);
- // Bail out to runtime in case of exceptions in the status word.
- __ fnstsw_ax();
- __ testb(rax, Immediate(0x5F)); // Check for all but precision exception.
- __ j(not_zero, &fast_power_failed, Label::kNear);
- __ fstp_d(Operand(rsp, 0));
- __ Movsd(double_result, Operand(rsp, 0));
- __ addp(rsp, Immediate(kDoubleSize));
- __ jmp(&done);
-
- __ bind(&fast_power_failed);
- __ fninit();
- __ addp(rsp, Immediate(kDoubleSize));
- __ jmp(&call_runtime);
- }
+ Label fast_power, try_arithmetic_simplification;
+ // Detect integer exponents stored as double.
+ __ DoubleToI(exponent, double_exponent, double_scratch,
+ &try_arithmetic_simplification, &try_arithmetic_simplification);
+ __ jmp(&int_exponent);
+
+ __ bind(&try_arithmetic_simplification);
+ __ Cvttsd2si(exponent, double_exponent);
+ // Skip to runtime if possibly NaN (indicated by the indefinite integer).
+ __ cmpl(exponent, Immediate(0x1));
+ __ j(overflow, &call_runtime);
+
+ // Using FPU instructions to calculate power.
+ Label fast_power_failed;
+ __ bind(&fast_power);
+ __ fnclex(); // Clear flags to catch exceptions later.
+ // Transfer (B)ase and (E)xponent onto the FPU register stack.
+ __ subp(rsp, Immediate(kDoubleSize));
+ __ Movsd(Operand(rsp, 0), double_exponent);
+ __ fld_d(Operand(rsp, 0)); // E
+ __ Movsd(Operand(rsp, 0), double_base);
+ __ fld_d(Operand(rsp, 0)); // B, E
+
+ // Exponent is in st(1) and base is in st(0)
+ // B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
+ // FYL2X calculates st(1) * log2(st(0))
+ __ fyl2x(); // X
+ __ fld(0); // X, X
+ __ frndint(); // rnd(X), X
+ __ fsub(1); // rnd(X), X-rnd(X)
+ __ fxch(1); // X - rnd(X), rnd(X)
+ // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
+ __ f2xm1(); // 2^(X-rnd(X)) - 1, rnd(X)
+ __ fld1(); // 1, 2^(X-rnd(X)) - 1, rnd(X)
+ __ faddp(1); // 2^(X-rnd(X)), rnd(X)
+ // FSCALE calculates st(0) * 2^st(1)
+ __ fscale(); // 2^X, rnd(X)
+ __ fstp(1);
+ // Bail out to runtime in case of exceptions in the status word.
+ __ fnstsw_ax();
+ __ testb(rax, Immediate(0x5F)); // Check for all but precision exception.
+ __ j(not_zero, &fast_power_failed, Label::kNear);
+ __ fstp_d(Operand(rsp, 0));
+ __ Movsd(double_result, Operand(rsp, 0));
+ __ addp(rsp, Immediate(kDoubleSize));
+ __ jmp(&done);
+
+ __ bind(&fast_power_failed);
+ __ fninit();
+ __ addp(rsp, Immediate(kDoubleSize));
+ __ jmp(&call_runtime);
// Calculate power with integer exponent.
__ bind(&int_exponent);
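
The fast path above implements the identity from its comment, B^E = 2^(E * log2(B)), and only splits X = E * log2(B) into rnd(X) + (X - rnd(X)) because f2xm1 accepts arguments in (-1, 1). A plain-libm sketch of the same decomposition (scalar code for clarity, not the stub's register choreography; valid for positive bases, which is all the fast path handles):

#include <cmath>
#include <cstdio>

// pow(b, e) via the decomposition the x87 sequence uses:
//   x = e * log2(b), pow = 2^(x - round(x)) * 2^round(x)
double PowViaExp2(double b, double e) {
  double x = e * std::log2(b);                     // fyl2x
  double r = std::nearbyint(x);                    // frndint
  double frac = std::exp2(x - r);                  // f2xm1 + fld1 + faddp
  return std::scalbn(frac, static_cast<int>(r));   // fscale
}

int main() {
  std::printf("%f\n", PowViaExp2(2.0, 10.0));  // prints ~1024
  return 0;
}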
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index ee2cfd5e8b..056d3d01f6 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -5,6 +5,7 @@
#if V8_TARGET_ARCH_X64
#include "src/codegen.h"
+#include "src/isolate.h"
#include "src/macro-assembler.h"
#include "src/x64/assembler-x64-inl.h"
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index 91cee67bdd..104f46889e 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -1040,6 +1040,11 @@ int DisassemblerX64::AVXInstruction(byte* data) {
NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
break;
+ case 0x51:
+ AppendToBuffer("vsqrtss %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x58:
AppendToBuffer("vaddss %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
@@ -1156,6 +1161,11 @@ int DisassemblerX64::AVXInstruction(byte* data) {
AppendToBuffer("vlddqu %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
break;
+ case 0x7C:
+ AppendToBuffer("vhaddps %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ break;
default:
UnimplementedInstruction();
}
@@ -1980,7 +1990,7 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("haddps %s,", NameOfXMMRegister(regop));
- current += PrintRightOperand(current);
+ current += PrintRightXMMOperand(current);
} else {
UnimplementedInstruction();
}
diff --git a/deps/v8/src/x64/frame-constants-x64.h b/deps/v8/src/x64/frame-constants-x64.h
index 07d2d1a8b1..fc451b663c 100644
--- a/deps/v8/src/x64/frame-constants-x64.h
+++ b/deps/v8/src/x64/frame-constants-x64.h
@@ -11,45 +11,47 @@ namespace internal {
class EntryFrameConstants : public AllStatic {
public:
#ifdef _WIN64
- static const int kCalleeSaveXMMRegisters = 10;
- static const int kXMMRegisterSize = 16;
- static const int kXMMRegistersBlockSize =
+ static constexpr int kCalleeSaveXMMRegisters = 10;
+ static constexpr int kXMMRegisterSize = 16;
+ static constexpr int kXMMRegistersBlockSize =
kXMMRegisterSize * kCalleeSaveXMMRegisters;
- static const int kCallerFPOffset =
+ static constexpr int kCallerFPOffset =
-3 * kPointerSize + -7 * kRegisterSize - kXMMRegistersBlockSize;
#else
// We have 3 Push and 5 pushq in the JSEntryStub::GenerateBody.
- static const int kCallerFPOffset = -3 * kPointerSize + -5 * kRegisterSize;
+ static constexpr int kCallerFPOffset = -3 * kPointerSize + -5 * kRegisterSize;
#endif
- static const int kArgvOffset = 6 * kPointerSize;
+ static constexpr int kArgvOffset = 6 * kPointerSize;
};
class ExitFrameConstants : public TypedFrameConstants {
public:
- static const int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
- static const int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+ static constexpr int kSPOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
+ static constexpr int kCodeOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
DEFINE_TYPED_FRAME_SIZES(2);
- static const int kCallerFPOffset = +0 * kPointerSize;
- static const int kCallerPCOffset = kFPOnStackSize;
+ static constexpr int kCallerFPOffset = +0 * kPointerSize;
+ static constexpr int kCallerPCOffset = kFPOnStackSize;
// FP-relative displacement of the caller's SP. It points just
// below the saved PC.
- static const int kCallerSPDisplacement = kCallerPCOffset + kPCOnStackSize;
+ static constexpr int kCallerSPDisplacement = kCallerPCOffset + kPCOnStackSize;
- static const int kConstantPoolOffset = 0; // Not used
+ static constexpr int kConstantPoolOffset = 0; // Not used
};
class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
- static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
- static const int kLastParameterOffset = kFPOnStackSize + kPCOnStackSize;
- static const int kFunctionOffset = StandardFrameConstants::kFunctionOffset;
+ static constexpr int kLocal0Offset =
+ StandardFrameConstants::kExpressionsOffset;
+ static constexpr int kLastParameterOffset = kFPOnStackSize + kPCOnStackSize;
+ static constexpr int kFunctionOffset =
+ StandardFrameConstants::kFunctionOffset;
// Caller SP-relative.
- static const int kParam0Offset = -2 * kPointerSize;
- static const int kReceiverOffset = -1 * kPointerSize;
+ static constexpr int kParam0Offset = -2 * kPointerSize;
+ static constexpr int kReceiverOffset = -1 * kPointerSize;
};
} // namespace internal
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index e09321e183..b30391fde8 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -100,20 +100,6 @@ int64_t TurboAssembler::RootRegisterDelta(ExternalReference other) {
return delta;
}
-
-Operand MacroAssembler::ExternalOperand(ExternalReference target,
- Register scratch) {
- if (root_array_available_ && !serializer_enabled()) {
- int64_t delta = RootRegisterDelta(target);
- if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
- return Operand(kRootRegister, static_cast<int32_t>(delta));
- }
- }
- Move(scratch, target);
- return Operand(scratch, 0);
-}
-
-
void MacroAssembler::Load(Register destination, ExternalReference source) {
if (root_array_available_ && !serializer_enabled()) {
int64_t delta = RootRegisterDelta(source);
@@ -162,6 +148,18 @@ void TurboAssembler::LoadAddress(Register destination,
Move(destination, source);
}
+Operand TurboAssembler::ExternalOperand(ExternalReference target,
+ Register scratch) {
+ if (root_array_available_ && !serializer_enabled()) {
+ int64_t delta = RootRegisterDelta(target);
+ if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
+ return Operand(kRootRegister, static_cast<int32_t>(delta));
+ }
+ }
+ Move(scratch, target);
+ return Operand(scratch, 0);
+}
+
int TurboAssembler::LoadAddressSize(ExternalReference source) {
if (root_array_available_ && !serializer_enabled()) {
// This calculation depends on the internals of LoadAddress.
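
ExternalOperand() moves from MacroAssembler to TurboAssembler unchanged: when the root array is available and the serializer is off, a reference within an int32 displacement of kRootRegister becomes a single [root + delta] operand, otherwise the address is materialized into a scratch register first. A toy model of that decision, with hypothetical types standing in for Register/Operand and ignoring the kInvalidRootRegisterDelta sentinel:

#include <cstdint>
#include <cstdio>
#include <limits>

// Decide whether an absolute address is reachable as a 32-bit displacement
// off a known base (the root register), mirroring the fast path above.
bool FitsRootRelative(uint64_t root_register_value, uint64_t target,
                      int32_t* delta_out) {
  int64_t delta = static_cast<int64_t>(target - root_register_value);
  if (delta < std::numeric_limits<int32_t>::min() ||
      delta > std::numeric_limits<int32_t>::max()) {
    return false;  // too far: fall back to Move(scratch, target)
  }
  *delta_out = static_cast<int32_t>(delta);
  return true;
}

int main() {
  int32_t delta;
  if (FitsRootRelative(0x10000000, 0x10000400, &delta)) {
    std::printf("use [root + %d]\n", delta);    // one memory operand
  } else {
    std::printf("materialize into scratch\n");  // extra mov
  }
  return 0;
}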
@@ -1133,15 +1131,19 @@ void TurboAssembler::MoveNumber(Register dst, double value) {
void TurboAssembler::Move(XMMRegister dst, uint32_t src) {
if (src == 0) {
- Xorpd(dst, dst);
+ Xorps(dst, dst);
} else {
+ unsigned nlz = base::bits::CountLeadingZeros(src);
+ unsigned ntz = base::bits::CountTrailingZeros(src);
unsigned pop = base::bits::CountPopulation(src);
DCHECK_NE(0u, pop);
- if (pop == 32) {
+ if (pop + ntz + nlz == 32) {
Pcmpeqd(dst, dst);
+ if (ntz) Pslld(dst, static_cast<byte>(ntz + nlz));
+ if (nlz) Psrld(dst, static_cast<byte>(nlz));
} else {
movl(kScratchRegister, Immediate(src));
- Movq(dst, kScratchRegister);
+ Movd(dst, kScratchRegister);
}
}
}
@@ -1154,14 +1156,10 @@ void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
unsigned ntz = base::bits::CountTrailingZeros(src);
unsigned pop = base::bits::CountPopulation(src);
DCHECK_NE(0u, pop);
- if (pop == 64) {
+ if (pop + ntz + nlz == 64) {
Pcmpeqd(dst, dst);
- } else if (pop + ntz == 64) {
- Pcmpeqd(dst, dst);
- Psllq(dst, static_cast<byte>(ntz));
- } else if (pop + nlz == 64) {
- Pcmpeqd(dst, dst);
- Psrlq(dst, static_cast<byte>(nlz));
+ if (ntz) Psllq(dst, static_cast<byte>(ntz + nlz));
+ if (nlz) Psrlq(dst, static_cast<byte>(nlz));
} else {
uint32_t lower = static_cast<uint32_t>(src);
uint32_t upper = static_cast<uint32_t>(src >> 32);
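
The new pop + ntz + nlz == 64 test (and its 32-bit sibling above) matches any constant whose set bits form one contiguous run, generalizing the old all-ones, pop + ntz and pop + nlz special cases: Pcmpeqd produces all ones, a left shift by ntz + nlz trims the run to length, and a logical right shift by nlz parks it at the right offset. A self-contained check of that bit trick in plain C++:

#include <cassert>
#include <cstdint>

// Emulate Pcmpeqd (all ones) + Psllq/Psrlq for a constant whose set bits
// form a single contiguous run with `nlz` leading and `ntz` trailing zeros.
uint64_t MaterializeRun(unsigned nlz, unsigned ntz) {
  uint64_t v = ~uint64_t{0};           // Pcmpeqd dst, dst
  if (ntz) v <<= (ntz + nlz);          // Psllq dst, ntz + nlz
  if (nlz) v >>= nlz;                  // Psrlq dst, nlz
  return v;
}

int main() {
  // 0x0000000000FFFF00: 40 leading zeros, 8 trailing zeros, 16 set bits,
  // so pop + ntz + nlz == 64 and the fast path applies.
  assert(MaterializeRun(40, 8) == 0x0000000000FFFF00ull);
  // All ones is the degenerate run: nlz == ntz == 0.
  assert(MaterializeRun(0, 0) == ~uint64_t{0});
  return 0;
}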
@@ -1178,23 +1176,23 @@ void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
// ----------------------------------------------------------------------------
void MacroAssembler::Absps(XMMRegister dst) {
- Andps(dst,
- ExternalOperand(ExternalReference::address_of_float_abs_constant()));
+ Andps(dst, ExternalOperand(
+ ExternalReference::address_of_float_abs_constant(isolate())));
}
void MacroAssembler::Negps(XMMRegister dst) {
- Xorps(dst,
- ExternalOperand(ExternalReference::address_of_float_neg_constant()));
+ Xorps(dst, ExternalOperand(
+ ExternalReference::address_of_float_neg_constant(isolate())));
}
void MacroAssembler::Abspd(XMMRegister dst) {
- Andps(dst,
- ExternalOperand(ExternalReference::address_of_double_abs_constant()));
+ Andps(dst, ExternalOperand(
+ ExternalReference::address_of_double_abs_constant(isolate())));
}
void MacroAssembler::Negpd(XMMRegister dst) {
- Xorps(dst,
- ExternalOperand(ExternalReference::address_of_double_neg_constant()));
+ Xorps(dst, ExternalOperand(
+ ExternalReference::address_of_double_neg_constant(isolate())));
}
void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
@@ -1366,9 +1364,8 @@ void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
jmp(code_object, rmode);
}
-void MacroAssembler::JumpToInstructionStream(const InstructionStream* stream) {
- Address bytes_address = reinterpret_cast<Address>(stream->bytes());
- Move(kOffHeapTrampolineRegister, bytes_address, RelocInfo::NONE);
+void MacroAssembler::JumpToInstructionStream(Address entry) {
+ Move(kOffHeapTrampolineRegister, entry, RelocInfo::OFF_HEAP_TARGET);
jmp(kOffHeapTrampolineRegister);
}
@@ -1825,6 +1822,18 @@ void TurboAssembler::AssertZeroExtended(Register int32_register) {
}
}
+void MacroAssembler::AssertConstructor(Register object) {
+ if (emit_debug_code()) {
+ testb(object, Immediate(kSmiTagMask));
+ Check(not_equal, AbortReason::kOperandIsASmiAndNotAConstructor);
+ Push(object);
+ movq(object, FieldOperand(object, HeapObject::kMapOffset));
+ testb(FieldOperand(object, Map::kBitFieldOffset),
+ Immediate(Map::IsConstructorBit::kMask));
+ Pop(object);
+ Check(not_zero, AbortReason::kOperandIsNotAConstructor);
+ }
+}
void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
@@ -1885,6 +1894,13 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
}
}
+void MacroAssembler::LoadWeakValue(Register in_out, Label* target_if_cleared) {
+ cmpp(in_out, Immediate(kClearedWeakHeapObject));
+ j(equal, target_if_cleared);
+
+ andp(in_out, Immediate(~kWeakHeapObjectMask));
+}
+
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
@@ -2109,31 +2125,13 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
- Label skip_hook, call_hook;
- ExternalReference debug_is_active =
- ExternalReference::debug_is_active_address(isolate());
- Operand debug_is_active_operand = ExternalOperand(debug_is_active);
- cmpb(debug_is_active_operand, Immediate(0));
- j(equal, &skip_hook);
-
+ Label skip_hook;
ExternalReference debug_hook_active =
ExternalReference::debug_hook_on_function_call_address(isolate());
Operand debug_hook_active_operand = ExternalOperand(debug_hook_active);
cmpb(debug_hook_active_operand, Immediate(0));
- j(not_equal, &call_hook);
-
- movp(kScratchRegister,
- FieldOperand(fun, JSFunction::kSharedFunctionInfoOffset));
- movp(kScratchRegister,
- FieldOperand(kScratchRegister, SharedFunctionInfo::kDebugInfoOffset));
- JumpIfSmi(kScratchRegister, &skip_hook);
- movp(kScratchRegister,
- FieldOperand(kScratchRegister, DebugInfo::kFlagsOffset));
- SmiToInteger32(kScratchRegister, kScratchRegister);
- testp(kScratchRegister, Immediate(DebugInfo::kBreakAtEntry));
- j(zero, &skip_hook);
-
- bind(&call_hook);
+ j(equal, &skip_hook);
+
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -2493,6 +2491,7 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
}
void TurboAssembler::ResetSpeculationPoisonRegister() {
+ // TODO(tebbi): Perhaps, we want to put an lfence here.
Set(kSpeculationPoisonRegister, -1);
}
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index faa0462cd1..564aa8d58d 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -20,9 +20,9 @@ constexpr Register kReturnRegister2 = r8;
constexpr Register kJSFunctionRegister = rdi;
constexpr Register kContextRegister = rsi;
constexpr Register kAllocateSizeRegister = rdx;
-constexpr Register kSpeculationPoisonRegister = r9;
+constexpr Register kSpeculationPoisonRegister = r12;
constexpr Register kInterpreterAccumulatorRegister = rax;
-constexpr Register kInterpreterBytecodeOffsetRegister = r12;
+constexpr Register kInterpreterBytecodeOffsetRegister = r9;
constexpr Register kInterpreterBytecodeArrayRegister = r14;
constexpr Register kInterpreterDispatchTableRegister = r15;
constexpr Register kJavaScriptCallArgCountRegister = rax;
@@ -187,7 +187,9 @@ class TurboAssembler : public Assembler {
AVX_OP(Movss, movss)
AVX_OP(Movsd, movsd)
AVX_OP(Pcmpeqd, pcmpeqd)
+ AVX_OP(Pslld, pslld)
AVX_OP(Psllq, psllq)
+ AVX_OP(Psrld, psrld)
AVX_OP(Psrlq, psrlq)
AVX_OP(Addsd, addsd)
AVX_OP(Mulsd, mulsd)
@@ -208,6 +210,7 @@ class TurboAssembler : public Assembler {
AVX_OP(Cmpnlepd, cmpnlepd)
AVX_OP(Roundss, roundss)
AVX_OP(Roundsd, roundsd)
+ AVX_OP(Sqrtss, sqrtss)
AVX_OP(Sqrtsd, sqrtsd)
AVX_OP(Ucomiss, ucomiss)
AVX_OP(Ucomisd, ucomisd)
@@ -355,6 +358,15 @@ class TurboAssembler : public Assembler {
// register.
void LoadAddress(Register destination, ExternalReference source);
+ // Operand pointing to an external reference.
+ // May emit code to set up the scratch register. The operand is
+ // only guaranteed to be correct as long as the scratch register
+ // isn't changed.
+ // If the operand is used more than once, use a scratch register
+ // that is guaranteed not to be clobbered.
+ Operand ExternalOperand(ExternalReference reference,
+ Register scratch = kScratchRegister);
+
void Call(Operand op);
void Call(Handle<Code> code_object, RelocInfo::Mode rmode);
void Call(Address destination, RelocInfo::Mode rmode);
@@ -541,14 +553,6 @@ class MacroAssembler : public TurboAssembler {
bool old_value_;
};
- // Operand pointing to an external reference.
- // May emit code to set up the scratch register. The operand is
- // only guaranteed to be correct as long as the scratch register
- // isn't changed.
- // If the operand is used more than once, use a scratch register
- // that is guaranteed not to be clobbered.
- Operand ExternalOperand(ExternalReference reference,
- Register scratch = kScratchRegister);
// Loads and stores the value of an external reference.
// Special case code for load and store to take advantage of
// load_rax/store_rax if possible/necessary.
@@ -766,7 +770,7 @@ class MacroAssembler : public TurboAssembler {
void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);
// Generates a trampoline to jump to the off-heap instruction stream.
- void JumpToInstructionStream(const InstructionStream* stream);
+ void JumpToInstructionStream(Address entry);
// Non-x64 instructions.
// Push/pop all general purpose registers.
@@ -809,6 +813,9 @@ class MacroAssembler : public TurboAssembler {
// Abort execution if argument is not a FixedArray, enabled via --debug-code.
void AssertFixedArray(Register object);
+ // Abort execution if argument is not a Constructor, enabled via --debug-code.
+ void AssertConstructor(Register object);
+
// Abort execution if argument is not a JSFunction, enabled via --debug-code.
void AssertFunction(Register object);
@@ -885,6 +892,9 @@ class MacroAssembler : public TurboAssembler {
void IncrementCounter(StatsCounter* counter, int value);
void DecrementCounter(StatsCounter* counter, int value);
+ // ---------------------------------------------------------------------------
+ // In-place weak references.
+ void LoadWeakValue(Register in_out, Label* target_if_cleared);
// ---------------------------------------------------------------------------
// Debugging
diff --git a/deps/v8/src/zone/zone.cc b/deps/v8/src/zone/zone.cc
index 470f4c4177..c11973250c 100644
--- a/deps/v8/src/zone/zone.cc
+++ b/deps/v8/src/zone/zone.cc
@@ -147,7 +147,7 @@ Address Zone::NewExpand(size_t size) {
const size_t min_new_size = kSegmentOverhead + size;
// Guard against integer overflow.
if (new_size_no_overhead < size || new_size < kSegmentOverhead) {
- V8::FatalProcessOutOfMemory("Zone");
+ V8::FatalProcessOutOfMemory(nullptr, "Zone");
return nullptr;
}
if (segment_size_ == SegmentSize::kLarge) {
@@ -163,12 +163,12 @@ Address Zone::NewExpand(size_t size) {
new_size = Max(min_new_size, kMaximumSegmentSize);
}
if (new_size > INT_MAX) {
- V8::FatalProcessOutOfMemory("Zone");
+ V8::FatalProcessOutOfMemory(nullptr, "Zone");
return nullptr;
}
Segment* segment = NewSegment(new_size);
if (segment == nullptr) {
- V8::FatalProcessOutOfMemory("Zone");
+ V8::FatalProcessOutOfMemory(nullptr, "Zone");
return nullptr;
}